Diffstat (limited to 'block/blk-wbt.c')
-rw-r--r-- | block/blk-wbt.c | 55 |
1 file changed, 24 insertions, 31 deletions
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 461a9af11efe..1d94a20374fc 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -166,7 +166,7 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
 		int diff = limit - inflight;
 
 		if (!inflight || diff >= rwb->wb_background / 2)
-			wake_up_all(&rqw->wait);
+			wake_up(&rqw->wait);
 	}
 }
 
@@ -474,30 +474,6 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 	return limit;
 }
 
-static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
-			     wait_queue_entry_t *wait, unsigned long rw)
-{
-	/*
-	 * inc it here even if disabled, since we'll dec it at completion.
-	 * this only happens if the task was sleeping in __wbt_wait(),
-	 * and someone turned it off at the same time.
-	 */
-	if (!rwb_enabled(rwb)) {
-		atomic_inc(&rqw->inflight);
-		return true;
-	}
-
-	/*
-	 * If the waitqueue is already active and we are not the next
-	 * in line to be woken up, wait for our turn.
-	 */
-	if (waitqueue_active(&rqw->wait) &&
-	    rqw->wait.head.next != &wait->entry)
-		return false;
-
-	return rq_wait_inc_below(rqw, get_limit(rwb, rw));
-}
-
 /*
  * Block if we will exceed our limit, or if we are currently waiting for
  * the timer to kick off queuing again.
@@ -508,16 +484,32 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 	__acquires(lock)
 {
 	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
-	DEFINE_WAIT(wait);
+	DECLARE_WAITQUEUE(wait, current);
+
+	/*
+	 * inc it here even if disabled, since we'll dec it at completion.
+	 * this only happens if the task was sleeping in __wbt_wait(),
+	 * and someone turned it off at the same time.
+	 */
+	if (!rwb_enabled(rwb)) {
+		atomic_inc(&rqw->inflight);
+		return;
+	}
 
-	if (may_queue(rwb, rqw, &wait, rw))
+	if (!waitqueue_active(&rqw->wait)
+		&& rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 		return;
 
+	add_wait_queue_exclusive(&rqw->wait, &wait);
 	do {
-		prepare_to_wait_exclusive(&rqw->wait, &wait,
-						TASK_UNINTERRUPTIBLE);
+		set_current_state(TASK_UNINTERRUPTIBLE);
+
+		if (!rwb_enabled(rwb)) {
+			atomic_inc(&rqw->inflight);
+			break;
+		}
 
-		if (may_queue(rwb, rqw, &wait, rw))
+		if (rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 			break;
 
 		if (lock) {
@@ -528,7 +520,8 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 		io_schedule();
 	} while (1);
 
-	finish_wait(&rqw->wait, &wait);
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&rqw->wait, &wait);
 }
 
 static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
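The pattern the patch switches to is the open-coded exclusive wait: each throttled task registers itself with add_wait_queue_exclusive(), so the completion side can call wake_up() and wake exactly one waiter, instead of wake_up_all() waking every sleeper only for most of them to fail the limit check and sleep again. Below is a minimal sketch of that pattern in isolation, using only standard kernel waitqueue calls; demo_wq, throttle_one(), limit_ok and demo_complete() are hypothetical names for illustration, not identifiers from blk-wbt or this patch.

/*
 * Simplified sketch of the exclusive-waiter pattern used in the hunk above.
 * Not part of blk-wbt; helper names are made up for illustration.
 */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

static void throttle_one(bool (*limit_ok)(void))
{
	DECLARE_WAITQUEUE(wait, current);

	/* Fast path: no one is queued ahead of us and we are under the limit. */
	if (!waitqueue_active(&demo_wq) && limit_ok())
		return;

	/* Exclusive entry: a plain wake_up() wakes at most one such waiter. */
	add_wait_queue_exclusive(&demo_wq, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (limit_ok())
			break;
		/* Sleep until the completion side wakes us. */
		io_schedule();
	} while (1);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&demo_wq, &wait);
}

/* Completion side: wake a single exclusive waiter rather than the whole queue. */
static void demo_complete(void)
{
	wake_up(&demo_wq);
}

Because the waiters are exclusive, pairing them with wake_up() (as the first hunk does) avoids the thundering herd that wake_up_all() would cause on every completion, while still draining the queue one waiter per wakeup.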