author		Christoph Hellwig <hch@lst.de>	2014-09-14 01:40:12 +0200
committer	Jens Axboe <axboe@fb.com>	2014-09-22 20:00:07 +0200
commit		46f92d42ee37e10970e33891b7b61a342bd97aeb (patch)
tree		f8e8a59f0daa0c0fa4406a34b120d1d67b9a1c22 /block/blk-mq.c
parent		blk-mq: fix and simplify tag iteration for the timeout handler (diff)
blk-mq: unshared timeout handler
Duplicate the (small) timeout handler in blk-mq so that we can pass
arguments more easily to the driver timeout handler.  This enables
the next patch.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
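For context: the driver-facing contract is unchanged by this patch. A blk-mq driver still supplies an optional ->timeout callback in its struct blk_mq_ops and returns one of the blk_eh_timer_return codes; what moves is the dispatch on those codes, which blk-mq now performs itself instead of sharing the legacy block-layer handler. Below is a minimal sketch of a driver-side handler against the single-argument signature this tree uses (all mydrv_* names are hypothetical):

#include <linux/blk-mq.h>

/* Hypothetical helper: returns true if the command was aborted in hardware. */
static bool mydrv_try_abort(struct request *req)
{
	return false;	/* placeholder for real abort logic */
}

/*
 * Invoked (via blk_mq_rq_timed_out() in the diff below) once a started
 * request overruns its deadline.  The return code selects the recovery
 * action: BLK_EH_HANDLED makes blk-mq complete the request,
 * BLK_EH_RESET_TIMER re-arms the timer, and BLK_EH_NOT_HANDLED leaves
 * recovery entirely to the driver.
 */
static enum blk_eh_timer_return mydrv_timeout(struct request *req)
{
	if (mydrv_try_abort(req))
		return BLK_EH_HANDLED;
	return BLK_EH_RESET_TIMER;
}

static struct blk_mq_ops mydrv_mq_ops = {
	/* .queue_rq, .map_queue and the other mandatory ops are omitted */
	.timeout	= mydrv_timeout,
};

Having blk-mq call ops->timeout directly, rather than going through the shared handler, is what lets the follow-up patch mentioned in the changelog extend this callback with extra arguments.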
Diffstat (limited to 'block/blk-mq.c')
 block/blk-mq.c | 53 +++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 37 insertions(+), 16 deletions(-)
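Two details in the rewritten blk_mq_check_expired() below are easy to miss. First, blk_mark_rq_complete() makes the timeout path mutually exclusive with normal completion, so only one of the two ever finishes a request. Second, deadlines are compared with time_after_eq()/time_after() rather than plain relational operators, which keeps the comparison correct when the jiffies counter wraps around. A standalone sketch of that wrap-safe idiom (illustrative userspace code, not from the patch):

#include <stdio.h>

/*
 * Roughly what the kernel's time_after_eq(a, b) expands to: perform the
 * subtraction in unsigned arithmetic, then interpret the result as signed.
 */
#define sketch_time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long now = 5;		/* counter just wrapped past zero */
	unsigned long deadline = -10UL;	/* deadline set shortly before the wrap */

	/* A naive 'now >= deadline' would claim the request has not expired. */
	printf("expired: %d\n", sketch_time_after_eq(now, deadline));	/* prints 1 */
	return 0;
}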
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3baebcaf36db..298d6e360661 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -525,9 +525,15 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
-static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
+struct blk_mq_timeout_data {
+	unsigned long next;
+	unsigned int next_set;
+};
+
+static void blk_mq_rq_timed_out(struct request *req)
 {
-	struct request_queue *q = rq->q;
+	struct blk_mq_ops *ops = req->q->mq_ops;
+	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
 
 	/*
 	 * We know that complete is set at this point. If STARTED isn't set
@@ -538,27 +544,43 @@ static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
 	 * we both flags will get cleared. So check here again, and ignore
 	 * a timeout event with a request that isn't active.
 	 */
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
-		return BLK_EH_NOT_HANDLED;
-
-	if (!q->mq_ops->timeout)
-		return BLK_EH_RESET_TIMER;
+	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
+		return;
 
-	return q->mq_ops->timeout(rq);
+	if (ops->timeout)
+		ret = ops->timeout(req);
+
+	switch (ret) {
+	case BLK_EH_HANDLED:
+		__blk_mq_complete_request(req);
+		break;
+	case BLK_EH_RESET_TIMER:
+		blk_add_timer(req);
+		blk_clear_rq_complete(req);
+		break;
+	case BLK_EH_NOT_HANDLED:
+		break;
+	default:
+		printk(KERN_ERR "block: bad eh return: %d\n", ret);
+		break;
+	}
 }
 
-struct blk_mq_timeout_data {
-	unsigned long next;
-	unsigned int next_set;
-};
-
 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, void *priv, bool reserved)
 {
 	struct blk_mq_timeout_data *data = priv;
 
-	if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
-		blk_rq_check_expired(rq, &data->next, &data->next_set);
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+		return;
+
+	if (time_after_eq(jiffies, rq->deadline)) {
+		if (!blk_mark_rq_complete(rq))
+			blk_mq_rq_timed_out(rq);
+	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
+		data->next = rq->deadline;
+		data->next_set = 1;
+	}
 }
 
 static void blk_mq_rq_timer(unsigned long priv)
@@ -1781,7 +1803,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	else
 		blk_queue_make_request(q, blk_sq_make_request);
 
-	blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
 	if (set->timeout)
 		blk_queue_rq_timeout(q, set->timeout);
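Note the knock-on effect in the last hunk: blk_mq_init_queue() stops registering the handler through blk_queue_rq_timed_out(), since the mq timer path (blk_mq_rq_timer() -> blk_mq_check_expired() above) now calls blk_mq_rq_timed_out() directly rather than through the legacy q->rq_timed_out_fn hook. The only timeout knob a driver touches is its tag set; a hedged wiring sketch for the API of this era, reusing the hypothetical mydrv_mq_ops from above:

#include <linux/blk-mq.h>
#include <linux/err.h>

static struct blk_mq_tag_set mydrv_tag_set;	/* hypothetical driver state */
static struct request_queue *mydrv_queue;

static int mydrv_init_queue(void)
{
	int ret;

	mydrv_tag_set.ops = &mydrv_mq_ops;	/* carries the .timeout handler */
	mydrv_tag_set.nr_hw_queues = 1;
	mydrv_tag_set.queue_depth = 64;
	mydrv_tag_set.numa_node = NUMA_NO_NODE;
	mydrv_tag_set.timeout = 30 * HZ;	/* consumed by blk_queue_rq_timeout()
						   in blk_mq_init_queue() above */

	ret = blk_mq_alloc_tag_set(&mydrv_tag_set);
	if (ret)
		return ret;

	mydrv_queue = blk_mq_init_queue(&mydrv_tag_set);
	if (IS_ERR(mydrv_queue)) {
		blk_mq_free_tag_set(&mydrv_tag_set);
		return PTR_ERR(mydrv_queue);
	}
	return 0;
}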