path: root/block/blk-flush.c
author     Christoph Hellwig <hch@lst.de>   2014-05-28 16:08:02 +0200
committer  Jens Axboe <axboe@fb.com>        2014-05-28 16:08:02 +0200
commit     6fca6a611c27f1f0d90fbe1cc3c229dbf8c09e48 (patch)
tree       d3348f3ab1169db9b5a1fca67a8fd2164152530c /block/blk-flush.c
parent     blk-mq: remove stale comment for blk_mq_complete_request() (diff)
blk-mq: add helper to insert requests from irq context
Both the cache flush state machine and the SCSI midlayer want to submit
requests from irq context, and the current per-request requeue_work
unfortunately causes corruption due to sharing with the csd field for
flushes.  Replace them with a per-request_queue list of requests to be
requeued.

Based on an earlier test by Ming Lei.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Ming Lei <tom.leiming@gmail.com>
Tested-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
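The key idea is to move the deferred-insert machinery from each request to its
queue: irq-context callers only link the request onto a queue-wide list under an
irq-safe lock and kick a work item, and that work item reinserts the requests from
process context. The blk-mq.c side of the commit is not shown on this page, so the
following is only a minimal sketch of that pattern under assumed "example_" names,
not the actual hunk:

/*
 * Illustrative sketch only, not the blk-mq.c hunk from this commit
 * (which is not part of this page).  It models the pattern the message
 * describes: a per-queue list that irq-context callers append to under
 * an irq-safe lock, drained later by a work item in process context.
 * All names prefixed "example_" are assumptions for the sketch.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_queue {
	struct list_head	requeue_list;	/* entries waiting to be reinserted */
	spinlock_t		requeue_lock;	/* protects requeue_list from irq context */
	struct work_struct	requeue_work;	/* drains requeue_list later */
};

/* Called from irq context: only link the entry, do no real work here. */
static void example_add_to_requeue_list(struct example_queue *q,
					struct list_head *entry, bool at_head)
{
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head)
		list_add(entry, &q->requeue_list);
	else
		list_add_tail(entry, &q->requeue_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);
}

/* Also safe from irq context: just schedule the drain. */
static void example_kick_requeue_list(struct example_queue *q)
{
	schedule_work(&q->requeue_work);
}

/* Runs in process context and reinserts everything that queued up. */
static void example_requeue_work(struct work_struct *work)
{
	struct example_queue *q =
		container_of(work, struct example_queue, requeue_work);
	LIST_HEAD(batch);
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &batch);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	while (!list_empty(&batch)) {
		struct list_head *entry = batch.next;

		list_del_init(entry);
		/*
		 * Here blk-mq would hand the request back to the block
		 * layer, e.g. via blk_mq_insert_request().
		 */
	}
}

static void example_queue_init(struct example_queue *q)
{
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);
	INIT_WORK(&q->requeue_work, example_requeue_work);
}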
Diffstat (limited to 'block/blk-flush.c')
-rw-r--r--  block/blk-flush.c | 16 ++++------------
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ec7a224d6733..ef608b35d9be 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,21 +130,13 @@ static void blk_flush_restore_request(struct request *rq)
 	blk_clear_rq_complete(rq);
 }
 
-static void mq_flush_run(struct work_struct *work)
-{
-	struct request *rq;
-
-	rq = container_of(work, struct request, requeue_work);
-
-	memset(&rq->csd, 0, sizeof(rq->csd));
-	blk_mq_insert_request(rq, false, true, false);
-}
-
 static bool blk_flush_queue_rq(struct request *rq, bool add_front)
 {
 	if (rq->q->mq_ops) {
-		INIT_WORK(&rq->requeue_work, mq_flush_run);
-		kblockd_schedule_work(&rq->requeue_work);
+		struct request_queue *q = rq->q;
+
+		blk_mq_add_to_requeue_list(rq, add_front);
+		blk_mq_kick_requeue_list(q);
 		return false;
 	} else {
 		if (add_front)
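A side effect visible in the hunk above: the new branch no longer needs the
memset() of rq->csd that mq_flush_run() performed, because the requeue
bookkeeping now lives in the request_queue instead of sharing storage with the
csd field in struct request (the corruption the message refers to), and the
flush state machine no longer has to set up a per-request INIT_WORK() and
kblockd_schedule_work() pair from irq context.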