author      Jens Axboe <axboe@fb.com>  2014-04-16 18:38:35 +0200
committer   Jens Axboe <axboe@fb.com>  2014-04-16 22:15:24 +0200
commit      6700a678c02e47b6d50c51da2a46ff80efedb8c7 (patch)
tree        6a3bed7a3f012c54ca099d5e7198d4b223cdd3db
parent      blk-mq: don't use preempt_count() to check for right CPU (diff)
download    linux-6700a678c02e47b6d50c51da2a46ff80efedb8c7.tar.xz
            linux-6700a678c02e47b6d50c51da2a46ff80efedb8c7.zip
blk-mq: kill preempt disable/enable in blk_mq_work_fn()
blk_mq_work_fn() is always invoked off the bounded workqueues, so it can happily preempt among the queues in that set without causing any issues for blk-mq.

Signed-off-by: Jens Axboe <axboe@fb.com>
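To illustrate the "bounded workqueues" point: work queued with queue_delayed_work_on() to a workqueue created without WQ_UNBOUND runs in a kworker bound to that CPU, so the handler stays on the CPU the hardware context was mapped to even if it gets preempted. A minimal sketch under that assumption; example_wq and example_kick_queue() are hypothetical names and this is not the actual blk-mq queueing path, which is not part of this patch:

#include <linux/workqueue.h>
#include <linux/blk-mq.h>

/* Hypothetical bounded (per-CPU) workqueue, i.e. allocated without WQ_UNBOUND. */
static struct workqueue_struct *example_wq;

static void example_kick_queue(struct blk_mq_hw_ctx *hctx, int cpu)
{
        /*
         * queue_delayed_work_on() hands the work to a kworker bound to 'cpu',
         * so blk_mq_work_fn() may be preempted but is not migrated to another
         * CPU; the preempt_disable()/preempt_enable() pair removed below adds
         * nothing to that guarantee.
         */
        queue_delayed_work_on(cpu, example_wq, &hctx->delayed_work, 0);
}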
-rw-r--r--  block/blk-mq.c  2
1 file changed, 0 insertions, 2 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1fa01ff0f9bc..b59a8d027dff 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -714,9 +714,7 @@ static void blk_mq_work_fn(struct work_struct *work)
 
 	hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work);
 
-	preempt_disable();
 	__blk_mq_run_hw_queue(hctx);
-	preempt_enable();
 }
 
 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
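For reference, blk_mq_work_fn() as it reads once this hunk is applied; the local declaration of hctx and the blank lines are inferred from the container_of() context line rather than copied from the file:

static void blk_mq_work_fn(struct work_struct *work)
{
        struct blk_mq_hw_ctx *hctx;

        /* Recover the hardware context that embeds this delayed work item. */
        hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work);

        /* Already running on the CPU the work was queued to. */
        __blk_mq_run_hw_queue(hctx);
}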