summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2014-05-27 20:59:49 +0200
committerJens Axboe <axboe@fb.com>2014-05-28 17:49:25 +0200
commit793597a6a95675f4f85671cf747c1d92e7dbc295 (patch)
tree03512fdd3c26e9a7b24890beb84a99b2bc7cd77d /block
parentblk-mq: remove blk_mq_wait_for_tags (diff)
downloadlinux-793597a6a95675f4f85671cf747c1d92e7dbc295.tar.xz
linux-793597a6a95675f4f85671cf747c1d92e7dbc295.zip
blk-mq: do not use blk_mq_alloc_request_pinned in blk_mq_map_request
We already do a non-blocking allocation in blk_mq_map_request, no need to repeat it. Just call __blk_mq_alloc_request to wait directly. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-mq.c8
1 file changed, 5 insertions, 3 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3224888d329a..43f0c8ffa92a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1179,12 +1179,14 @@ static struct request *blk_mq_map_request(struct request_queue *q,
trace_block_getrq(q, bio, rw);
rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false);
if (unlikely(!rq)) {
+ __blk_mq_run_hw_queue(hctx);
blk_mq_put_ctx(ctx);
trace_block_sleeprq(q, bio, rw);
- rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
- false);
- ctx = rq->mq_ctx;
+
+ ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ rq = __blk_mq_alloc_request(q, hctx, ctx, rw,
+ __GFP_WAIT|GFP_ATOMIC, false);
}
hctx->queued++;