author		Jeff Moyer <jmoyer@redhat.com>		2015-10-20 17:13:51 +0200
committer	Jens Axboe <axboe@fb.com>		2015-10-21 23:00:48 +0200
commit		0809e3ac62319dc7534b64f95ac37e230d740e8a (patch)
tree		70814dd32f930745834e4186445acf8dcbd1e3c1
parent		blk-mq: remove unused blk_mq_clone_flush_request prototype (diff)
block: fix plug list flushing for nomerge queues
Request queues with merging disabled will not flush the plug list after
BLK_MAX_REQUEST_COUNT requests have been queued, since the code relies on
blk_attempt_plug_merge() to compute the request_count. Fix this by
computing the number of queued requests even for nomerge queues.

Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
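For context, the threshold that this patch re-enables sits later in the
submission path, shown here as a simplified sketch rather than the full
plug-handling code. With merging disabled, request_count previously stayed
at 0, so this check never fired:

	/* Flush a full plug list rather than letting it grow unbounded. */
	if (request_count >= BLK_MAX_REQUEST_COUNT)
		blk_flush_plug_list(plug, false);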
-rw-r--r--	block/blk-core.c	32
-rw-r--r--	block/blk-mq.c		9
-rw-r--r--	block/blk.h		1
3 files changed, 36 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2eb722d48773..f0ae087ead06 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1594,6 +1594,30 @@ out:
 	return ret;
 }
 
+unsigned int blk_plug_queued_count(struct request_queue *q)
+{
+	struct blk_plug *plug;
+	struct request *rq;
+	struct list_head *plug_list;
+	unsigned int ret = 0;
+
+	plug = current->plug;
+	if (!plug)
+		goto out;
+
+	if (q->mq_ops)
+		plug_list = &plug->mq_list;
+	else
+		plug_list = &plug->list;
+
+	list_for_each_entry(rq, plug_list, queuelist) {
+		if (rq->q == q)
+			ret++;
+	}
+out:
+	return ret;
+}
+
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
 	req->cmd_type = REQ_TYPE_FS;
@@ -1641,9 +1665,11 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 * Check if we can merge with the plugged list before grabbing
 	 * any locks.
 	 */
-	if (!blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
-		return;
+	if (!blk_queue_nomerges(q)) {
+		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+			return;
+	} else
+		request_count = blk_plug_queued_count(q);
 
 	spin_lock_irq(q->queue_lock);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d921cd5177f5..9683a561efcd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1268,9 +1268,12 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	blk_queue_split(q, &bio, q->bio_split);
 
-	if (!is_flush_fua && !blk_queue_nomerges(q) &&
-	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
-		return;
+	if (!is_flush_fua && !blk_queue_nomerges(q)) {
+		if (blk_attempt_plug_merge(q, bio, &request_count,
+					   &same_queue_rq))
+			return;
+	} else
+		request_count = blk_plug_queued_count(q);
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
diff --git a/block/blk.h b/block/blk.h
index 98614ad37c81..aa27d0292af1 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -86,6 +86,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 			    unsigned int *request_count,
 			    struct request **same_queue_rq);
+unsigned int blk_plug_queued_count(struct request_queue *q);
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
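
For reference, the "nomerge" case this patch fixes is a queue with
QUEUE_FLAG_NOMERGES set, which is what makes blk_queue_nomerges(q) true in
the paths above. A minimal sketch of how a driver of this era might disable
merging at queue setup (hypothetical driver code, not part of this patch):

	/* Skip all request merging on this queue; plug-list accounting
	 * must then come from blk_plug_queued_count() as added above. */
	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, q);

The same effect is reachable at runtime by writing 2 to
/sys/block/<dev>/queue/nomerges.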