author     Jens Axboe <axboe@fb.com>   2015-04-17 16:28:50 +0200
committer  Jens Axboe <axboe@fb.com>   2015-04-17 16:31:12 +0200
commit     569fd0ce96087283866ab8c438dac4bcf1738846 (patch)
tree       f1fcf0648a33638655ca7142667a5e67f4ed073b /block/blk-mq.c
parent     Merge branch 'akpm' (patches from Andrew) (diff)
blk-mq: fix iteration of busy bitmap
Commit 889fa31f00b2 was a bit too eager in reducing the loop count, so we
ended up missing queues in some configurations. Ensure that our division
rounds up, so that's not the case.

Reported-by: Guenter Roeck <linux@roeck-us.net>
Fixes: 889fa31f00b2 ("blk-mq: reduce unnecessary software queue looping")
Signed-off-by: Jens Axboe <axboe@fb.com>
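For reference, the kernel's DIV_ROUND_UP() in include/linux/kernel.h is
((n) + (d) - 1) / (d), i.e. integer division rounded up. A minimal
userspace sketch of why truncating division loses bitmap words here
(the queue counts are illustrative, not taken from a real configuration):

#include <stdio.h>

/* Same definition as the kernel's DIV_ROUND_UP in include/linux/kernel.h. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative values: 9 software queues packed into 8-bit words. */
	unsigned int nr_ctx = 9, bits_per_word = 8;

	/* Truncating division yields 1, so a loop bounded by it never
	 * visits the second word holding the 9th queue's bit. */
	printf("truncated:  %u\n", nr_ctx / bits_per_word);

	/* Rounding up yields 2, covering every mapped queue. */
	printf("rounded up: %u\n", DIV_ROUND_UP(nr_ctx, bits_per_word));
	return 0;
}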
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  |  6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c82de08f3721..ade8a2d1b0aa 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -41,7 +41,7 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
 	unsigned int i;
 
-	for (i = 0; i < hctx->ctx_map.map_size; i++)
+	for (i = 0; i < hctx->ctx_map.size; i++)
 		if (hctx->ctx_map.map[i].word)
 			return true;
 
@@ -730,7 +730,7 @@ static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 	struct blk_mq_ctx *ctx;
 	int i;
 
-	for (i = 0; i < hctx->ctx_map.map_size; i++) {
+	for (i = 0; i < hctx->ctx_map.size; i++) {
 		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
 		unsigned int off, bit;
 
@@ -1818,7 +1818,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		 * This is more accurate and more efficient than looping
 		 * over all possibly mapped software queues.
 		 */
-		map->map_size = hctx->nr_ctx / map->bits_per_word;
+		map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
 
 		/*
 		 * Initialize batch roundrobin counts