author | Matias Bjørling <m@bjorling.me> | 2013-12-21 00:11:01 +0100
committer | Jens Axboe <axboe@kernel.dk> | 2013-12-21 17:30:34 +0100
commit | fc1bc35443741e132dd0118e8dbac53f69a6f76e (patch)
tree | ba0da257a26bc61ea8bef1f8f2b76828f3e539fd /drivers
parent | null_blk: set use_per_node_hctx param to false (diff)
download | linux-fc1bc35443741e132dd0118e8dbac53f69a6f76e.tar.xz, linux-fc1bc35443741e132dd0118e8dbac53f69a6f76e.zip
null_blk: support submit_queues on use_per_node_hctx
When both the submit_queues param and the use_per_node_hctx param are
used, we limit the number of submit_queues to the number of online
nodes.
If submit_queues is a multiple of nr_online_nodes, the mapping is trivial:
simply spread the queues evenly across the nodes. For example, 8 submit
queues on 4 online nodes are mapped as node0[0,1], node1[2,3], ...
If uneven, we are left with a remainder of submit queues that must still be
mapped. These are distributed from the first node onward, one extra queue
per node. E.g. 5 submit queues mapped onto 4 nodes become node0[0,1],
node1[2], node2[3], node3[4].
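As a rough userspace sketch of this split (the helper name hctx_to_node() and
its parameters are hypothetical stand-ins for the driver's reg->nr_hw_queues,
nr_online_nodes and hctx_index; DIV_ROUND_UP is expanded by hand):

#include <stdio.h>

/*
 * Bucket split: the first (nr_hw_queues % nr_online_nodes) nodes get
 * DIV_ROUND_UP(nr_hw_queues, nr_online_nodes) queues each, the remaining
 * nodes get nr_hw_queues / nr_online_nodes queues each.
 */
static int hctx_to_node(int hctx_index, int nr_hw_queues, int nr_online_nodes)
{
	int b_size = (nr_hw_queues + nr_online_nodes - 1) / nr_online_nodes;
	int tip = nr_hw_queues % nr_online_nodes;
	int node = 0, i, n;

	for (i = 0, n = 1; i < hctx_index; i++, n++) {
		if (n % b_size == 0) {
			n = 0;
			node++;

			tip--;
			if (!tip)
				b_size = nr_hw_queues / nr_online_nodes;
		}
	}
	return node;
}

int main(void)
{
	int i;

	/* 5 submit queues on 4 nodes: node0[0,1], node1[2], node2[3], node3[4] */
	for (i = 0; i < 5; i++)
		printf("queue %d -> node %d\n", i, hctx_to_node(i, 5, 4));
	return 0;
}

With 5 queues and 4 nodes this prints the uneven mapping described above; with
8 queues and 4 nodes it degenerates to the trivial even split.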
Signed-off-by: Matias Bjorling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/block/null_blk.c | 39
1 file changed, 35 insertions, 4 deletions
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 9b0311b61fe1..528f4e47f38e 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
 #include <linux/module.h>
+
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg,
 					  unsigned int hctx_index)
 {
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
-				hctx_index);
+	int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+	int tip = (reg->nr_hw_queues % nr_online_nodes);
+	int node = 0, i, n;
+
+	/*
+	 * Split submit queues evenly wrt to the number of nodes. If uneven,
+	 * fill the first buckets with one extra, until the rest is filled with
+	 * no extra.
+	 */
+	for (i = 0, n = 1; i < hctx_index; i++, n++) {
+		if (n % b_size == 0) {
+			n = 0;
+			node++;
+
+			tip--;
+			if (!tip)
+				b_size = reg->nr_hw_queues / nr_online_nodes;
+		}
+	}
+
+	/*
+	 * A node might not be online, therefore map the relative node id to the
+	 * real node id.
+	 */
+	for_each_online_node(n) {
+		if (!node)
+			break;
+		node--;
+	}
+
+	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
 }
 
 static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
@@ -591,10 +621,11 @@ static int __init null_init(void)
 #endif
 
 	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
-		if (submit_queues > 0)
+		if (submit_queues < nr_online_nodes) {
 			pr_warn("null_blk: submit_queues param is set to %u.",
 							nr_online_nodes);
-		submit_queues = nr_online_nodes;
+			submit_queues = nr_online_nodes;
+		}
 	} else if (submit_queues > nr_cpu_ids)
 		submit_queues = nr_cpu_ids;
 	else if (!submit_queues)
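The for_each_online_node() walk in null_alloc_hctx() above translates the
relative node index computed by the bucket split into a real node id, since
online node ids need not be contiguous. A rough userspace analogue of that
walk (the online_nodes array here is only a hypothetical stand-in for the
kernel's online node mask):

#include <stdio.h>

int main(void)
{
	/* Hypothetical set of online node ids; they need not be contiguous. */
	int online_nodes[] = { 0, 2, 3, 5 };
	int nr_online = sizeof(online_nodes) / sizeof(online_nodes[0]);
	int node = 2;		/* relative index from the bucket split */
	int i, real = -1;

	/* Walk the online nodes until the relative counter reaches zero. */
	for (i = 0; i < nr_online; i++) {
		real = online_nodes[i];
		if (!node)
			break;
		node--;
	}

	printf("relative node %d -> real node %d\n", 2, real);
	return 0;
}

With the example ids above, relative node 2 resolves to real node id 3, the
third online node, mirroring what the kernel loop does.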