Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c  | 183
1 file changed, 63 insertions(+), 120 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c207fa9870eb..dc5f47f60931 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -22,6 +22,7 @@
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
+#include <linux/prefetch.h>
#include <trace/events/block.h>
@@ -33,49 +34,28 @@
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);
-static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-
/*
* Check if any of the ctx's have pending work in this hardware queue
*/
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
- unsigned int i;
-
- for (i = 0; i < hctx->ctx_map.size; i++)
- if (hctx->ctx_map.map[i].word)
- return true;
-
- return false;
-}
-
-static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_ctx *ctx)
-{
- return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
+ return sbitmap_any_bit_set(&hctx->ctx_map);
}
-#define CTX_TO_BIT(hctx, ctx) \
- ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
-
/*
* Mark this ctx as having pending work in this hardware queue
*/
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{
- struct blk_align_bitmap *bm = get_bm(hctx, ctx);
-
- if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
- set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
+ if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
+ sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}
static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{
- struct blk_align_bitmap *bm = get_bm(hctx, ctx);
-
- clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
+ sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}
void blk_mq_freeze_queue_start(struct request_queue *q)
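/*
 * Annotation (not part of the patch): the three helpers above drop the
 * open-coded blk_align_bitmap / CTX_TO_BIT arithmetic in favour of the
 * sbitmap library; each software-queue index maps to one bit. Minimal
 * standalone sketch of the same set/test/clear/any pattern (the demo_*
 * names, 'sb' and 'idx' are illustrative, not blk-mq fields):
 */
#include <linux/sbitmap.h>

static void demo_mark_pending(struct sbitmap *sb, unsigned int idx)
{
	/* test first so an already-set bit does not dirty the cacheline */
	if (!sbitmap_test_bit(sb, idx))
		sbitmap_set_bit(sb, idx);
}

static void demo_clear_pending(struct sbitmap *sb, unsigned int idx)
{
	sbitmap_clear_bit(sb, idx);
}

static bool demo_any_pending(struct sbitmap *sb)
{
	/* true if any bit in any word of the map is set */
	return sbitmap_any_bit_set(sb);
}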
@@ -246,19 +226,9 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-
rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
- if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) {
- __blk_mq_run_hw_queue(hctx);
- blk_mq_put_ctx(ctx);
-
- ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
- blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
- rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
- ctx = alloc_data.ctx;
- }
blk_mq_put_ctx(ctx);
+
if (!rq) {
blk_queue_exit(q);
return ERR_PTR(-EWOULDBLOCK);
@@ -333,7 +303,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
rq->cmd_flags = 0;
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
- blk_mq_put_tag(hctx, tag, &ctx->last_tag);
+ blk_mq_put_tag(hctx, ctx, tag);
blk_queue_exit(q);
}
@@ -513,7 +483,7 @@ EXPORT_SYMBOL(blk_mq_requeue_request);
static void blk_mq_requeue_work(struct work_struct *work)
{
struct request_queue *q =
- container_of(work, struct request_queue, requeue_work);
+ container_of(work, struct request_queue, requeue_work.work);
LIST_HEAD(rq_list);
struct request *rq, *next;
unsigned long flags;
@@ -568,16 +538,24 @@ EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
void blk_mq_cancel_requeue_work(struct request_queue *q)
{
- cancel_work_sync(&q->requeue_work);
+ cancel_delayed_work_sync(&q->requeue_work);
}
EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
void blk_mq_kick_requeue_list(struct request_queue *q)
{
- kblockd_schedule_work(&q->requeue_work);
+ kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);
+void blk_mq_delay_kick_requeue_list(struct request_queue *q,
+ unsigned long msecs)
+{
+ kblockd_schedule_delayed_work(&q->requeue_work,
+ msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
+
void blk_mq_abort_requeue_list(struct request_queue *q)
{
unsigned long flags;
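/*
 * Annotation (not part of the patch): q->requeue_work becomes a
 * delayed_work so the new blk_mq_delay_kick_requeue_list() can defer
 * the kick, while a zero delay keeps blk_mq_kick_requeue_list()
 * behaving as before. Minimal sketch of the same pattern with an
 * illustrative work item (demo_requeue_work and demo_* are made up):
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_requeue_fn(struct work_struct *work)
{
	/* container_of() must now go through the embedded .work field,
	 * which is what the blk_mq_requeue_work() hunk above adjusts */
	struct delayed_work *dwork = to_delayed_work(work);

	(void)dwork;	/* ... drain the requeue list here ... */
}

static DECLARE_DELAYED_WORK(demo_requeue_work, demo_requeue_fn);

static void demo_kick(unsigned long msecs)
{
	/* msecs == 0 queues the work immediately; non-zero defers it */
	schedule_delayed_work(&demo_requeue_work, msecs_to_jiffies(msecs));
}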
@@ -600,8 +578,10 @@ EXPORT_SYMBOL(blk_mq_abort_requeue_list);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
- if (tag < tags->nr_tags)
+ if (tag < tags->nr_tags) {
+ prefetch(tags->rqs[tag]);
return tags->rqs[tag];
+ }
return NULL;
}
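/*
 * Annotation (not part of the patch): prefetch(), from the
 * <linux/prefetch.h> include added at the top of this diff, is only a
 * hint to start pulling tags->rqs[tag] into the cache before the
 * caller dereferences the returned request; it does not change the
 * result, and hinting at a still-empty slot is harmless.
 */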
@@ -756,38 +736,44 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
return false;
}
+struct flush_busy_ctx_data {
+ struct blk_mq_hw_ctx *hctx;
+ struct list_head *list;
+};
+
+static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
+{
+ struct flush_busy_ctx_data *flush_data = data;
+ struct blk_mq_hw_ctx *hctx = flush_data->hctx;
+ struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
+
+ sbitmap_clear_bit(sb, bitnr);
+ spin_lock(&ctx->lock);
+ list_splice_tail_init(&ctx->rq_list, flush_data->list);
+ spin_unlock(&ctx->lock);
+ return true;
+}
+
/*
* Process software queues that have been marked busy, splicing them
* to the for-dispatch
*/
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
- struct blk_mq_ctx *ctx;
- int i;
-
- for (i = 0; i < hctx->ctx_map.size; i++) {
- struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
- unsigned int off, bit;
-
- if (!bm->word)
- continue;
+ struct flush_busy_ctx_data data = {
+ .hctx = hctx,
+ .list = list,
+ };
- bit = 0;
- off = i * hctx->ctx_map.bits_per_word;
- do {
- bit = find_next_bit(&bm->word, bm->depth, bit);
- if (bit >= bm->depth)
- break;
+ sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
+}
- ctx = hctx->ctxs[bit + off];
- clear_bit(bit, &bm->word);
- spin_lock(&ctx->lock);
- list_splice_tail_init(&ctx->rq_list, list);
- spin_unlock(&ctx->lock);
+static inline unsigned int queued_to_index(unsigned int queued)
+{
+ if (!queued)
+ return 0;
- bit++;
- } while (1);
- }
+ return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
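/*
 * Annotation (not part of the patch): queued_to_index() folds the
 * open-coded bucket selection removed from __blk_mq_run_hw_queue()
 * below into one helper. Worked example of the mapping into
 * hctx->dispatched[], whose last slot is BLK_MQ_MAX_DISPATCH_ORDER - 1:
 *
 *   queued == 0      -> bucket 0
 *   queued == 1      -> ilog2(1) + 1 = 1
 *   queued == 2..3   -> ilog2(queued) + 1 = 2
 *   queued == 4..7   -> ilog2(queued) + 1 = 3
 *   very large batch -> clamped to BLK_MQ_MAX_DISPATCH_ORDER - 1
 */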
/*
@@ -878,10 +864,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
dptr = &driver_list;
}
- if (!queued)
- hctx->dispatched[0]++;
- else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
- hctx->dispatched[ilog2(queued) + 1]++;
+ hctx->dispatched[queued_to_index(queued)]++;
/*
* Any items that need requeuing? Stuff them into hctx->dispatch,
@@ -937,7 +920,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
!blk_mq_hw_queue_mapped(hctx)))
return;
- if (!async) {
+ if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
int cpu = get_cpu();
if (cpumask_test_cpu(cpu, hctx->cpumask)) {
__blk_mq_run_hw_queue(hctx);
@@ -948,8 +931,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
put_cpu();
}
- kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
- &hctx->run_work, 0);
+ kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
}
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
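/*
 * Annotation (not part of the patch): with run_work now a plain
 * work_struct, the queue is either run directly under get_cpu() (i.e.
 * with preemption disabled) or punted to kblockd. The new
 * BLK_MQ_F_BLOCKING check forces the kblockd path for drivers whose
 * ->queue_rq() may sleep. Illustrative driver-side sketch (the
 * demo_tag_set and its values are made up; .ops etc. omitted):
 */
#include <linux/blk-mq.h>

static struct blk_mq_tag_set demo_tag_set = {
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	/* BLK_MQ_F_BLOCKING: never call ->queue_rq() from the direct,
	 * preemption-disabled path shown above */
	.flags		= BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
};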
@@ -970,7 +952,7 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
- cancel_delayed_work(&hctx->run_work);
+ cancel_work(&hctx->run_work);
cancel_delayed_work(&hctx->delay_work);
set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
@@ -1023,7 +1005,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
{
struct blk_mq_hw_ctx *hctx;
- hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
+ hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
__blk_mq_run_hw_queue(hctx);
}
@@ -1240,20 +1222,8 @@ static struct request *blk_mq_map_request(struct request_queue *q,
op_flags |= REQ_SYNC;
trace_block_getrq(q, bio, op);
- blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
+ blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
- if (unlikely(!rq)) {
- __blk_mq_run_hw_queue(hctx);
- blk_mq_put_ctx(ctx);
- trace_block_sleeprq(q, bio, op);
-
- ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
- blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
- rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
- ctx = alloc_data.ctx;
- hctx = alloc_data.hctx;
- }
hctx->queued++;
data->hctx = hctx;
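/*
 * Annotation (not part of the patch): blk_mq_map_request() used to ask
 * for a tag with BLK_MQ_REQ_NOWAIT and, on failure, run the queue and
 * retry by hand. Passing 0 instead presumably lets the tag allocation
 * inside __blk_mq_alloc_request() wait for a free tag itself, which is
 * what makes the removed fallback block above redundant; the same
 * reasoning applies to the earlier blk_mq_alloc_request() hunk.
 */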
@@ -1606,32 +1576,6 @@ fail:
return NULL;
}
-static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
-{
- kfree(bitmap->map);
-}
-
-static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
-{
- unsigned int bpw = 8, total, num_maps, i;
-
- bitmap->bits_per_word = bpw;
-
- num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
- bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
- GFP_KERNEL, node);
- if (!bitmap->map)
- return -ENOMEM;
-
- total = nr_cpu_ids;
- for (i = 0; i < num_maps; i++) {
- bitmap->map[i].depth = min(total, bitmap->bits_per_word);
- total -= bitmap->map[i].depth;
- }
-
- return 0;
-}
-
/*
* 'cpu' is going away. splice any existing rq_list entries from this
* software queue to the hw queue dispatch list, and ensure that it
@@ -1697,7 +1641,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
blk_free_flush_queue(hctx->fq);
- blk_mq_free_bitmap(&hctx->ctx_map);
+ sbitmap_free(&hctx->ctx_map);
}
static void blk_mq_exit_hw_queues(struct request_queue *q,
@@ -1734,7 +1678,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
if (node == NUMA_NO_NODE)
node = hctx->numa_node = set->numa_node;
- INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+ INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
spin_lock_init(&hctx->lock);
INIT_LIST_HEAD(&hctx->dispatch);
@@ -1757,7 +1701,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
if (!hctx->ctxs)
goto unregister_cpu_notifier;
- if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
+ if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
+ node))
goto free_ctxs;
hctx->nr_ctx = 0;
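/*
 * Annotation (not part of the patch): the removed blk_mq_alloc_bitmap()/
 * blk_mq_free_bitmap() pair is replaced by the sbitmap lifecycle used
 * across these hunks. Minimal sketch of that lifecycle with an
 * illustrative 'demo_map' (error handling trimmed):
 */
#include <linux/sbitmap.h>
#include <linux/cpumask.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static struct sbitmap demo_map;

static int demo_map_setup(int node, unsigned int nr_ctx)
{
	/* allocate with room for every possible CPU, 8 bits per word ... */
	if (sbitmap_init_node(&demo_map, nr_cpu_ids, ilog2(8),
			      GFP_KERNEL, node))
		return -ENOMEM;
	/* ... then shrink the visible depth to what is actually mapped,
	 * as the sbitmap_resize() call later in this diff does */
	sbitmap_resize(&demo_map, nr_ctx);
	return 0;
}

static void demo_map_teardown(void)
{
	sbitmap_free(&demo_map);
}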
@@ -1784,7 +1729,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
free_bitmap:
- blk_mq_free_bitmap(&hctx->ctx_map);
+ sbitmap_free(&hctx->ctx_map);
free_ctxs:
kfree(hctx->ctxs);
unregister_cpu_notifier:
@@ -1860,8 +1805,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
mutex_unlock(&q->sysfs_lock);
queue_for_each_hw_ctx(q, hctx, i) {
- struct blk_mq_ctxmap *map = &hctx->ctx_map;
-
/*
* If no software queues are mapped to this hardware queue,
* disable it and free the request entries.
@@ -1887,7 +1830,7 @@ static void blk_mq_map_swqueue(struct request_queue *q,
* This is more accurate and more efficient than looping
* over all possibly mapped software queues.
*/
- map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
+ sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
/*
* Initialize batch roundrobin counts
@@ -2094,7 +2037,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
q->sg_reserved_size = INT_MAX;
- INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
+ INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
INIT_LIST_HEAD(&q->requeue_list);
spin_lock_init(&q->requeue_lock);