Diffstat (limited to 'block')
 block/bfq-cgroup.c     |   8
 block/bfq-iosched.c    |  17
 block/blk-cgroup.c     | 122
 block/blk-cgroup.h     |  10
 block/blk-flush.c      |  17
 block/blk-iocost.c     |  58
 block/blk-iolatency.c  |  39
 block/blk-mq-cpumap.c  |   1
 block/blk-mq-debugfs.c |   2
 block/blk-mq-pci.c     |   1
 block/blk-mq-sched.c   | 143
 block/blk-mq-sched.h   |   7
 block/blk-mq-sysfs.c   |   2
 block/blk-mq-tag.c     |   2
 block/blk-mq-tag.h     |  73
 block/blk-mq-virtio.c  |   1
 block/blk-mq.c         | 386
 block/blk-mq.h         |  77
 block/blk-pm.c         |   2
 block/blk-rq-qos.h     |   2
 block/blk-stat.c       |   5
 block/blk-sysfs.c      |   1
 block/blk-throttle.c   |  19
 block/blk.h            |   6
 block/elevator.h       |   4
 block/kyber-iosched.c  |   7
 block/mq-deadline.c    |  13
27 files changed, 480 insertions, 545 deletions
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 74f7d051665b..2c90e5de0acd 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -1105,9 +1105,11 @@ static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of, struct bfq_group *bfqg; u64 v; - ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx); + blkg_conf_init(&ctx, buf); + + ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, &ctx); if (ret) - return ret; + goto out; if (sscanf(ctx.body, "%llu", &v) == 1) { /* require "default" on dfl */ @@ -1129,7 +1131,7 @@ static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of, ret = 0; } out: - blkg_conf_finish(&ctx); + blkg_conf_exit(&ctx); return ret ?: nbytes; } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index d9ed3108c17a..b4c4b4808c6c 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -129,7 +129,6 @@ #include "elevator.h" #include "blk.h" #include "blk-mq.h" -#include "blk-mq-tag.h" #include "blk-mq-sched.h" #include "bfq-iosched.h" #include "blk-wbt.h" @@ -6232,7 +6231,7 @@ static inline void bfq_update_insert_stats(struct request_queue *q, static struct bfq_queue *bfq_init_rq(struct request *rq); static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - bool at_head) + blk_insert_t flags) { struct request_queue *q = hctx->queue; struct bfq_data *bfqd = q->elevator->elevator_data; @@ -6255,11 +6254,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, trace_block_rq_insert(rq); - if (!bfqq || at_head) { - if (at_head) - list_add(&rq->queuelist, &bfqd->dispatch); - else - list_add_tail(&rq->queuelist, &bfqd->dispatch); + if (flags & BLK_MQ_INSERT_AT_HEAD) { + list_add(&rq->queuelist, &bfqd->dispatch); + } else if (!bfqq) { + list_add_tail(&rq->queuelist, &bfqd->dispatch); } else { idle_timer_disabled = __bfq_insert_request(bfqd, rq); /* @@ -6289,14 +6287,15 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, } static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, - struct list_head *list, bool at_head) + struct list_head *list, + blk_insert_t flags) { while (!list_empty(list)) { struct request *rq; rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); - bfq_insert_request(hctx, rq, at_head); + bfq_insert_request(hctx, rq, flags); } } diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 18331cb92914..1c1ebeb51003 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -33,7 +33,6 @@ #include "blk-cgroup.h" #include "blk-ioprio.h" #include "blk-throttle.h" -#include "blk-rq-qos.h" /* * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation. @@ -653,69 +652,93 @@ u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v) EXPORT_SYMBOL_GPL(__blkg_prfill_u64); /** - * blkcg_conf_open_bdev - parse and open bdev for per-blkg config update - * @inputp: input string pointer + * blkg_conf_init - initialize a blkg_conf_ctx + * @ctx: blkg_conf_ctx to initialize + * @input: input string + * + * Initialize @ctx which can be used to parse blkg config input string @input. + * Once initialized, @ctx can be used with blkg_conf_open_bdev() and + * blkg_conf_prep(), and must be cleaned up with blkg_conf_exit(). 
+ */ +void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input) +{ + *ctx = (struct blkg_conf_ctx){ .input = input }; +} +EXPORT_SYMBOL_GPL(blkg_conf_init); + +/** + * blkg_conf_open_bdev - parse and open bdev for per-blkg config update + * @ctx: blkg_conf_ctx initialized with blkg_conf_init() * - * Parse the device node prefix part, MAJ:MIN, of per-blkg config update - * from @input and get and return the matching bdev. *@inputp is - * updated to point past the device node prefix. Returns an ERR_PTR() - * value on error. + * Parse the device node prefix part, MAJ:MIN, of per-blkg config update from + * @ctx->input and get and store the matching bdev in @ctx->bdev. @ctx->body is + * set to point past the device node prefix. * - * Use this function iff blkg_conf_prep() can't be used for some reason. + * This function may be called multiple times on @ctx and the extra calls become + * NOOPs. blkg_conf_prep() implicitly calls this function. Use this function + * explicitly if bdev access is needed without resolving the blkcg / policy part + * of @ctx->input. Returns -errno on error. */ -struct block_device *blkcg_conf_open_bdev(char **inputp) +int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx) { - char *input = *inputp; + char *input = ctx->input; unsigned int major, minor; struct block_device *bdev; int key_len; + if (ctx->bdev) + return 0; + if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2) - return ERR_PTR(-EINVAL); + return -EINVAL; input += key_len; if (!isspace(*input)) - return ERR_PTR(-EINVAL); + return -EINVAL; input = skip_spaces(input); bdev = blkdev_get_no_open(MKDEV(major, minor)); if (!bdev) - return ERR_PTR(-ENODEV); + return -ENODEV; if (bdev_is_partition(bdev)) { blkdev_put_no_open(bdev); - return ERR_PTR(-ENODEV); + return -ENODEV; } - *inputp = input; - return bdev; + ctx->body = input; + ctx->bdev = bdev; + return 0; } /** * blkg_conf_prep - parse and prepare for per-blkg config update * @blkcg: target block cgroup * @pol: target policy - * @input: input string - * @ctx: blkg_conf_ctx to be filled + * @ctx: blkg_conf_ctx initialized with blkg_conf_init() + * + * Parse per-blkg config update from @ctx->input and initialize @ctx + * accordingly. On success, @ctx->body points to the part of @ctx->input + * following MAJ:MIN, @ctx->bdev points to the target block device and + * @ctx->blkg to the blkg being configured. * - * Parse per-blkg config update from @input and initialize @ctx with the - * result. @ctx->blkg points to the blkg to be updated and @ctx->body the - * part of @input following MAJ:MIN. This function returns with RCU read - * lock and queue lock held and must be paired with blkg_conf_finish(). + * blkg_conf_open_bdev() may be called on @ctx beforehand. On success, this + * function returns with queue lock held and must be followed by + * blkg_conf_exit(). 
*/ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, - char *input, struct blkg_conf_ctx *ctx) - __acquires(rcu) __acquires(&bdev->bd_queue->queue_lock) + struct blkg_conf_ctx *ctx) + __acquires(&bdev->bd_queue->queue_lock) { - struct block_device *bdev; struct gendisk *disk; struct request_queue *q; struct blkcg_gq *blkg; int ret; - bdev = blkcg_conf_open_bdev(&input); - if (IS_ERR(bdev)) - return PTR_ERR(bdev); - disk = bdev->bd_disk; + ret = blkg_conf_open_bdev(ctx); + if (ret) + return ret; + + disk = ctx->bdev->bd_disk; q = disk->queue; /* @@ -726,7 +749,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, if (ret) goto fail; - rcu_read_lock(); spin_lock_irq(&q->queue_lock); if (!blkcg_policy_enabled(q, pol)) { @@ -755,7 +777,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, /* Drop locks to do new blkg allocation with GFP_KERNEL. */ spin_unlock_irq(&q->queue_lock); - rcu_read_unlock(); new_blkg = blkg_alloc(pos, disk, GFP_KERNEL); if (unlikely(!new_blkg)) { @@ -769,7 +790,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, goto fail_exit_queue; } - rcu_read_lock(); spin_lock_irq(&q->queue_lock); if (!blkcg_policy_enabled(q, pol)) { @@ -796,20 +816,16 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, } success: blk_queue_exit(q); - ctx->bdev = bdev; ctx->blkg = blkg; - ctx->body = input; return 0; fail_preloaded: radix_tree_preload_end(); fail_unlock: spin_unlock_irq(&q->queue_lock); - rcu_read_unlock(); fail_exit_queue: blk_queue_exit(q); fail: - blkdev_put_no_open(bdev); /* * If queue was bypassing, we should retry. Do so after a * short msleep(). It isn't strictly necessary but queue @@ -825,20 +841,27 @@ fail: EXPORT_SYMBOL_GPL(blkg_conf_prep); /** - * blkg_conf_finish - finish up per-blkg config update - * @ctx: blkg_conf_ctx initialized by blkg_conf_prep() + * blkg_conf_exit - clean up per-blkg config update + * @ctx: blkg_conf_ctx initialized with blkg_conf_init() * - * Finish up after per-blkg config update. This function must be paired - * with blkg_conf_prep(). + * Clean up after per-blkg config update. This function must be called on all + * blkg_conf_ctx's initialized with blkg_conf_init(). 
*/ -void blkg_conf_finish(struct blkg_conf_ctx *ctx) - __releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu) +void blkg_conf_exit(struct blkg_conf_ctx *ctx) + __releases(&ctx->bdev->bd_queue->queue_lock) { - spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock); - rcu_read_unlock(); - blkdev_put_no_open(ctx->bdev); + if (ctx->blkg) { + spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock); + ctx->blkg = NULL; + } + + if (ctx->bdev) { + blkdev_put_no_open(ctx->bdev); + ctx->body = NULL; + ctx->bdev = NULL; + } } -EXPORT_SYMBOL_GPL(blkg_conf_finish); +EXPORT_SYMBOL_GPL(blkg_conf_exit); static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src) { @@ -1326,14 +1349,8 @@ int blkcg_init_disk(struct gendisk *disk) if (ret) goto err_ioprio_exit; - ret = blk_iolatency_init(disk); - if (ret) - goto err_throtl_exit; - return 0; -err_throtl_exit: - blk_throtl_exit(disk); err_ioprio_exit: blk_ioprio_exit(disk); err_destroy_all: @@ -1349,7 +1366,6 @@ err_unlock: void blkcg_exit_disk(struct gendisk *disk) { blkg_destroy_all(disk); - rq_qos_exit(disk->queue); blk_throtl_exit(disk); } diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 2c6788658544..d6ad3abc6eca 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -206,15 +206,17 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg, u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v); struct blkg_conf_ctx { + char *input; + char *body; struct block_device *bdev; struct blkcg_gq *blkg; - char *body; }; -struct block_device *blkcg_conf_open_bdev(char **inputp); +void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input); +int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx); int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, - char *input, struct blkg_conf_ctx *ctx); -void blkg_conf_finish(struct blkg_conf_ctx *ctx); + struct blkg_conf_ctx *ctx); +void blkg_conf_exit(struct blkg_conf_ctx *ctx); /** * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg diff --git a/block/blk-flush.c b/block/blk-flush.c index 53202eff545e..00dd2f61312d 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -68,12 +68,10 @@ #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/gfp.h> -#include <linux/blk-mq.h> #include <linux/part_stat.h> #include "blk.h" #include "blk-mq.h" -#include "blk-mq-tag.h" #include "blk-mq-sched.h" /* PREFLUSH/FUA sequences */ @@ -138,11 +136,6 @@ static void blk_flush_restore_request(struct request *rq) rq->end_io = rq->flush.saved_end_io; } -static void blk_flush_queue_rq(struct request *rq, bool add_front) -{ - blk_mq_add_to_requeue_list(rq, add_front, true); -} - static void blk_account_io_flush(struct request *rq) { struct block_device *part = rq->q->disk->part0; @@ -195,7 +188,8 @@ static void blk_flush_complete_seq(struct request *rq, case REQ_FSEQ_DATA: list_move_tail(&rq->flush.list, &fq->flush_data_in_flight); - blk_flush_queue_rq(rq, true); + blk_mq_add_to_requeue_list(rq, BLK_MQ_INSERT_AT_HEAD); + blk_mq_kick_requeue_list(q); break; case REQ_FSEQ_DONE: @@ -352,7 +346,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq, smp_wmb(); req_ref_set(flush_rq, 1); - blk_flush_queue_rq(flush_rq, false); + blk_mq_add_to_requeue_list(flush_rq, BLK_MQ_INSERT_AT_HEAD); + blk_mq_kick_requeue_list(q); } static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq, @@ -396,6 +391,7 @@ void blk_insert_flush(struct request *rq) unsigned long fflags = q->queue_flags; /* may change, 
cache */ unsigned int policy = blk_flush_policy(fflags, rq); struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx); + struct blk_mq_hw_ctx *hctx = rq->mq_hctx; /* * @policy now records what operations need to be done. Adjust @@ -432,7 +428,8 @@ void blk_insert_flush(struct request *rq) */ if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { - blk_mq_request_bypass_insert(rq, false, true); + blk_mq_request_bypass_insert(rq, 0); + blk_mq_run_hw_queue(hctx, false); return; } diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 4442c7a85112..285ced3467ab 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -3106,9 +3106,11 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf, return nbytes; } - ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx); + blkg_conf_init(&ctx, buf); + + ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, &ctx); if (ret) - return ret; + goto err; iocg = blkg_to_iocg(ctx.blkg); @@ -3127,12 +3129,14 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf, weight_updated(iocg, &now); spin_unlock(&iocg->ioc->lock); - blkg_conf_finish(&ctx); + blkg_conf_exit(&ctx); return nbytes; einval: - blkg_conf_finish(&ctx); - return -EINVAL; + ret = -EINVAL; +err: + blkg_conf_exit(&ctx); + return ret; } static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd, @@ -3189,19 +3193,22 @@ static const match_table_t qos_tokens = { static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input, size_t nbytes, loff_t off) { - struct block_device *bdev; + struct blkg_conf_ctx ctx; struct gendisk *disk; struct ioc *ioc; u32 qos[NR_QOS_PARAMS]; bool enable, user; - char *p; + char *body, *p; int ret; - bdev = blkcg_conf_open_bdev(&input); - if (IS_ERR(bdev)) - return PTR_ERR(bdev); + blkg_conf_init(&ctx, input); - disk = bdev->bd_disk; + ret = blkg_conf_open_bdev(&ctx); + if (ret) + goto err; + + body = ctx.body; + disk = ctx.bdev->bd_disk; if (!queue_is_mq(disk->queue)) { ret = -EOPNOTSUPP; goto err; @@ -3223,7 +3230,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input, enable = ioc->enabled; user = ioc->user_qos_params; - while ((p = strsep(&input, " \t\n"))) { + while ((p = strsep(&body, " \t\n"))) { substring_t args[MAX_OPT_ARGS]; char buf[32]; int tok; @@ -3313,7 +3320,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input, blk_mq_unquiesce_queue(disk->queue); blk_mq_unfreeze_queue(disk->queue); - blkdev_put_no_open(bdev); + blkg_conf_exit(&ctx); return nbytes; einval: spin_unlock_irq(&ioc->lock); @@ -3323,7 +3330,7 @@ einval: ret = -EINVAL; err: - blkdev_put_no_open(bdev); + blkg_conf_exit(&ctx); return ret; } @@ -3376,19 +3383,22 @@ static const match_table_t i_lcoef_tokens = { static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input, size_t nbytes, loff_t off) { - struct block_device *bdev; + struct blkg_conf_ctx ctx; struct request_queue *q; struct ioc *ioc; u64 u[NR_I_LCOEFS]; bool user; - char *p; + char *body, *p; int ret; - bdev = blkcg_conf_open_bdev(&input); - if (IS_ERR(bdev)) - return PTR_ERR(bdev); + blkg_conf_init(&ctx, input); + + ret = blkg_conf_open_bdev(&ctx); + if (ret) + goto err; - q = bdev_get_queue(bdev); + body = ctx.body; + q = bdev_get_queue(ctx.bdev); if (!queue_is_mq(q)) { ret = -EOPNOTSUPP; goto err; @@ -3396,7 +3406,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input, ioc = q_to_ioc(q); if (!ioc) { - ret = blk_iocost_init(bdev->bd_disk); + ret 
= blk_iocost_init(ctx.bdev->bd_disk); if (ret) goto err; ioc = q_to_ioc(q); @@ -3409,7 +3419,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input, memcpy(u, ioc->params.i_lcoefs, sizeof(u)); user = ioc->user_cost_model; - while ((p = strsep(&input, " \t\n"))) { + while ((p = strsep(&body, " \t\n"))) { substring_t args[MAX_OPT_ARGS]; char buf[32]; int tok; @@ -3456,7 +3466,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input, blk_mq_unquiesce_queue(q); blk_mq_unfreeze_queue(q); - blkdev_put_no_open(bdev); + blkg_conf_exit(&ctx); return nbytes; einval: @@ -3467,7 +3477,7 @@ einval: ret = -EINVAL; err: - blkdev_put_no_open(bdev); + blkg_conf_exit(&ctx); return ret; } diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index 0dc910568b31..fd5fec989e39 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -755,7 +755,7 @@ static void blkiolatency_enable_work_fn(struct work_struct *work) } } -int blk_iolatency_init(struct gendisk *disk) +static int blk_iolatency_init(struct gendisk *disk) { struct blk_iolatency *blkiolat; int ret; @@ -824,6 +824,29 @@ static void iolatency_clear_scaling(struct blkcg_gq *blkg) } } +static int blk_iolatency_try_init(struct blkg_conf_ctx *ctx) +{ + static DEFINE_MUTEX(init_mutex); + int ret; + + ret = blkg_conf_open_bdev(ctx); + if (ret) + return ret; + + /* + * blk_iolatency_init() may fail after rq_qos_add() succeeds which can + * confuse iolat_rq_qos() test. Make the test and init atomic. + */ + mutex_lock(&init_mutex); + + if (!iolat_rq_qos(ctx->bdev->bd_queue)) + ret = blk_iolatency_init(ctx->bdev->bd_disk); + + mutex_unlock(&init_mutex); + + return ret; +} + static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { @@ -836,9 +859,15 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, u64 oldval; int ret; - ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx); + blkg_conf_init(&ctx, buf); + + ret = blk_iolatency_try_init(&ctx); if (ret) - return ret; + goto out; + + ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, &ctx); + if (ret) + goto out; iolat = blkg_to_lat(ctx.blkg); p = ctx.body; @@ -874,7 +903,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, iolatency_clear_scaling(blkg); ret = 0; out: - blkg_conf_finish(&ctx); + blkg_conf_exit(&ctx); return ret ?: nbytes; } @@ -967,7 +996,7 @@ static void iolatency_pd_init(struct blkg_policy_data *pd) { struct iolatency_grp *iolat = pd_to_lat(pd); struct blkcg_gq *blkg = lat_to_blkg(iolat); - struct rq_qos *rqos = blkcg_rq_qos(blkg->q); + struct rq_qos *rqos = iolat_rq_qos(blkg->q); struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos); u64 now = ktime_to_ns(ktime_get()); int cpu; diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 0c612c19feb8..9638b25fd521 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -12,7 +12,6 @@ #include <linux/cpu.h> #include <linux/group_cpus.h> -#include <linux/blk-mq.h> #include "blk.h" #include "blk-mq.h" diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 212a7f301e73..d23a8554ec4a 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -7,12 +7,10 @@ #include <linux/blkdev.h> #include <linux/debugfs.h> -#include <linux/blk-mq.h> #include "blk.h" #include "blk-mq.h" #include "blk-mq-debugfs.h" #include "blk-mq-sched.h" -#include "blk-mq-tag.h" #include "blk-rq-qos.h" static int queue_poll_stat_show(void *data, struct seq_file *m) diff --git 
a/block/blk-mq-pci.c b/block/blk-mq-pci.c index a90b88fd1332..d47b5c73c9eb 100644 --- a/block/blk-mq-pci.c +++ b/block/blk-mq-pci.c @@ -4,7 +4,6 @@ */ #include <linux/kobject.h> #include <linux/blkdev.h> -#include <linux/blk-mq.h> #include <linux/blk-mq-pci.h> #include <linux/pci.h> #include <linux/module.h> diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 06b312c69114..67c95f31b15b 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -6,7 +6,6 @@ */ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/blk-mq.h> #include <linux/list_sort.h> #include <trace/events/block.h> @@ -15,7 +14,6 @@ #include "blk-mq.h" #include "blk-mq-debugfs.h" #include "blk-mq-sched.h" -#include "blk-mq-tag.h" #include "blk-wbt.h" /* @@ -271,9 +269,7 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) { - struct request_queue *q = hctx->queue; - const bool has_sched = q->elevator; - int ret = 0; + bool need_dispatch = false; LIST_HEAD(rq_list); /* @@ -302,23 +298,22 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) */ if (!list_empty(&rq_list)) { blk_mq_sched_mark_restart_hctx(hctx); - if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) { - if (has_sched) - ret = blk_mq_do_dispatch_sched(hctx); - else - ret = blk_mq_do_dispatch_ctx(hctx); - } - } else if (has_sched) { - ret = blk_mq_do_dispatch_sched(hctx); - } else if (hctx->dispatch_busy) { - /* dequeue request one by one from sw queue if queue is busy */ - ret = blk_mq_do_dispatch_ctx(hctx); + if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) + return 0; + need_dispatch = true; } else { - blk_mq_flush_busy_ctxs(hctx, &rq_list); - blk_mq_dispatch_rq_list(hctx, &rq_list, 0); + need_dispatch = hctx->dispatch_busy; } - return ret; + if (hctx->queue->elevator) + return blk_mq_do_dispatch_sched(hctx); + + /* dequeue request one by one from sw queue if queue is busy */ + if (need_dispatch) + return blk_mq_do_dispatch_ctx(hctx); + blk_mq_flush_busy_ctxs(hctx, &rq_list); + blk_mq_dispatch_rq_list(hctx, &rq_list, 0); + return 0; } void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) @@ -384,116 +379,6 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq, } EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge); -static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, - struct request *rq) -{ - /* - * dispatch flush and passthrough rq directly - * - * passthrough request has to be added to hctx->dispatch directly. - * For some reason, device may be in one situation which can't - * handle FS request, so STS_RESOURCE is always returned and the - * FS request will be added to hctx->dispatch. However passthrough - * request may be required at that time for fixing the problem. If - * passthrough request is added to scheduler queue, there isn't any - * chance to dispatch it given we prioritize requests in hctx->dispatch. 
- */ - if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq)) - return true; - - return false; -} - -void blk_mq_sched_insert_request(struct request *rq, bool at_head, - bool run_queue, bool async) -{ - struct request_queue *q = rq->q; - struct elevator_queue *e = q->elevator; - struct blk_mq_ctx *ctx = rq->mq_ctx; - struct blk_mq_hw_ctx *hctx = rq->mq_hctx; - - WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG)); - - if (blk_mq_sched_bypass_insert(hctx, rq)) { - /* - * Firstly normal IO request is inserted to scheduler queue or - * sw queue, meantime we add flush request to dispatch queue( - * hctx->dispatch) directly and there is at most one in-flight - * flush request for each hw queue, so it doesn't matter to add - * flush request to tail or front of the dispatch queue. - * - * Secondly in case of NCQ, flush request belongs to non-NCQ - * command, and queueing it will fail when there is any - * in-flight normal IO request(NCQ command). When adding flush - * rq to the front of hctx->dispatch, it is easier to introduce - * extra time to flush rq's latency because of S_SCHED_RESTART - * compared with adding to the tail of dispatch queue, then - * chance of flush merge is increased, and less flush requests - * will be issued to controller. It is observed that ~10% time - * is saved in blktests block/004 on disk attached to AHCI/NCQ - * drive when adding flush rq to the front of hctx->dispatch. - * - * Simply queue flush rq to the front of hctx->dispatch so that - * intensive flush workloads can benefit in case of NCQ HW. - */ - at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head; - blk_mq_request_bypass_insert(rq, at_head, false); - goto run; - } - - if (e) { - LIST_HEAD(list); - - list_add(&rq->queuelist, &list); - e->type->ops.insert_requests(hctx, &list, at_head); - } else { - spin_lock(&ctx->lock); - __blk_mq_insert_request(hctx, rq, at_head); - spin_unlock(&ctx->lock); - } - -run: - if (run_queue) - blk_mq_run_hw_queue(hctx, async); -} - -void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, - struct blk_mq_ctx *ctx, - struct list_head *list, bool run_queue_async) -{ - struct elevator_queue *e; - struct request_queue *q = hctx->queue; - - /* - * blk_mq_sched_insert_requests() is called from flush plug - * context only, and hold one usage counter to prevent queue - * from being released. - */ - percpu_ref_get(&q->q_usage_counter); - - e = hctx->queue->elevator; - if (e) { - e->type->ops.insert_requests(hctx, list, false); - } else { - /* - * try to issue requests directly if the hw queue isn't - * busy in case of 'none' scheduler, and this way may save - * us one extra enqueue & dequeue to sw queue. 
- */ - if (!hctx->dispatch_busy && !run_queue_async) { - blk_mq_run_dispatch_ops(hctx->queue, - blk_mq_try_issue_list_directly(hctx, list)); - if (list_empty(list)) - goto out; - } - blk_mq_insert_requests(hctx, ctx, list); - } - - blk_mq_run_hw_queue(hctx, run_queue_async); - out: - percpu_ref_put(&q->q_usage_counter); -} - static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 025013972453..7c3cbad17f30 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -4,7 +4,6 @@ #include "elevator.h" #include "blk-mq.h" -#include "blk-mq-tag.h" #define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ) @@ -17,12 +16,6 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq, void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx); void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); -void blk_mq_sched_insert_request(struct request *rq, bool at_head, - bool run_queue, bool async); -void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, - struct blk_mq_ctx *ctx, - struct list_head *list, bool run_queue_async); - void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx); int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 1b2b0d258e46..156e9bb07abf 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -10,10 +10,8 @@ #include <linux/workqueue.h> #include <linux/smp.h> -#include <linux/blk-mq.h> #include "blk.h" #include "blk-mq.h" -#include "blk-mq-tag.h" static void blk_mq_sysfs_release(struct kobject *kobj) { diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 9eb968e14d31..d6af9d431dc6 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -9,12 +9,10 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/blk-mq.h> #include <linux/delay.h> #include "blk.h" #include "blk-mq.h" #include "blk-mq-sched.h" -#include "blk-mq-tag.h" /* * Recalculate wakeup batch when tag is shared by hctx. 
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h deleted file mode 100644 index 91ff37e3b43d..000000000000 --- a/block/blk-mq-tag.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef INT_BLK_MQ_TAG_H -#define INT_BLK_MQ_TAG_H - -struct blk_mq_alloc_data; - -extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, - unsigned int reserved_tags, - int node, int alloc_policy); -extern void blk_mq_free_tags(struct blk_mq_tags *tags); -extern int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags, - struct sbitmap_queue *breserved_tags, - unsigned int queue_depth, - unsigned int reserved, - int node, int alloc_policy); - -extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); -unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags, - unsigned int *offset); -extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, - unsigned int tag); -void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags); -extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, - struct blk_mq_tags **tags, - unsigned int depth, bool can_grow); -extern void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, - unsigned int size); -extern void blk_mq_tag_update_sched_shared_tags(struct request_queue *q); - -extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); -void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn, - void *priv); -void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, - void *priv); - -static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt, - struct blk_mq_hw_ctx *hctx) -{ - if (!hctx) - return &bt->ws[0]; - return sbq_wait_ptr(bt, &hctx->wait_index); -} - -enum { - BLK_MQ_NO_TAG = -1U, - BLK_MQ_TAG_MIN = 1, - BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1, -}; - -extern void __blk_mq_tag_busy(struct blk_mq_hw_ctx *); -extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *); - -static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) -{ - if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) - __blk_mq_tag_busy(hctx); -} - -static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) -{ - if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) - return; - - __blk_mq_tag_idle(hctx); -} - -static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags, - unsigned int tag) -{ - return tag < tags->nr_reserved_tags; -} - -#endif diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c index 6589f076a096..68d0945c0b08 100644 --- a/block/blk-mq-virtio.c +++ b/block/blk-mq-virtio.c @@ -3,7 +3,6 @@ * Copyright (c) 2016 Christoph Hellwig. 
*/ #include <linux/device.h> -#include <linux/blk-mq.h> #include <linux/blk-mq-virtio.h> #include <linux/virtio_config.h> #include <linux/module.h> diff --git a/block/blk-mq.c b/block/blk-mq.c index 52f8e0099c7f..c2d297efe229 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -32,12 +32,10 @@ #include <trace/events/block.h> -#include <linux/blk-mq.h> #include <linux/t10-pi.h> #include "blk.h" #include "blk-mq.h" #include "blk-mq-debugfs.h" -#include "blk-mq-tag.h" #include "blk-pm.h" #include "blk-stat.h" #include "blk-mq-sched.h" @@ -46,6 +44,10 @@ static DEFINE_PER_CPU(struct llist_head, blk_cpu_done); +static void blk_mq_insert_request(struct request *rq, blk_insert_t flags); +static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, + struct list_head *list); + static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, blk_qc_t qc) { @@ -1289,6 +1291,8 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) */ void blk_execute_rq_nowait(struct request *rq, bool at_head) { + struct blk_mq_hw_ctx *hctx = rq->mq_hctx; + WARN_ON(irqs_disabled()); WARN_ON(!blk_rq_is_passthrough(rq)); @@ -1299,10 +1303,13 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head) * device, directly accessing the plug instead of using blk_mq_plug() * should not have any consequences. */ - if (current->plug) + if (current->plug && !at_head) { blk_add_rq_to_plug(current->plug, rq); - else - blk_mq_sched_insert_request(rq, at_head, true, false); + return; + } + + blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0); + blk_mq_run_hw_queue(hctx, false); } EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); @@ -1352,6 +1359,7 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait) */ blk_status_t blk_execute_rq(struct request *rq, bool at_head) { + struct blk_mq_hw_ctx *hctx = rq->mq_hctx; struct blk_rq_wait wait = { .done = COMPLETION_INITIALIZER_ONSTACK(wait.done), }; @@ -1363,7 +1371,8 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head) rq->end_io = blk_end_sync_rq; blk_account_io_start(rq); - blk_mq_sched_insert_request(rq, at_head, true, false); + blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0); + blk_mq_run_hw_queue(hctx, false); if (blk_rq_is_poll(rq)) { blk_rq_poll_completion(rq, &wait.done); @@ -1403,12 +1412,17 @@ static void __blk_mq_requeue_request(struct request *rq) void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) { + struct request_queue *q = rq->q; + __blk_mq_requeue_request(rq); /* this request will be re-inserted to io scheduler queue */ blk_mq_sched_requeue_request(rq); - blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); + blk_mq_add_to_requeue_list(rq, BLK_MQ_INSERT_AT_HEAD); + + if (kick_requeue_list) + blk_mq_kick_requeue_list(q); } EXPORT_SYMBOL(blk_mq_requeue_request); @@ -1424,33 +1438,33 @@ static void blk_mq_requeue_work(struct work_struct *work) spin_unlock_irq(&q->requeue_lock); list_for_each_entry_safe(rq, next, &rq_list, queuelist) { - if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) - continue; - - rq->rq_flags &= ~RQF_SOFTBARRIER; - list_del_init(&rq->queuelist); /* - * If RQF_DONTPREP, rq has contained some driver specific - * data, so insert it to hctx dispatch list to avoid any - * merge. + * If RQF_DONTPREP ist set, the request has been started by the + * driver already and might have driver-specific data allocated + * already. Insert it into the hctx dispatch list to avoid + * block layer merges for the request. 
*/ - if (rq->rq_flags & RQF_DONTPREP) - blk_mq_request_bypass_insert(rq, false, false); - else - blk_mq_sched_insert_request(rq, true, false, false); + if (rq->rq_flags & RQF_DONTPREP) { + rq->rq_flags &= ~RQF_SOFTBARRIER; + list_del_init(&rq->queuelist); + blk_mq_request_bypass_insert(rq, 0); + } else if (rq->rq_flags & RQF_SOFTBARRIER) { + rq->rq_flags &= ~RQF_SOFTBARRIER; + list_del_init(&rq->queuelist); + blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD); + } } while (!list_empty(&rq_list)) { rq = list_entry(rq_list.next, struct request, queuelist); list_del_init(&rq->queuelist); - blk_mq_sched_insert_request(rq, false, false, false); + blk_mq_insert_request(rq, 0); } blk_mq_run_hw_queues(q, false); } -void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, - bool kick_requeue_list) +void blk_mq_add_to_requeue_list(struct request *rq, blk_insert_t insert_flags) { struct request_queue *q = rq->q; unsigned long flags; @@ -1462,16 +1476,13 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, BUG_ON(rq->rq_flags & RQF_SOFTBARRIER); spin_lock_irqsave(&q->requeue_lock, flags); - if (at_head) { + if (insert_flags & BLK_MQ_INSERT_AT_HEAD) { rq->rq_flags |= RQF_SOFTBARRIER; list_add(&rq->queuelist, &q->requeue_list); } else { list_add_tail(&rq->queuelist, &q->requeue_list); } spin_unlock_irqrestore(&q->requeue_lock, flags); - - if (kick_requeue_list) - blk_mq_kick_requeue_list(q); } void blk_mq_kick_requeue_list(struct request_queue *q) @@ -2127,24 +2138,6 @@ out: return true; } -/** - * __blk_mq_run_hw_queue - Run a hardware queue. - * @hctx: Pointer to the hardware queue to run. - * - * Send pending requests to the hardware. - */ -static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) -{ - /* - * We can't run the queue inline with ints disabled. Ensure that - * we catch bad users of this early. - */ - WARN_ON_ONCE(in_interrupt()); - - blk_mq_run_dispatch_ops(hctx->queue, - blk_mq_sched_dispatch_requests(hctx)); -} - static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) { int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); @@ -2201,42 +2194,19 @@ select_cpu: } /** - * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue. + * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously. * @hctx: Pointer to the hardware queue to run. - * @async: If we want to run the queue asynchronously. * @msecs: Milliseconds of delay to wait before running the queue. * - * If !@async, try to run the queue now. Else, run the queue asynchronously and - * with a delay of @msecs. + * Run a hardware queue asynchronously with a delay of @msecs. */ -static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, - unsigned long msecs) +void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) { if (unlikely(blk_mq_hctx_stopped(hctx))) return; - - if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { - if (cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) { - __blk_mq_run_hw_queue(hctx); - return; - } - } - kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, msecs_to_jiffies(msecs)); } - -/** - * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously. - * @hctx: Pointer to the hardware queue to run. - * @msecs: Milliseconds of delay to wait before running the queue. - * - * Run a hardware queue asynchronously with a delay of @msecs. 
- */ -void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) -{ - __blk_mq_delay_run_hw_queue(hctx, true, msecs); -} EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); /** @@ -2253,6 +2223,11 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) bool need_run; /* + * We can't run the queue inline with interrupts disabled. + */ + WARN_ON_ONCE(!async && in_interrupt()); + + /* * When queue is quiesced, we may be switching io scheduler, or * updating nr_hw_queues, or other things, and we can't run queue * any more, even __blk_mq_hctx_has_pending() can't be called safely. @@ -2264,8 +2239,17 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) need_run = !blk_queue_quiesced(hctx->queue) && blk_mq_hctx_has_pending(hctx)); - if (need_run) - __blk_mq_delay_run_hw_queue(hctx, async, 0); + if (!need_run) + return; + + if (async || (hctx->flags & BLK_MQ_F_BLOCKING) || + !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) { + blk_mq_delay_run_hw_queue(hctx, 0); + return; + } + + blk_mq_run_dispatch_ops(hctx->queue, + blk_mq_sched_dispatch_requests(hctx)); } EXPORT_SYMBOL(blk_mq_run_hw_queue); @@ -2430,80 +2414,52 @@ EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); static void blk_mq_run_work_fn(struct work_struct *work) { - struct blk_mq_hw_ctx *hctx; - - hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); - - /* - * If we are stopped, don't run the queue. - */ - if (blk_mq_hctx_stopped(hctx)) - return; - - __blk_mq_run_hw_queue(hctx); -} - -static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, - struct request *rq, - bool at_head) -{ - struct blk_mq_ctx *ctx = rq->mq_ctx; - enum hctx_type type = hctx->type; + struct blk_mq_hw_ctx *hctx = + container_of(work, struct blk_mq_hw_ctx, run_work.work); - lockdep_assert_held(&ctx->lock); - - trace_block_rq_insert(rq); - - if (at_head) - list_add(&rq->queuelist, &ctx->rq_lists[type]); - else - list_add_tail(&rq->queuelist, &ctx->rq_lists[type]); -} - -void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - bool at_head) -{ - struct blk_mq_ctx *ctx = rq->mq_ctx; - - lockdep_assert_held(&ctx->lock); - - __blk_mq_insert_req_list(hctx, rq, at_head); - blk_mq_hctx_mark_pending(hctx, ctx); + blk_mq_run_dispatch_ops(hctx->queue, + blk_mq_sched_dispatch_requests(hctx)); } /** * blk_mq_request_bypass_insert - Insert a request at dispatch list. * @rq: Pointer to request to be inserted. - * @at_head: true if the request should be inserted at the head of the list. - * @run_queue: If we should run the hardware queue after inserting the request. + * @flags: BLK_MQ_INSERT_* * * Should only be used carefully, when the caller knows we want to * bypass a potential IO scheduler on the target device. 
*/ -void blk_mq_request_bypass_insert(struct request *rq, bool at_head, - bool run_queue) +void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; spin_lock(&hctx->lock); - if (at_head) + if (flags & BLK_MQ_INSERT_AT_HEAD) list_add(&rq->queuelist, &hctx->dispatch); else list_add_tail(&rq->queuelist, &hctx->dispatch); spin_unlock(&hctx->lock); - - if (run_queue) - blk_mq_run_hw_queue(hctx, false); } -void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, - struct list_head *list) - +static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx, struct list_head *list, + bool run_queue_async) { struct request *rq; enum hctx_type type = hctx->type; /* + * Try to issue requests directly if the hw queue isn't busy to save an + * extra enqueue & dequeue to the sw queue. + */ + if (!hctx->dispatch_busy && !run_queue_async) { + blk_mq_run_dispatch_ops(hctx->queue, + blk_mq_try_issue_list_directly(hctx, list)); + if (list_empty(list)) + goto out; + } + + /* * preemption doesn't flush plug list, so it's possible ctx->cpu is * offline now */ @@ -2516,6 +2472,70 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, list_splice_tail_init(list, &ctx->rq_lists[type]); blk_mq_hctx_mark_pending(hctx, ctx); spin_unlock(&ctx->lock); +out: + blk_mq_run_hw_queue(hctx, run_queue_async); +} + +static void blk_mq_insert_request(struct request *rq, blk_insert_t flags) +{ + struct request_queue *q = rq->q; + struct blk_mq_ctx *ctx = rq->mq_ctx; + struct blk_mq_hw_ctx *hctx = rq->mq_hctx; + + if (blk_rq_is_passthrough(rq)) { + /* + * Passthrough request have to be added to hctx->dispatch + * directly. The device may be in a situation where it can't + * handle FS request, and always returns BLK_STS_RESOURCE for + * them, which gets them added to hctx->dispatch. + * + * If a passthrough request is required to unblock the queues, + * and it is added to the scheduler queue, there is no chance to + * dispatch it given we prioritize requests in hctx->dispatch. + */ + blk_mq_request_bypass_insert(rq, flags); + } else if (rq->rq_flags & RQF_FLUSH_SEQ) { + /* + * Firstly normal IO request is inserted to scheduler queue or + * sw queue, meantime we add flush request to dispatch queue( + * hctx->dispatch) directly and there is at most one in-flight + * flush request for each hw queue, so it doesn't matter to add + * flush request to tail or front of the dispatch queue. + * + * Secondly in case of NCQ, flush request belongs to non-NCQ + * command, and queueing it will fail when there is any + * in-flight normal IO request(NCQ command). When adding flush + * rq to the front of hctx->dispatch, it is easier to introduce + * extra time to flush rq's latency because of S_SCHED_RESTART + * compared with adding to the tail of dispatch queue, then + * chance of flush merge is increased, and less flush requests + * will be issued to controller. It is observed that ~10% time + * is saved in blktests block/004 on disk attached to AHCI/NCQ + * drive when adding flush rq to the front of hctx->dispatch. + * + * Simply queue flush rq to the front of hctx->dispatch so that + * intensive flush workloads can benefit in case of NCQ HW. 
+ */ + blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD); + } else if (q->elevator) { + LIST_HEAD(list); + + WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG); + + list_add(&rq->queuelist, &list); + q->elevator->type->ops.insert_requests(hctx, &list, flags); + } else { + trace_block_rq_insert(rq); + + spin_lock(&ctx->lock); + if (flags & BLK_MQ_INSERT_AT_HEAD) + list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]); + else + list_add_tail(&rq->queuelist, + &ctx->rq_lists[hctx->type]); + blk_mq_hctx_mark_pending(hctx, ctx); + spin_unlock(&ctx->lock); + } } static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, @@ -2569,49 +2589,19 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, return ret; } -static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, - struct request *rq, - bool bypass_insert, bool last) +static bool blk_mq_get_budget_and_tag(struct request *rq) { - struct request_queue *q = rq->q; - bool run_queue = true; int budget_token; - /* - * RCU or SRCU read lock is needed before checking quiesced flag. - * - * When queue is stopped or quiesced, ignore 'bypass_insert' from - * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller, - * and avoid driver to try to dispatch again. - */ - if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { - run_queue = false; - bypass_insert = false; - goto insert; - } - - if ((rq->rq_flags & RQF_ELV) && !bypass_insert) - goto insert; - - budget_token = blk_mq_get_dispatch_budget(q); + budget_token = blk_mq_get_dispatch_budget(rq->q); if (budget_token < 0) - goto insert; - + return false; blk_mq_set_rq_budget_token(rq, budget_token); - if (!blk_mq_get_driver_tag(rq)) { - blk_mq_put_dispatch_budget(q, budget_token); - goto insert; + blk_mq_put_dispatch_budget(rq->q, budget_token); + return false; } - - return __blk_mq_issue_directly(hctx, rq, last); -insert: - if (bypass_insert) - return BLK_STS_RESOURCE; - - blk_mq_sched_insert_request(rq, false, run_queue, false); - - return BLK_STS_OK; + return true; } /** @@ -2627,18 +2617,46 @@ insert: static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq) { - blk_status_t ret = - __blk_mq_try_issue_directly(hctx, rq, false, true); + blk_status_t ret; + + if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { + blk_mq_insert_request(rq, 0); + return; + } - if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) - blk_mq_request_bypass_insert(rq, false, true); - else if (ret != BLK_STS_OK) + if ((rq->rq_flags & RQF_ELV) || !blk_mq_get_budget_and_tag(rq)) { + blk_mq_insert_request(rq, 0); + blk_mq_run_hw_queue(hctx, false); + return; + } + + ret = __blk_mq_issue_directly(hctx, rq, true); + switch (ret) { + case BLK_STS_OK: + break; + case BLK_STS_RESOURCE: + case BLK_STS_DEV_RESOURCE: + blk_mq_request_bypass_insert(rq, 0); + blk_mq_run_hw_queue(hctx, false); + break; + default: blk_mq_end_request(rq, ret); + break; + } } static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) { - return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last); + struct blk_mq_hw_ctx *hctx = rq->mq_hctx; + + if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { + blk_mq_insert_request(rq, 0); + return BLK_STS_OK; + } + + if (!blk_mq_get_budget_and_tag(rq)) + return BLK_STS_RESOURCE; + return __blk_mq_issue_directly(hctx, rq, last); } static void blk_mq_plug_issue_direct(struct blk_plug *plug) @@ -2666,7 +2684,8 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug) break; case 
BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE: - blk_mq_request_bypass_insert(rq, false, true); + blk_mq_request_bypass_insert(rq, 0); + blk_mq_run_hw_queue(hctx, false); goto out; default: blk_mq_end_request(rq, ret); @@ -2711,7 +2730,16 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) plug->mq_list = requeue_list; trace_block_unplug(this_hctx->queue, depth, !from_sched); - blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched); + + percpu_ref_get(&this_hctx->queue->q_usage_counter); + if (this_hctx->queue->elevator) { + this_hctx->queue->elevator->type->ops.insert_requests(this_hctx, + &list, 0); + blk_mq_run_hw_queue(this_hctx, from_sched); + } else { + blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched); + } + percpu_ref_put(&this_hctx->queue->q_usage_counter); } void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) @@ -2757,7 +2785,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) } while (!rq_list_empty(plug->mq_list)); } -void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, +static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, struct list_head *list) { int queued = 0; @@ -2775,8 +2803,9 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, break; case BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE: - blk_mq_request_bypass_insert(rq, false, - list_empty(list)); + blk_mq_request_bypass_insert(rq, 0); + if (list_empty(list)) + blk_mq_run_hw_queue(hctx, false); goto out; default: blk_mq_end_request(rq, ret); @@ -2903,6 +2932,7 @@ void blk_mq_submit_bio(struct bio *bio) struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct blk_plug *plug = blk_mq_plug(bio); const int is_sync = op_is_sync(bio->bi_opf); + struct blk_mq_hw_ctx *hctx; struct request *rq; unsigned int nr_segs = 1; blk_status_t ret; @@ -2947,15 +2977,19 @@ void blk_mq_submit_bio(struct bio *bio) return; } - if (plug) + if (plug) { blk_add_rq_to_plug(plug, rq); - else if ((rq->rq_flags & RQF_ELV) || - (rq->mq_hctx->dispatch_busy && - (q->nr_hw_queues == 1 || !is_sync))) - blk_mq_sched_insert_request(rq, false, true, true); - else - blk_mq_run_dispatch_ops(rq->q, - blk_mq_try_issue_directly(rq->mq_hctx, rq)); + return; + } + + hctx = rq->mq_hctx; + if ((rq->rq_flags & RQF_ELV) || + (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) { + blk_mq_insert_request(rq, 0); + blk_mq_run_hw_queue(hctx, true); + } else { + blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq)); + } } #ifdef CONFIG_BLK_MQ_STACKING diff --git a/block/blk-mq.h b/block/blk-mq.h index ef59fee62780..f882677ff106 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -2,8 +2,8 @@ #ifndef INT_BLK_MQ_H #define INT_BLK_MQ_H +#include <linux/blk-mq.h> #include "blk-stat.h" -#include "blk-mq-tag.h" struct blk_mq_tag_set; @@ -30,6 +30,15 @@ struct blk_mq_ctx { struct kobject kobj; } ____cacheline_aligned_in_smp; +enum { + BLK_MQ_NO_TAG = -1U, + BLK_MQ_TAG_MIN = 1, + BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1, +}; + +typedef unsigned int __bitwise blk_insert_t; +#define BLK_MQ_INSERT_AT_HEAD ((__force blk_insert_t)0x01) + void blk_mq_submit_bio(struct bio *bio); int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob, unsigned int flags); @@ -38,8 +47,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); void blk_mq_wake_waiters(struct request_queue *q); bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *, unsigned int); -void 
blk_mq_add_to_requeue_list(struct request *rq, bool at_head, - bool kick_requeue_list); +void blk_mq_add_to_requeue_list(struct request *rq, blk_insert_t insert_flags); void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *start); @@ -59,14 +67,7 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, /* * Internal helpers for request insertion into sw queues */ -void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - bool at_head); -void blk_mq_request_bypass_insert(struct request *rq, bool at_head, - bool run_queue); -void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, - struct list_head *list); -void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, - struct list_head *list); +void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags); /* * CPU -> queue mappings @@ -164,6 +165,60 @@ struct blk_mq_alloc_data { struct blk_mq_hw_ctx *hctx; }; +struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, + unsigned int reserved_tags, int node, int alloc_policy); +void blk_mq_free_tags(struct blk_mq_tags *tags); +int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags, + struct sbitmap_queue *breserved_tags, unsigned int queue_depth, + unsigned int reserved, int node, int alloc_policy); + +unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); +unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags, + unsigned int *offset); +void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, + unsigned int tag); +void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags); +int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, + struct blk_mq_tags **tags, unsigned int depth, bool can_grow); +void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, + unsigned int size); +void blk_mq_tag_update_sched_shared_tags(struct request_queue *q); + +void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); +void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn, + void *priv); +void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, + void *priv); + +static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt, + struct blk_mq_hw_ctx *hctx) +{ + if (!hctx) + return &bt->ws[0]; + return sbq_wait_ptr(bt, &hctx->wait_index); +} + +void __blk_mq_tag_busy(struct blk_mq_hw_ctx *); +void __blk_mq_tag_idle(struct blk_mq_hw_ctx *); + +static inline void blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) +{ + if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) + __blk_mq_tag_busy(hctx); +} + +static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +{ + if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) + __blk_mq_tag_idle(hctx); +} + +static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags, + unsigned int tag) +{ + return tag < tags->nr_reserved_tags; +} + static inline bool blk_mq_is_shared_tags(unsigned int flags) { return flags & BLK_MQ_F_TAG_HCTX_SHARED; diff --git a/block/blk-pm.c b/block/blk-pm.c index 2dad62cc1572..6b72b2e03fc8 100644 --- a/block/blk-pm.c +++ b/block/blk-pm.c @@ -1,11 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/blk-mq.h> #include <linux/blk-pm.h> #include <linux/blkdev.h> #include <linux/pm_runtime.h> #include "blk-mq.h" -#include "blk-mq-tag.h" /** * blk_pm_runtime_init - Block layer runtime PM initialization routine diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h index 
b02a1a3d33a8..f48ee150d667 100644 --- a/block/blk-rq-qos.h +++ b/block/blk-rq-qos.h @@ -74,7 +74,7 @@ static inline struct rq_qos *wbt_rq_qos(struct request_queue *q) return rq_qos_id(q, RQ_QOS_WBT); } -static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q) +static inline struct rq_qos *iolat_rq_qos(struct request_queue *q) { return rq_qos_id(q, RQ_QOS_LATENCY); } diff --git a/block/blk-stat.c b/block/blk-stat.c index 74a1a8c32d86..7ff76ae6c76a 100644 --- a/block/blk-stat.c +++ b/block/blk-stat.c @@ -6,7 +6,6 @@ */ #include <linux/kernel.h> #include <linux/rculist.h> -#include <linux/blk-mq.h> #include "blk-stat.h" #include "blk-mq.h" @@ -190,7 +189,7 @@ void blk_stat_disable_accounting(struct request_queue *q) unsigned long flags; spin_lock_irqsave(&q->stats->lock, flags); - if (!--q->stats->accounting) + if (!--q->stats->accounting && list_empty(&q->stats->callbacks)) blk_queue_flag_clear(QUEUE_FLAG_STATS, q); spin_unlock_irqrestore(&q->stats->lock, flags); } @@ -201,7 +200,7 @@ void blk_stat_enable_accounting(struct request_queue *q) unsigned long flags; spin_lock_irqsave(&q->stats->lock, flags); - if (!q->stats->accounting++) + if (!q->stats->accounting++ && list_empty(&q->stats->callbacks)) blk_queue_flag_set(QUEUE_FLAG_STATS, q); spin_unlock_irqrestore(&q->stats->lock, flags); } diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 1a743b4f2958..a64208583853 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -9,7 +9,6 @@ #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/blktrace_api.h> -#include <linux/blk-mq.h> #include <linux/debugfs.h> #include "blk.h" diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 47e9d8be68f3..9d010d867fbf 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1368,9 +1368,11 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of, int ret; u64 v; - ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx); + blkg_conf_init(&ctx, buf); + + ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx); if (ret) - return ret; + goto out_finish; ret = -EINVAL; if (sscanf(ctx.body, "%llu", &v) != 1) @@ -1389,7 +1391,7 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of, tg_conf_updated(tg, false); ret = 0; out_finish: - blkg_conf_finish(&ctx); + blkg_conf_exit(&ctx); return ret ?: nbytes; } @@ -1561,9 +1563,11 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of, int ret; int index = of_cft(of)->private; - ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx); + blkg_conf_init(&ctx, buf); + + ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, &ctx); if (ret) - return ret; + goto out_finish; tg = blkg_to_tg(ctx.blkg); tg_update_carryover(tg); @@ -1662,7 +1666,7 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of, tg->td->limit_valid[LIMIT_LOW]); ret = 0; out_finish: - blkg_conf_finish(&ctx); + blkg_conf_exit(&ctx); return ret ?: nbytes; } @@ -2439,11 +2443,12 @@ void blk_throtl_register(struct gendisk *disk) #ifndef CONFIG_BLK_DEV_THROTTLING_LOW /* if no low limit, use previous default */ td->throtl_slice = DFL_THROTL_SLICE_HD; -#endif +#else td->track_bio_latency = !queue_is_mq(q); if (!td->track_bio_latency) blk_stat_enable_accounting(q); +#endif } #ifdef CONFIG_BLK_DEV_THROTTLING_LOW diff --git a/block/blk.h b/block/blk.h index cc4e8873dfde..2da831103471 100644 --- a/block/blk.h +++ b/block/blk.h @@ -399,12 +399,6 @@ static inline struct bio *blk_queue_bounce(struct bio *bio, return bio; } -#ifdef CONFIG_BLK_CGROUP_IOLATENCY -int blk_iolatency_init(struct gendisk *disk); 
-#else -static inline int blk_iolatency_init(struct gendisk *disk) { return 0; }; -#endif - #ifdef CONFIG_BLK_DEV_ZONED void disk_free_zone_bitmaps(struct gendisk *disk); void disk_clear_zone_settings(struct gendisk *disk); diff --git a/block/elevator.h b/block/elevator.h index 774a8f6b99e6..7ca3d7b6ed82 100644 --- a/block/elevator.h +++ b/block/elevator.h @@ -4,6 +4,7 @@ #include <linux/percpu.h> #include <linux/hashtable.h> +#include "blk-mq.h" struct io_cq; struct elevator_type; @@ -37,7 +38,8 @@ struct elevator_mq_ops { void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *); void (*prepare_request)(struct request *); void (*finish_request)(struct request *); - void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool); + void (*insert_requests)(struct blk_mq_hw_ctx *hctx, struct list_head *list, + blk_insert_t flags); struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); bool (*has_work)(struct blk_mq_hw_ctx *); void (*completed_request)(struct request *, u64); diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index 2146969237bf..4155594aefc6 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -8,7 +8,6 @@ #include <linux/kernel.h> #include <linux/blkdev.h> -#include <linux/blk-mq.h> #include <linux/module.h> #include <linux/sbitmap.h> @@ -19,7 +18,6 @@ #include "blk-mq.h" #include "blk-mq-debugfs.h" #include "blk-mq-sched.h" -#include "blk-mq-tag.h" #define CREATE_TRACE_POINTS #include <trace/events/kyber.h> @@ -590,7 +588,8 @@ static void kyber_prepare_request(struct request *rq) } static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx, - struct list_head *rq_list, bool at_head) + struct list_head *rq_list, + blk_insert_t flags) { struct kyber_hctx_data *khd = hctx->sched_data; struct request *rq, *next; @@ -602,7 +601,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx, spin_lock(&kcq->lock); trace_block_rq_insert(rq); - if (at_head) + if (flags & BLK_MQ_INSERT_AT_HEAD) list_move(&rq->queuelist, head); else list_move_tail(&rq->queuelist, head); diff --git a/block/mq-deadline.c b/block/mq-deadline.c index f10c2a0d18d4..5839a027e0f0 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -8,7 +8,6 @@ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/blkdev.h> -#include <linux/blk-mq.h> #include <linux/bio.h> #include <linux/module.h> #include <linux/slab.h> @@ -23,7 +22,6 @@ #include "blk.h" #include "blk-mq.h" #include "blk-mq-debugfs.h" -#include "blk-mq-tag.h" #include "blk-mq-sched.h" /* @@ -768,7 +766,7 @@ static bool dd_bio_merge(struct request_queue *q, struct bio *bio, * add rq to rbtree and fifo */ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, - bool at_head) + blk_insert_t flags) { struct request_queue *q = hctx->queue; struct deadline_data *dd = q->elevator->elevator_data; @@ -801,7 +799,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, trace_block_rq_insert(rq); - if (at_head) { + if (flags & BLK_MQ_INSERT_AT_HEAD) { list_add(&rq->queuelist, &per_prio->dispatch); rq->fifo_time = jiffies; } else { @@ -822,10 +820,11 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, } /* - * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests(). + * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list(). 
*/ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, - struct list_head *list, bool at_head) + struct list_head *list, + blk_insert_t flags) { struct request_queue *q = hctx->queue; struct deadline_data *dd = q->elevator->elevator_data; @@ -836,7 +835,7 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); - dd_insert_request(hctx, rq, at_head); + dd_insert_request(hctx, rq, flags); } spin_unlock(&dd->lock); } |
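The caller-side conversion running through the cgroup hunks above is the new blkg_conf_ctx lifetime: a config-write handler now calls blkg_conf_init() on the raw input, then blkg_conf_prep() (or blkg_conf_open_bdev() when only the bdev is needed), and always pairs the init with blkg_conf_exit(), which is safe on every exit path including a failed prep. Below is a minimal sketch of that converted pattern, modeled on the tg_set_conf() and bfq_io_set_device_weight() hunks; the handler name, the blkcg_policy_mypol policy, the value parsing, and the css_to_blkcg(of_css(of)) lookup are illustrative assumptions, not part of this diff.

static ssize_t mypol_set_limit(struct kernfs_open_file *of, char *buf,
			       size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));	/* assumed blkcg lookup */
	struct blkg_conf_ctx ctx;
	u64 v;
	int ret;

	/* bind @ctx to the input string; no locks or references taken yet */
	blkg_conf_init(&ctx, buf);

	/*
	 * Parse the MAJ:MIN prefix, open the bdev and resolve ctx.blkg for
	 * the policy; on success this returns with the queue_lock held.
	 */
	ret = blkg_conf_prep(blkcg, &blkcg_policy_mypol, &ctx);
	if (ret)
		goto out;

	/* ctx.body points just past the MAJ:MIN device prefix */
	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &v) != 1)
		goto out;

	/* ... apply @v to the policy data hanging off ctx.blkg ... */
	ret = 0;
out:
	/* pairs with blkg_conf_init() on every path, even when prep failed */
	blkg_conf_exit(&ctx);
	return ret ?: nbytes;
}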