author    Linus Torvalds <torvalds@linux-foundation.org>    2021-11-01 17:19:50 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-11-01 17:19:50 +0100
commit    33c8846c814c1c27c6e33af005042d15061f948b (patch)
tree      da7c105b61758094d1d55ec1326ff28b521dbe9e /block/blk-mq-sched.h
parent    Merge tag 'locks-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/jlay... (diff)
parent    blk-mq-debugfs: Show active requests per queue for shared tags (diff)
Merge tag 'for-5.16/block-2021-10-29' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:

 - mq-deadline accounting improvements (Bart)
 - blk-wbt timer fix (Andrea)
 - Untangle the block layer includes (Christoph)
 - Rework the poll support to be bio based, which will enable adding
   support for polling for bio based drivers (Christoph)
 - Block layer core support for multi-actuator drives (Damien)
 - blk-crypto improvements (Eric)
 - Batched tag allocation support (me)
 - Request completion batching support (me)
 - Plugging improvements (me)
 - Shared tag set improvements (John)
 - Concurrent queue quiesce support (Ming)
 - Cache bdev in ->private_data for block devices (Pavel)
 - bdev dio improvements (Pavel)
 - Block device invalidation and block size improvements (Xie)
 - Various cleanups, fixes, and improvements (Christoph, Jackie,
   Masahira, Tejun, Yu, Pavel, Zheng, me)

* tag 'for-5.16/block-2021-10-29' of git://git.kernel.dk/linux-block: (174 commits)
  blk-mq-debugfs: Show active requests per queue for shared tags
  block: improve readability of blk_mq_end_request_batch()
  virtio-blk: Use blk_validate_block_size() to validate block size
  loop: Use blk_validate_block_size() to validate block size
  nbd: Use blk_validate_block_size() to validate block size
  block: Add a helper to validate the block size
  block: re-flow blk_mq_rq_ctx_init()
  block: prefetch request to be initialized
  block: pass in blk_mq_tags to blk_mq_rq_ctx_init()
  block: add rq_flags to struct blk_mq_alloc_data
  block: add async version of bio_set_polled
  block: kill DIO_MULTI_BIO
  block: kill unused polling bits in __blkdev_direct_IO()
  block: avoid extra iter advance with async iocb
  block: Add independent access ranges support
  blk-mq: don't issue request directly in case that current is to be blocked
  sbitmap: silence data race warning
  blk-cgroup: synchronize blkg creation against policy deactivation
  block: refactor bio_iov_bvec_set()
  block: add single bio async direct IO helper
  ...
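Several of the commits in the shortlog above route block size validation through one shared helper, which virtio-blk, loop and nbd then adopt instead of carrying their own copies of the same bounds test. A minimal userspace sketch of the idea (a block size must be a power of two between 512 bytes and the page size); the helper name and signature here are stand-ins, not necessarily what the kernel tree uses:

/*
 * Userspace sketch of the shared block size validation mentioned in the
 * shortlog above.  validate_block_size() is a stand-in name; the in-tree
 * helper and its exact signature may differ.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int validate_block_size(unsigned long bsize)
{
	unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);

	/* Valid sizes are powers of two between 512 bytes and the page size. */
	if (bsize < 512 || bsize > page_size || (bsize & (bsize - 1)))
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("4096 -> %d\n", validate_block_size(4096)); /* valid                        */
	printf("3000 -> %d\n", validate_block_size(3000)); /* -EINVAL: not a power of two  */
	return 0;
}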
Diffstat (limited to 'block/blk-mq-sched.h')
-rw-r--r--  block/blk-mq-sched.h  |  49
1 file changed, 28 insertions(+), 21 deletions(-)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 5246ae040704..25d1034952b6 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -2,21 +2,22 @@
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H
+#include "elevator.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
-#define MAX_SCHED_RQ (16 * BLKDEV_MAX_RQ)
+#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)
void blk_mq_sched_assign_ioc(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs, struct request **merged_request);
-bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
+bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
struct list_head *free);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
-void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
+void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async);
@@ -28,45 +29,51 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
-void blk_mq_sched_free_requests(struct request_queue *q);
+void blk_mq_sched_free_rqs(struct request_queue *q);
-static inline bool
-blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
- unsigned int nr_segs)
+static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
- if (blk_queue_nomerges(q) || !bio_mergeable(bio))
- return false;
+ if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+ __blk_mq_sched_restart(hctx);
+}
- return __blk_mq_sched_bio_merge(q, bio, nr_segs);
+static inline bool bio_mergeable(struct bio *bio)
+{
+ return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
}
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
- struct elevator_queue *e = q->elevator;
-
- if (e && e->type->ops.allow_merge)
- return e->type->ops.allow_merge(q, rq, bio);
+ if (rq->rq_flags & RQF_ELV) {
+ struct elevator_queue *e = q->elevator;
+ if (e->type->ops.allow_merge)
+ return e->type->ops.allow_merge(q, rq, bio);
+ }
return true;
}
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
- struct elevator_queue *e = rq->q->elevator;
+ if (rq->rq_flags & RQF_ELV) {
+ struct elevator_queue *e = rq->q->elevator;
- if (e && e->type->ops.completed_request)
- e->type->ops.completed_request(rq, now);
+ if (e->type->ops.completed_request)
+ e->type->ops.completed_request(rq, now);
+ }
}
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
- struct request_queue *q = rq->q;
- struct elevator_queue *e = q->elevator;
+ if (rq->rq_flags & RQF_ELV) {
+ struct request_queue *q = rq->q;
+ struct elevator_queue *e = q->elevator;
- if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
- e->type->ops.requeue_request(rq);
+ if ((rq->rq_flags & RQF_ELVPRIV) && e->type->ops.requeue_request)
+ e->type->ops.requeue_request(rq);
+ }
}
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
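Taken together, the hunks above replace "is there an elevator attached?" NULL checks in the hot paths with two cheaper tests: an inline blk_mq_sched_restart() that only calls the out-of-line __blk_mq_sched_restart() when BLK_MQ_S_SCHED_RESTART is set, and per-request RQF_ELV checks before any elevator op is dereferenced. A standalone sketch of that pattern with simplified stand-in types (not kernel code):

/*
 * Standalone sketch (simplified stand-in types, not kernel code) of the two
 * fast-path patterns the diff above introduces:
 *   1. an inline wrapper that tests a cheap state bit before calling the
 *      out-of-line restart path, and
 *   2. gating scheduler callbacks on a per-request flag instead of checking
 *      the queue's elevator pointer for NULL.
 */
#include <stdio.h>

#define S_SCHED_RESTART  (1u << 0)   /* stands in for BLK_MQ_S_SCHED_RESTART */
#define RQF_ELV          (1u << 1)   /* stands in for RQF_ELV                */

struct elevator_ops {
	void (*completed_request)(unsigned long long now);
};

struct hctx {
	unsigned int state;
};

struct request {
	unsigned int rq_flags;
	const struct elevator_ops *ops;  /* set only when a scheduler is attached */
};

/* Out-of-line slow path: only reached when a restart was actually marked. */
static void __sched_restart(struct hctx *hctx)
{
	hctx->state &= ~S_SCHED_RESTART;
	printf("re-running hardware queue\n");
}

/* Inline fast path: the common case costs a single bit test. */
static inline void sched_restart(struct hctx *hctx)
{
	if (hctx->state & S_SCHED_RESTART)
		__sched_restart(hctx);
}

/* Completion hook: the per-request flag says whether an elevator is involved
 * at all, so the elevator pointer is never touched on the no-scheduler path. */
static inline void sched_completed_request(struct request *rq,
					   unsigned long long now)
{
	if (rq->rq_flags & RQF_ELV) {
		if (rq->ops->completed_request)
			rq->ops->completed_request(now);
	}
}

static void log_completion(unsigned long long now)
{
	printf("scheduler saw completion at %llu\n", now);
}

int main(void)
{
	struct hctx h = { .state = 0 };
	const struct elevator_ops eops = { .completed_request = log_completion };
	struct request with_elv = { .rq_flags = RQF_ELV, .ops = &eops };
	struct request no_elv   = { .rq_flags = 0, .ops = NULL };

	sched_restart(&h);                       /* fast path: nothing to do  */
	h.state |= S_SCHED_RESTART;              /* dispatch marked a restart */
	sched_restart(&h);                       /* slow path taken once      */

	sched_completed_request(&with_elv, 100); /* callback runs             */
	sched_completed_request(&no_elv, 200);   /* skipped: flag not set     */
	return 0;
}

The appeal of the per-request flag is that the decision is made once, presumably when the request is allocated, so completion and requeue no longer need to inspect queue-wide state on every request.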