author     Linus Torvalds <torvalds@linux-foundation.org>  2022-01-12 19:26:52 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-01-12 19:26:52 +0100
commit     d3c810803576d867265277df8e94eee386351c9d
tree       2f40646e0bbcbe64e86d16a7800f1b19e8592d6b /block/blk.h
parent     Merge tag 'for-5.17/io_uring-2022-01-11' of git://git.kernel.dk/linux-block
parent     MAINTAINERS: add entries for block layer documentation
Merge tag 'for-5.17/block-2022-01-11' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
- Unify where the struct request handling code is located in the blk-mq code (Christoph)
- Header cleanups (Christoph)
- Clean up the io_context handling code (Christoph, me)
- Get rid of ->rq_disk in struct request (Christoph)
- Error handling fix for add_disk() (Christoph)
- request allocation cleanups (Christoph)
- Documentation updates (Eric, Matthew)
- Remove trivial crypto unregister helper (Eric)
- Reduce shared tag overhead (John)
- Reduce poll_stats memory overhead (me)
- Known indirect function call for dio (me)
- Use atomic references for struct request (me); a short sketch of the pattern follows this list
- Support request list issue for block and NVMe (me)
- Improve queue dispatch pinning (Ming)
- Improve the direct list issue code (Keith)
- BFQ improvements (Jan)
- Direct completion helper and use it in mmc block (Sebastian)
- Use raw spinlock for the blktrace code (Wander)
- fsync error handling fix (Ye)
- Various fixes and cleanups (Lukas, Randy, Yang, Tetsuo, Ming, me)
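
The "atomic references for struct request" change shows up in the diff below as the req_ref_*() helpers added to block/blk.h, trading refcount_t for a bare atomic_t plus an explicit overflow check. Here is a minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's atomic_t; the helper bodies are illustrative, not copied from the kernel:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Stand-in for the kernel's struct request; only the ref field matters. */
  struct request {
          atomic_int ref;
  };

  /* Take a reference only if the request is still live (ref != 0). A dead
   * request must not be resurrected, e.g. by a racing timeout handler. */
  static bool req_ref_inc_not_zero(struct request *req)
  {
          int old = atomic_load(&req->ref);

          do {
                  if (old == 0)
                          return false;
          } while (!atomic_compare_exchange_weak(&req->ref, &old, old + 1));
          return true;
  }

  /* Drop a reference; returns true for the caller that dropped the last one,
   * which is then responsible for freeing the request. */
  static bool req_ref_put_and_test(struct request *req)
  {
          return atomic_fetch_sub(&req->ref, 1) == 1;
  }

  int main(void)
  {
          struct request rq;

          atomic_init(&rq.ref, 1);                /* owner's reference */
          if (req_ref_inc_not_zero(&rq))          /* e.g. a timeout path */
                  printf("grabbed extra ref, ref=%d\n", atomic_load(&rq.ref));
          req_ref_put_and_test(&rq);              /* timeout path done */
          if (req_ref_put_and_test(&rq))          /* last ref dropped */
                  printf("last reference gone, request can be freed\n");
          return 0;
  }

The inc_not_zero semantics are the crux: only a holder of an existing reference may hand out new ones, so a count that has reached zero stays dead.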
* tag 'for-5.17/block-2022-01-11' of git://git.kernel.dk/linux-block: (132 commits)
MAINTAINERS: add entries for block layer documentation
docs: block: remove queue-sysfs.rst
docs: sysfs-block: document virt_boundary_mask
docs: sysfs-block: document stable_writes
docs: sysfs-block: fill in missing documentation from queue-sysfs.rst
docs: sysfs-block: add contact for nomerges
docs: sysfs-block: sort alphabetically
docs: sysfs-block: move to stable directory
block: don't protect submit_bio_checks by q_usage_counter
block: fix old-style declaration
nvme-pci: fix queue_rqs list splitting
block: introduce rq_list_move
block: introduce rq_list_for_each_safe macro
block: move rq_list macros to blk-mq.h
block: drop needless assignment in set_task_ioprio()
block: remove unnecessary trailing '\'
bio.h: fix kernel-doc warnings
block: check minor range in device_add_disk()
block: use "unsigned long" for blk_validate_block_size().
block: fix error unwinding in device_add_disk
...
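
Two of the commits above, "block: introduce rq_list_move" and "block: introduce rq_list_for_each_safe macro", extend the singly linked rq_list used for batched request issue. The _safe variant samples the next pointer before the loop body runs, so the current request may be unlinked or moved to another list mid-walk. A self-contained sketch of the idea follows; the macro body is a plausible reconstruction of the concept, not the exact blk-mq.h definition:

  #include <stdio.h>

  struct request {
          int tag;
          struct request *rq_next;
  };

  /* Cache the next pointer up front so the loop body may rewrite
   * rq->rq_next without derailing the walk. */
  #define rq_list_for_each_safe(listptr, rq, next)                      \
          for (rq = *(listptr), next = rq ? rq->rq_next : NULL;         \
               rq; rq = next, next = rq ? rq->rq_next : NULL)

  int main(void)
  {
          struct request r3 = { 3, NULL }, r2 = { 2, &r3 }, r1 = { 1, &r2 };
          struct request *list = &r1, *requeue = NULL;
          struct request *rq, *next;

          /* Drain 'list' onto 'requeue'; every iteration rewrites
           * rq->rq_next, which is exactly why the _safe variant exists. */
          rq_list_for_each_safe(&list, rq, next) {
                  list = rq->rq_next;     /* rq is always the current head */
                  rq->rq_next = requeue;
                  requeue = rq;
          }

          for (rq = requeue; rq; rq = rq->rq_next)
                  printf("moved tag %d\n", rq->tag);   /* prints 3, 2, 1 */
          return 0;
  }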
Diffstat (limited to 'block/blk.h')
-rw-r--r--  block/blk.h  115
1 file changed, 64 insertions(+), 51 deletions(-)
diff --git a/block/blk.h b/block/blk.h
index ccde6e6f1736..8bd43b3ad33d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -2,15 +2,10 @@
 #ifndef BLK_INTERNAL_H
 #define BLK_INTERNAL_H
 
-#include <linux/idr.h>
-#include <linux/blk-mq.h>
-#include <linux/part_stat.h>
 #include <linux/blk-crypto.h>
 #include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
 #include <xen/xen.h>
 #include "blk-crypto-internal.h"
-#include "blk-mq.h"
-#include "blk-mq-sched.h"
 
 struct elevator_type;
 
@@ -32,15 +27,10 @@ struct blk_flush_queue {
 };
 
 extern struct kmem_cache *blk_requestq_cachep;
+extern struct kmem_cache *blk_requestq_srcu_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
 
-static inline struct blk_flush_queue *
-blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
-{
-	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
-}
-
 static inline void __blk_get_queue(struct request_queue *q)
 {
 	kobject_get(&q->kobj);
@@ -250,16 +240,13 @@ static inline void blk_integrity_del(struct gendisk *disk)
 
 unsigned long blk_rq_timeout(unsigned long timeout);
 void blk_add_timer(struct request *req);
-void blk_print_req_error(struct request *req, blk_status_t status);
+const char *blk_status_to_str(blk_status_t status);
 
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-		unsigned int nr_segs, bool *same_queue_rq);
+		unsigned int nr_segs);
 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
 			struct bio *bio, unsigned int nr_segs);
 
-void __blk_account_io_start(struct request *req);
-void __blk_account_io_done(struct request *req, u64 now);
-
 /*
  * Plug flush limits
  */
@@ -275,19 +262,10 @@ void blk_insert_flush(struct request *rq);
 
 int elevator_switch_mq(struct request_queue *q,
 			      struct elevator_type *new_e);
-void __elevator_exit(struct request_queue *, struct elevator_queue *);
+void elevator_exit(struct request_queue *q);
 int elv_register_queue(struct request_queue *q, bool uevent);
 void elv_unregister_queue(struct request_queue *q);
 
-static inline void elevator_exit(struct request_queue *q,
-		struct elevator_queue *e)
-{
-	lockdep_assert_held(&q->sysfs_lock);
-
-	blk_mq_sched_free_rqs(q);
-	__elevator_exit(q, e);
-}
-
 ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
 		char *buf);
 ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
@@ -347,26 +325,10 @@ int blk_dev_init(void);
  */
 static inline bool blk_do_io_stat(struct request *rq)
 {
-	return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk;
-}
-
-static inline void blk_account_io_done(struct request *req, u64 now)
-{
-	/*
-	 * Account IO completion.  flush_rq isn't accounted as a
-	 * normal IO on queueing nor completion.  Accounting the
-	 * containing request is enough.
-	 */
-	if (blk_do_io_stat(req) && req->part &&
-	    !(req->rq_flags & RQF_FLUSH_SEQ))
-		__blk_account_io_done(req, now);
+	return (rq->rq_flags & RQF_IO_STAT) && rq->q->disk;
 }
 
-static inline void blk_account_io_start(struct request *req)
-{
-	if (blk_do_io_stat(req))
-		__blk_account_io_start(req);
-}
+void update_io_ticks(struct block_device *part, unsigned long now, bool end);
 
 static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 {
@@ -402,13 +364,15 @@ static inline unsigned int bio_aligned_discard_max_sectors(
 /*
  * Internal io_context interface
  */
-void get_io_context(struct io_context *ioc);
-struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
-			     gfp_t gfp_mask);
+struct io_cq *ioc_find_get_icq(struct request_queue *q);
+struct io_cq *ioc_lookup_icq(struct request_queue *q);
+#ifdef CONFIG_BLK_ICQ
 void ioc_clear_queue(struct request_queue *q);
-
-int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
+#else
+static inline void ioc_clear_queue(struct request_queue *q)
+{
+}
+#endif /* CONFIG_BLK_ICQ */
 
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
@@ -467,7 +431,15 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
 		struct page *page, unsigned int len, unsigned int offset,
 		unsigned int max_sectors, bool *same_page);
 
-struct request_queue *blk_alloc_queue(int node_id);
+static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
+{
+	if (srcu)
+		return blk_requestq_srcu_cachep;
+	return blk_requestq_cachep;
+}
+struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);
+
+int disk_scan_partitions(struct gendisk *disk, fmode_t mode);
 
 int disk_alloc_events(struct gendisk *disk);
 void disk_add_events(struct gendisk *disk);
@@ -493,4 +465,45 @@ int disk_register_independent_access_ranges(struct gendisk *disk,
 		struct blk_independent_access_ranges *new_iars);
 void disk_unregister_independent_access_ranges(struct gendisk *disk);
 
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+bool should_fail_request(struct block_device *part, unsigned int bytes);
+#else /* CONFIG_FAIL_MAKE_REQUEST */
+static inline bool should_fail_request(struct block_device *part,
+					unsigned int bytes)
+{
+	return false;
+}
+#endif /* CONFIG_FAIL_MAKE_REQUEST */
+
+/*
+ * Optimized request reference counting. Ideally we'd make timeouts be more
+ * clever, as that's the only reason we need references at all... But until
+ * this happens, this is faster than using refcount_t. Also see:
+ *
+ * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
+ */
+#define req_ref_zero_or_close_to_overflow(req)	\
+	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
+
+static inline bool req_ref_inc_not_zero(struct request *req)
+{
+	return atomic_inc_not_zero(&req->ref);
+}
+
+static inline bool req_ref_put_and_test(struct request *req)
+{
+	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+	return atomic_dec_and_test(&req->ref);
+}
+
+static inline void req_ref_set(struct request *req, int value)
+{
+	atomic_set(&req->ref, value);
+}
+
+static inline int req_ref_read(struct request *req)
+{
+	return atomic_read(&req->ref);
+}
+
 #endif /* BLK_INTERNAL_H */
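
The req_ref_zero_or_close_to_overflow() macro in the hunk above leans on unsigned wraparound: (unsigned int)ref + 127u <= 127u holds exactly when ref, viewed as a signed value, lies in [-127, 0], i.e. when a put is about to run on a request whose count has already reached zero or underflowed. A standalone check of that arithmetic (userspace C, illustrative only):

  #include <assert.h>
  #include <stdio.h>

  /* Mirrors the check in block/blk.h: flags a reference count that is zero
   * or has underflowed into small negatives (two's complement view). */
  static int zero_or_close_to_overflow(int ref)
  {
          return (unsigned int)ref + 127u <= 127u;
  }

  int main(void)
  {
          assert(zero_or_close_to_overflow(0));     /* put on a dead request */
          assert(zero_or_close_to_overflow(-1));    /* already underflowed */
          assert(zero_or_close_to_overflow(-127));  /* edge of the window */
          assert(!zero_or_close_to_overflow(1));    /* live reference: fine */
          assert(!zero_or_close_to_overflow(-128)); /* outside the window */
          printf("overflow-window check behaves as expected\n");
          return 0;
  }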