author		Christoph Hellwig <hch@lst.de>	2021-03-31 09:30:00 +0200
committer	Jens Axboe <axboe@kernel.dk>	2021-04-06 17:28:17 +0200
commit		9bb33f24abbd0fa2fadad01ec75438d7cc239189 (patch)
tree		7c503535797f615db7360dc4aa9081c68bffbded /block
parent		block: remove BLK_BOUNCE_ISA support (diff)
block: refactor the bounce buffering code
Get rid of all the PFN arithmetic and just use an enum for the two
remaining options, and use PageHighMem for the actual bounce decision.
Add a fast path to entirely avoid the call for the common case of a queue
not using the legacy bouncing code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/20210331073001.46776-8-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
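
For context, the enum that replaces the bounce_pfn arithmetic lives in
include/linux/blkdev.h, which is outside the 'block'-limited diffstat
below. A minimal sketch of the new interface (comments paraphrased; see
the full patch on lore for the authoritative definition):

	/* Sketch of the interface this patch adds in include/linux/blkdev.h. */
	enum blk_bounce {
		BLK_BOUNCE_NONE,	/* never bounce (the default) */
		BLK_BOUNCE_HIGH,	/* bounce all highmem pages */
	};

	struct queue_limits {
		enum blk_bounce		bounce;	/* replaces unsigned long bounce_pfn */
		/* ... other limits unchanged ... */
	};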
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	 6
-rw-r--r--	block/blk-settings.c	42
-rw-r--r--	block/blk.h	16
-rw-r--r--	block/bounce.c	35
4 files changed, 27 insertions(+), 72 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index fc60ff208497..9bcdae93f6d4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1161,10 +1161,8 @@ static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
 	}
 
 	/*
-	 * queue's settings related to segment counting like q->bounce_pfn
-	 * may differ from that of other stacking queues.
-	 * Recalculate it to check the request correctly on this queue's
-	 * limitation.
+	 * The queue settings related to segment counting may differ from the
+	 * original queue.
 	 */
 	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
 	if (rq->nr_phys_segments > queue_max_segments(q)) {
diff --git a/block/blk-settings.c b/block/blk-settings.c
index f9937dd2810e..9c009090c4b5 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -7,7 +7,6 @@
 #include <linux/init.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
-#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
 #include <linux/gcd.h>
 #include <linux/lcm.h>
 #include <linux/jiffies.h>
@@ -17,11 +16,6 @@
 #include "blk.h"
 #include "blk-wbt.h"
 
-unsigned long blk_max_low_pfn;
-EXPORT_SYMBOL(blk_max_low_pfn);
-
-unsigned long blk_max_pfn;
-
 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 {
 	q->rq_timeout = timeout;
@@ -55,7 +49,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->discard_alignment = 0;
 	lim->discard_misaligned = 0;
 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
-	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
+	lim->bounce = BLK_BOUNCE_NONE;
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
@@ -92,28 +86,16 @@ EXPORT_SYMBOL(blk_set_stacking_limits);
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
  * @q: the request queue for the device
- * @max_addr: the maximum address the device can handle
+ * @bounce: bounce limit to enforce
  *
  * Description:
- *    Different hardware can have different requirements as to what pages
- *    it can do I/O directly to. A low level driver can call
- *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @max_addr.
+ *    Force bouncing for ISA DMA ranges or highmem.
+ *
+ *    DEPRECATED, don't use in new code.
 **/
-void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
+void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
 {
-	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
-
-#if BITS_PER_LONG == 64
-	/*
-	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
-	 * some IOMMUs can handle everything, but I don't know of a
-	 * way to test this here.
-	 */
-	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
-#else
-	q->limits.bounce_pfn = b_pfn;
-#endif
+	q->limits.bounce = bounce;
 }
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
@@ -536,7 +518,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 					b->max_write_zeroes_sectors);
 	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
 					b->max_zone_append_sectors);
-	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
+	t->bounce = max(t->bounce, b->bounce);
 
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
@@ -916,11 +898,3 @@ void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 	}
 }
 EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
-
-static int __init blk_settings_init(void)
-{
-	blk_max_low_pfn = max_low_pfn - 1;
-	blk_max_pfn = max_pfn - 1;
-	return 0;
-}
-subsys_initcall(blk_settings_init);
diff --git a/block/blk.h b/block/blk.h
index 895c9f4a5182..8f4337c5a9e6 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -6,6 +6,7 @@
 #include <linux/blk-mq.h>
 #include <linux/part_stat.h>
 #include <linux/blk-crypto.h>
+#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
 #include <xen/xen.h>
 #include "blk-crypto-internal.h"
 #include "blk-mq.h"
@@ -311,13 +312,20 @@ static inline void blk_throtl_bio_endio(struct bio *bio) { }
 static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
 #endif
 
-#ifdef CONFIG_BOUNCE
-extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-#else
+void __blk_queue_bounce(struct request_queue *q, struct bio **bio);
+
+static inline bool blk_queue_may_bounce(struct request_queue *q)
+{
+	return IS_ENABLED(CONFIG_BOUNCE) &&
+		q->limits.bounce == BLK_BOUNCE_HIGH &&
+		max_low_pfn >= max_pfn;
+}
+
 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 {
+	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
+		__blk_queue_bounce(q, bio);
 }
-#endif	/* CONFIG_BOUNCE */
 
 #ifdef CONFIG_BLK_CGROUP_IOLATENCY
 extern int blk_iolatency_init(struct request_queue *q);
diff --git a/block/bounce.c b/block/bounce.c
index debd5b0bd318..6bafc0d1f867 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -18,7 +18,6 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
-#include <linux/memblock.h>
 #include <linux/printk.h>
 
 #include <asm/tlbflush.h>
@@ -49,11 +48,11 @@ static void init_bounce_bioset(void)
 	bounce_bs_setup = true;
 }
 
-#if defined(CONFIG_HIGHMEM)
 static __init int init_emergency_pool(void)
 {
 	int ret;
-#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
+
+#ifndef CONFIG_MEMORY_HOTPLUG
 	if (max_pfn <= max_low_pfn)
 		return 0;
 #endif
@@ -67,9 +66,7 @@ static __init int init_emergency_pool(void)
 }
 
 __initcall(init_emergency_pool);
-#endif
 
-#ifdef CONFIG_HIGHMEM
 /*
  * highmem version, map in to vec
  */
@@ -82,13 +79,6 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
 	kunmap_atomic(vto);
 }
 
-#else /* CONFIG_HIGHMEM */
-
-#define bounce_copy_vec(to, vfrom)	\
-	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)
-
-#endif /* CONFIG_HIGHMEM */
-
 /*
  * Simple bounce buffer support for highmem pages. Depending on the
  * queue gfp mask set, *to may or may not be a highmem page. kmap it
@@ -236,8 +226,7 @@ err_put:
 	return NULL;
 }
 
-
-void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
+void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -247,24 +236,10 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	bool bounce = false;
 	int sectors = 0;
 
-	/*
-	 * Data-less bio, nothing to bounce
-	 */
-	if (!bio_has_data(*bio_orig))
-		return;
-
-	/*
-	 * Just check if the bounce pfn is equal to or bigger than the highest
-	 * pfn in the system -- in that case, don't waste time iterating over
-	 * bio segments
-	 */
-	if (q->limits.bounce_pfn >= blk_max_pfn)
-		return;
-
 	bio_for_each_segment(from, *bio_orig, iter) {
 		if (i++ < BIO_MAX_VECS)
 			sectors += from.bv_len >> 9;
-		if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
+		if (PageHighMem(from.bv_page))
 			bounce = true;
 	}
 	if (!bounce)
@@ -287,7 +262,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
 		struct page *page = to->bv_page;
 
-		if (page_to_pfn(page) <= q->limits.bounce_pfn)
+		if (!PageHighMem(page))
 			continue;
 
 		to->bv_page = mempool_alloc(&page_pool, GFP_NOIO);
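
Usage note: after this patch the few remaining legacy callers pass the
enum instead of a u64 address limit. A short illustration (hypothetical
caller, not part of the patch):

	/* A legacy driver that cannot DMA to highmem now requests bouncing with: */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);

Also worth noting from the blk_stack_limits() hunk above: bounce_pfn
values used to combine via min_not_zero(), while the enum combines via
max(). Since BLK_BOUNCE_NONE is the lowest enumerator, a stacked queue
bounces whenever any underlying queue requires it.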