author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-07-09 19:24:22 +0200
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-07-09 19:24:22 +0200
commit | f71f64210d698f603ab119740d1656f7bdfd3171 | (patch)
tree | 31940a31dff428f0d49fef1bc1da1fd20c7b9d9f | /kernel
parent | Merge tag 'irq_urgent_for_v6.5_rc1' of git://git.kernel.org/pub/scm/linux/ker... | (diff)
parent | swiotlb: reduce the number of areas to match actual memory pool size | (diff)
Merge tag 'dma-mapping-6.5-2023-07-09' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:
- swiotlb area sizing fixes (Petr Tesarik)
* tag 'dma-mapping-6.5-2023-07-09' of git://git.infradead.org/users/hch/dma-mapping:
swiotlb: reduce the number of areas to match actual memory pool size
swiotlb: always set the number of areas before allocating the pool
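Both fixes revolve around one invariant: an area is the unit of parallel allocation in the bounce-buffer pool, and each area must cover at least one full segment of IO_TLB_SEGSIZE slots, so the area count can never usefully exceed nslots / IO_TLB_SEGSIZE. Below is a standalone userspace sketch of that limit_nareas() arithmetic; the IO_TLB_* constants mirror the kernel's values, but the surrounding scaffolding is illustrative only, not the kernel implementation.

```c
/*
 * Standalone sketch of the limit_nareas() logic merged here.
 * IO_TLB_SHIFT and IO_TLB_SEGSIZE mirror the kernel's values; the
 * rest is illustrative userspace code.
 */
#include <stdio.h>

#define IO_TLB_SHIFT	11	/* one slot covers 2 KiB */
#define IO_TLB_SEGSIZE	128	/* slots per contiguous segment */

/* An area must hold at least one full segment, so cap the area count. */
static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
{
	if (nslots < nareas * IO_TLB_SEGSIZE)
		return nslots / IO_TLB_SEGSIZE;
	return nareas;
}

int main(void)
{
	/* Default 64 MiB pool: 32768 slots, enough for 256 areas. */
	printf("%u\n", limit_nareas(256, 32768));	/* prints 256 */
	/* A 2 MiB pool has only 1024 slots: at most 8 areas fit. */
	printf("%u\n", limit_nareas(256, 1024));	/* prints 8 */
	return 0;
}
```

Note that swiotlb_adjust_nareas() first normalizes a non-power-of-two request (the is_power_of_2() branch in the first hunk of the diff below), so the CPU count is rounded before this cap is applied; the pool size, not the CPU count, has the final say.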
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/dma/swiotlb.c | 46 |
1 file changed, 35 insertions(+), 11 deletions(-)
```diff
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 775f7bb10ab1..2b83e3ad9dca 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -115,9 +115,16 @@ static bool round_up_default_nslabs(void)
 	return true;
 }
 
+/**
+ * swiotlb_adjust_nareas() - adjust the number of areas and slots
+ * @nareas: Desired number of areas. Zero is treated as 1.
+ *
+ * Adjust the default number of areas in a memory pool.
+ * The default size of the memory pool may also change to meet minimum area
+ * size requirements.
+ */
 static void swiotlb_adjust_nareas(unsigned int nareas)
 {
-	/* use a single area when non is specified */
 	if (!nareas)
 		nareas = 1;
 	else if (!is_power_of_2(nareas))
@@ -131,6 +138,23 @@ static void swiotlb_adjust_nareas(unsigned int nareas)
 			(default_nslabs << IO_TLB_SHIFT) >> 20);
 }
 
+/**
+ * limit_nareas() - get the maximum number of areas for a given memory pool size
+ * @nareas: Desired number of areas.
+ * @nslots: Total number of slots in the memory pool.
+ *
+ * Limit the number of areas to the maximum possible number of areas in
+ * a memory pool of the given size.
+ *
+ * Return: Maximum possible number of areas.
+ */
+static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
+{
+	if (nslots < nareas * IO_TLB_SEGSIZE)
+		return nslots / IO_TLB_SEGSIZE;
+	return nareas;
+}
+
 static int __init
 setup_io_tlb_npages(char *str)
 {
@@ -290,6 +314,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 {
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs;
+	unsigned int nareas;
 	size_t alloc_size;
 	void *tlb;
 
@@ -298,18 +323,16 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 	if (swiotlb_force_disable)
 		return;
 
-	/*
-	 * default_nslabs maybe changed when adjust area number.
-	 * So allocate bounce buffer after adjusting area number.
-	 */
 	if (!default_nareas)
 		swiotlb_adjust_nareas(num_possible_cpus());
 
 	nslabs = default_nslabs;
+	nareas = limit_nareas(default_nareas, nslabs);
 	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
 		if (nslabs <= IO_TLB_MIN_SLABS)
 			return;
 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		nareas = limit_nareas(nareas, nslabs);
 	}
 
 	if (default_nslabs != nslabs) {
@@ -355,6 +378,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 {
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
+	unsigned int nareas;
 	unsigned char *vstart = NULL;
 	unsigned int order, area_order;
 	bool retried = false;
@@ -363,6 +387,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 	if (swiotlb_force_disable)
 		return 0;
 
+	if (!default_nareas)
+		swiotlb_adjust_nareas(num_possible_cpus());
+
 retry:
 	order = get_order(nslabs << IO_TLB_SHIFT);
 	nslabs = SLABS_PER_PAGE << order;
@@ -397,11 +424,8 @@ retry:
 			(PAGE_SIZE << order) >> 20);
 	}
 
-	if (!default_nareas)
-		swiotlb_adjust_nareas(num_possible_cpus());
-
-	area_order = get_order(array_size(sizeof(*mem->areas),
-		default_nareas));
+	nareas = limit_nareas(default_nareas, nslabs);
+	area_order = get_order(array_size(sizeof(*mem->areas), nareas));
 	mem->areas = (struct io_tlb_area *)
 		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
 	if (!mem->areas)
@@ -415,7 +439,7 @@ retry:
 	set_memory_decrypted((unsigned long)vstart,
 			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
 	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
-				default_nareas);
+				nareas);
 
 	swiotlb_print_info();
 	return 0;
```
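To see how the two changes cooperate at boot, here is a hedged userspace sketch of the shrink-and-relimit retry loop from swiotlb_init_remap(). try_alloc() is a hypothetical stand-in for swiotlb_memblock_alloc(); the IO_TLB_* constants mirror the kernel's values as I read them (IO_TLB_MIN_SLABS being 512, a 1 MiB floor), so treat the numbers as illustrative.

```c
/*
 * Hedged sketch of the fixed retry loop in swiotlb_init_remap(): when
 * the early allocation fails, halve the slot count (kept segment-
 * aligned) and re-limit the area count to the smaller pool.
 * try_alloc() is a hypothetical stand-in for swiotlb_memblock_alloc().
 */
#include <stdbool.h>
#include <stdio.h>

#define IO_TLB_SEGSIZE	 128
#define IO_TLB_MIN_SLABS 512	/* never shrink below a 1 MiB pool */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
{
	if (nslots < nareas * IO_TLB_SEGSIZE)
		return nslots / IO_TLB_SEGSIZE;
	return nareas;
}

/* Pretend memory is tight: only pools of 4096 slots or fewer succeed. */
static bool try_alloc(unsigned long nslabs)
{
	return nslabs <= 4096;
}

int main(void)
{
	unsigned long nslabs = 32768;	/* 64 MiB default pool */
	unsigned int nareas = limit_nareas(64, nslabs);

	while (!try_alloc(nslabs)) {
		if (nslabs <= IO_TLB_MIN_SLABS)
			return 1;	/* the kernel gives up here */
		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
		nareas = limit_nareas(nareas, nslabs);
	}
	printf("pool: %lu slots, %u areas\n", nslabs, nareas);
	return 0;
}
```

With these numbers the loop settles on a 4096-slot (8 MiB) pool and prints "pool: 4096 slots, 32 areas": the 64-area request is capped once only 32 segments remain. Before this fix, the full 64 areas would have been passed to swiotlb_init_io_tlb_mem() regardless of how far the pool had shrunk.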