author     Linus Torvalds <torvalds@linux-foundation.org>   2023-08-30 05:32:10 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-08-30 05:32:10 +0200
commit     6c1b980a7e79e55e951b4b2c47eefebc75071209 (patch)
tree       75fadaa5bc0b8bb1b488bd107926cdb6373c3946 /mm
parent     Merge tag 'for-6.6/block-2023-08-28' of git://git.kernel.dk/linux (diff)
parent     swiotlb: optimize get_max_slots() (diff)
Merge tag 'dma-mapping-6.6-2023-08-29' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- allow dynamic sizing of the swiotlb buffer, to cater for secure
virtualization workloads that require all I/O to be bounce buffered
(Petr Tesarik)
- move a declaration to a header (Arnd Bergmann)
- check for memory region overlap in dma-contiguous (Binglei Wang); a short
sketch of the underlying overlap test follows this list
- remove the somewhat dangerous runtime swiotlb-xen enablement and
unexport is_swiotlb_active (Christoph Hellwig, Juergen Gross)
- per-node CMA improvements (Yajun Deng)
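As a rough illustration of the overlap test behind the dma-contiguous item
above, here is a minimal, self-contained userspace sketch. The names
(struct region, regions_overlap, range_is_free) and the hard-coded reserved
list are hypothetical; the actual kernel change validates the requested range
against memblock's reserved regions rather than a hand-rolled array.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct region {
	uint64_t base;
	uint64_t size;
};

/* Two half-open ranges [a, a + asize) and [b, b + bsize) overlap iff each
 * one starts before the other one ends. */
static bool regions_overlap(uint64_t a, uint64_t asize, uint64_t b, uint64_t bsize)
{
	return a < b + bsize && b < a + asize;
}

static bool range_is_free(const struct region *reserved, size_t n,
			  uint64_t base, uint64_t size)
{
	for (size_t i = 0; i < n; i++)
		if (regions_overlap(base, size, reserved[i].base, reserved[i].size))
			return false;
	return true;
}

int main(void)
{
	const struct region reserved[] = {
		{ 0x80000000ULL, 0x10000000ULL },	/* 256 MiB already reserved */
	};

	/* A request overlapping the reserved block must be rejected ... */
	printf("overlapping request free? %d\n",
	       range_is_free(reserved, 1, 0x88000000ULL, 0x08000000ULL));
	/* ... while a disjoint request is fine. */
	printf("disjoint request free?    %d\n",
	       range_is_free(reserved, 1, 0xa0000000ULL, 0x08000000ULL));
	return 0;
}

The sketch ignores wrap-around of base + size for readability; real
reservation code additionally has to guard against that and against
zero-sized requests.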
* tag 'dma-mapping-6.6-2023-08-29' of git://git.infradead.org/users/hch/dma-mapping:
swiotlb: optimize get_max_slots()
swiotlb: move slot allocation explanation comment where it belongs
swiotlb: search the software IO TLB only if the device makes use of it
swiotlb: allocate a new memory pool when existing pools are full
swiotlb: determine potential physical address limit
swiotlb: if swiotlb is full, fall back to a transient memory pool
swiotlb: add a flag whether SWIOTLB is allowed to grow
swiotlb: separate memory pool data from other allocator data
swiotlb: add documentation and rename swiotlb_do_find_slots()
swiotlb: make io_tlb_default_mem local to swiotlb.c
swiotlb: bail out of swiotlb_init_late() if swiotlb is already allocated
dma-contiguous: check for memory region overlap
dma-contiguous: support numa CMA for specified node
dma-contiguous: support per-numa CMA for all architectures
dma-mapping: move arch_dma_set_mask() declaration to header
swiotlb: unexport is_swiotlb_active
x86: always initialize xen-swiotlb when xen-pcifront is enabling
xen/pci: add flag for PCI passthrough being possible
Diffstat (limited to 'mm')
-rw-r--r--   mm/cma.c           10
-rw-r--r--   mm/slab_common.c    5
2 files changed, 9 insertions, 6 deletions
diff --git a/mm/cma.c b/mm/cma.c
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -267,6 +267,9 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
 	if (alignment && !is_power_of_2(alignment))
 		return -EINVAL;
 
+	if (!IS_ENABLED(CONFIG_NUMA))
+		nid = NUMA_NO_NODE;
+
 	/* Sanitise input arguments. */
 	alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
 	if (fixed && base & (alignment - 1)) {
@@ -372,14 +375,15 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
 	if (ret)
 		goto free_mem;
 
-	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
-		&base);
+	pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M,
+		&base, nid);
 	return 0;
 
 free_mem:
 	memblock_phys_free(base, size);
 err:
-	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
+	pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M,
+	       nid);
 	return ret;
 }
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 01cdbf122463..cd71f9581e67 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -895,10 +895,9 @@ void __init setup_kmalloc_cache_index_table(void)
 
 static unsigned int __kmalloc_minalign(void)
 {
-#ifdef CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC
-	if (io_tlb_default_mem.nslabs)
+	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
+	    is_swiotlb_allocated())
 		return ARCH_KMALLOC_MINALIGN;
-#endif
 	return dma_get_cache_alignment();
 }
 
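The mm/slab_common.c hunk is a small example of trading an #ifdef block for
IS_ENABLED() plus a runtime helper (the new is_swiotlb_allocated()), so that
both branches stay visible to the compiler and the dead one is simply
optimized out. Below is a rough, self-contained userspace sketch of how an
IS_ENABLED()-style macro can turn a "#define CONFIG_FOO 1" into a constant
0/1 expression; IS_ENABLED_DEMO, CONFIG_DEMO_FEATURE and the stand-in return
values are made up for illustration, and the real machinery (which also
handles =m options) lives in the kernel's include/linux/kconfig.h.

#include <stdio.h>

#define CONFIG_DEMO_FEATURE 1	/* comment this out to "disable" the option */

/*
 * If the option is defined to 1, token pasting produces ARG_PLACEHOLDER_1,
 * which expands to "0," and shifts an extra argument in, so the second
 * argument picked below is 1.  Otherwise the trailing 0 is picked.
 */
#define ARG_PLACEHOLDER_1 0,
#define TAKE_SECOND_ARG(ignored, val, ...) val
#define IS_DEFINED_EXPAND(arg_or_junk) TAKE_SECOND_ARG(arg_or_junk 1, 0)
#define IS_DEFINED(val) IS_DEFINED_EXPAND(ARG_PLACEHOLDER_##val)
#define IS_ENABLED_DEMO(option) IS_DEFINED(option)

static unsigned int minalign_demo(void)
{
	/*
	 * Mirrors the shape of __kmalloc_minalign() after the patch: the
	 * runtime check only matters when the option is compiled in, but the
	 * disabled branch is still parsed and type-checked.
	 */
	if (IS_ENABLED_DEMO(CONFIG_DEMO_FEATURE))
		return 128;	/* stand-in for ARCH_KMALLOC_MINALIGN */
	return 8;		/* stand-in for dma_get_cache_alignment() */
}

int main(void)
{
	printf("minalign = %u\n", minalign_demo());
	return 0;
}

Compared with the removed #ifdef, this style catches typos in the disabled
branch at compile time and avoids interleaving preprocessor conditionals with
control flow, which is why it is generally preferred when both variants
compile on every configuration.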