author     Linus Torvalds <torvalds@linux-foundation.org>   2022-09-10 18:18:19 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-09-10 18:18:19 +0200
commit     16547b21b1c6c8e8c5546a5206ebccef3e28c92a
tree       b59411a10771653f96f84c104bb616034d75f12f /kernel
parent     Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/...
parent     dma-mapping: mark dma_supported static
Merge tag 'dma-mapping-6.0-2022-09-10' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:
- revert a panic on swiotlb initialization failure (Yu Zhao)
- fix the lookup for partial syncs in dma-debug (Robin Murphy); see the
  sketch after the shortlog below
- fix a shift overflow in swiotlb (Chao Gao); see the sketch after this
  list
- fix a comment typo in swiotlb (Chao Gao)
- mark a function static now that all abusers are gone (Christoph
Hellwig)
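
The swiotlb shift overflow is easy to reproduce in isolation. Below is a
minimal userspace sketch of the bug class, not the kernel code itself: when
slot_addr() was a macro, a 32-bit index shifted left by IO_TLB_SHIFT could
wrap before being widened for the 64-bit addition, and giving the helper a
fixed 64-bit prototype forces the widening first. The base address and index
values are invented purely to make the wraparound visible.

#include <stdint.h>
#include <stdio.h>

#define IO_TLB_SHIFT 11	/* 2KB swiotlb slots, as in the kernel */

/* Old pattern: with a 32-bit 'idx', the shift is evaluated in 32 bits
 * and can wrap before the addition to the 64-bit base address. */
#define slot_addr_macro(start, idx) ((start) + ((idx) << IO_TLB_SHIFT))

/* New pattern (the shape of the swiotlb fix): the parameter type widens
 * 'idx' to 64 bits, so the shift cannot overflow. */
static inline uint64_t slot_addr(uint64_t start, uint64_t idx)
{
	return start + (idx << IO_TLB_SHIFT);
}

int main(void)
{
	uint64_t start = 0x100000000ULL;	/* hypothetical pool base */
	unsigned int idx = 0x400000;		/* large slot index */

	/* 0x400000 << 11 == 0x200000000, which wraps to 0 in 32 bits */
	printf("macro:  %#llx\n", (unsigned long long)slot_addr_macro(start, idx));
	printf("inline: %#llx\n", (unsigned long long)slot_addr(start, idx));
	return 0;
}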
* tag 'dma-mapping-6.0-2022-09-10' of git://git.infradead.org/users/hch/dma-mapping:
dma-mapping: mark dma_supported static
swiotlb: fix a typo
swiotlb: avoid potential left shift overflow
dma-debug: improve search for partial syncs
Revert "swiotlb: panic if nslabs is too small"
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/dma/debug.c     6
-rw-r--r--   kernel/dma/mapping.c   3
-rw-r--r--   kernel/dma/swiotlb.c   13
3 files changed, 9 insertions, 13 deletions
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 2caafd13f8aa..18c93c2276ca 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -350,11 +350,10 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
 						   struct dma_debug_entry *ref,
 						   unsigned long *flags)
 {
-	unsigned int max_range = dma_get_max_seg_size(ref->dev);
 	struct dma_debug_entry *entry, index = *ref;
-	unsigned int range = 0;
+	int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);
 
-	while (range <= max_range) {
+	for (int i = 0; i < limit; i++) {
 		entry = __hash_bucket_find(*bucket, ref, containing_match);
 
 		if (entry)
@@ -364,7 +363,6 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
 		 * Nothing found, go back a hash bucket
 		 */
 		put_hash_bucket(*bucket, *flags);
-		range += (1 << HASH_FN_SHIFT);
 		index.dev_addr -= (1 << HASH_FN_SHIFT);
 		*bucket = get_hash_bucket(&index, flags);
 	}
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 49cbf3e33de7..27f272381cf2 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -707,7 +707,7 @@ int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
 
-int dma_supported(struct device *dev, u64 mask)
+static int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -721,7 +721,6 @@ int dma_supported(struct device *dev, u64 mask)
 		return 1;
 	return ops->dma_supported(dev, mask);
 }
-EXPORT_SYMBOL(dma_supported);
 
 bool dma_pci_p2pdma_supported(struct device *dev)
 {
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index c5a9190b218f..0ef6b12f961d 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -326,9 +326,6 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 		swiotlb_adjust_nareas(num_possible_cpus());
 
 	nslabs = default_nslabs;
-	if (nslabs < IO_TLB_MIN_SLABS)
-		panic("%s: nslabs = %lu too small\n", __func__, nslabs);
-
 	/*
 	 * By default allocate the bounce buffer memory from low memory, but
 	 * allow to pick a location everywhere for hypervisors with guest
@@ -341,8 +338,7 @@ retry:
 	else
 		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
 	if (!tlb) {
-		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
-			__func__, bytes);
+		pr_warn("%s: failed to allocate tlb structure\n", __func__);
 		return;
 	}
 
@@ -579,7 +575,10 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 	}
 }
 
-#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))
+static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
+{
+	return start + (idx << IO_TLB_SHIFT);
+}
 
 /*
  * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
@@ -765,7 +764,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	/*
 	 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
	 * to the tlb buffer, if we knew for sure the device will
-	 * overwirte the entire current content. But we don't. Thus
+	 * overwrite the entire current content. But we don't. Thus
 	 * unconditional bounce may prevent leaking swiotlb content (i.e.
 	 * kernel memory) to user-space.
 	 */
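
With dma_supported() now static to kernel/dma/mapping.c (the mapping.c hunks
above), drivers reach it only indirectly via the mask-setting helpers, which
call it internally. A sketch of the usual probe-time idiom follows;
example_probe() is a hypothetical driver function, not part of this series.

#include <linux/dma-mapping.h>

/* Hypothetical driver probe: negotiate the widest DMA mask the device
 * and platform support; dma_set_mask_and_coherent() consults
 * dma_supported() internally and returns 0 on success. */
static int example_probe(struct device *dev)
{
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;	/* no usable DMA addressing */
	return 0;
}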