author		Will Deacon <will@kernel.org>	2024-03-08 16:28:29 +0100
committer	Christoph Hellwig <hch@lst.de>	2024-03-13 19:39:34 +0100
commit		14cebf689a78e8a1c041138af221ef6eac6bc7da (patch)
tree		a556053871a62a03b875d023c52449470416b361
parent		iommu/dma: Force swiotlb_max_mapping_size on an untrusted device (diff)
swiotlb: Reinstate page-alignment for mappings >= PAGE_SIZE
For swiotlb allocations >= PAGE_SIZE, the slab search historically
adjusted the stride to avoid checking unaligned slots. This had the
side-effect of aligning large mapping requests to PAGE_SIZE, but that
was broken by 0eee5ae10256 ("swiotlb: fix slot alignment checks").

Since this alignment could be relied upon by drivers, reinstate
PAGE_SIZE alignment for swiotlb mappings >= PAGE_SIZE.

Reported-by: Michael Kelley <mhklinux@outlook.com>
Signed-off-by: Will Deacon <will@kernel.org>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Petr Tesarik <petr.tesarik1@huawei-partners.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Michael Kelley <mhklinux@outlook.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
-rw-r--r--	kernel/dma/swiotlb.c	18
1 file changed, 11 insertions(+), 7 deletions(-)
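The "drivers relying on it" clause in the message is the crux. As a rough, hypothetical illustration (the function and names below are made up for this sketch, not taken from any real driver): a driver maps a page-aligned buffer of at least PAGE_SIZE for streaming DMA and assumes the address it gets back stays page-aligned even when swiotlb bounces the buffer.

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver pattern (illustration only). 'buf' is assumed to be
 * page-aligned and 'len' >= PAGE_SIZE. If swiotlb bounces this mapping into
 * a slot that is only slot-aligned rather than page-aligned, the WARN below
 * fires: the driver was implicitly relying on the historical PAGE_SIZE
 * alignment that this patch reinstates.
 */
static int example_map_pages(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	WARN_ON_ONCE(!IS_ALIGNED(dma, PAGE_SIZE));

	/* ... program the hardware with 'dma', assuming page alignment ... */
	return 0;
}

Without the reinstated default, the bounce slot only had to be IO_TLB_SIZE-aligned (typically 2 KiB), so the low page-offset bits of 'dma' were no longer guaranteed to be zero even for a page-aligned source buffer.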
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index f212943e51ca..86fe172b5958 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1015,6 +1015,17 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	BUG_ON(area_index >= pool->nareas);
 
+	/*
+	 * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
+	 * page-aligned in the absence of any other alignment requirements.
+	 * 'alloc_align_mask' was later introduced to specify the alignment
+	 * explicitly, however this is passed as zero for streaming mappings
+	 * and so we preserve the old behaviour there in case any drivers are
+	 * relying on it.
+	 */
+	if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
+		alloc_align_mask = PAGE_SIZE - 1;
+
 	/*
 	 * Ensure that the allocation is at least slot-aligned and update
 	 * 'iotlb_align_mask' to ignore bits that will be preserved when
 	 * offsetting into the allocation.
@@ -1028,13 +1039,6 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
 	 */
 	stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
 
-	/*
-	 * For allocations of PAGE_SIZE or larger only look for page aligned
-	 * allocations.
-	 */
-	if (alloc_size >= PAGE_SIZE)
-		stride = umax(stride, PAGE_SHIFT - IO_TLB_SHIFT + 1);
-
 	spin_lock_irqsave(&area->lock, flags);
 	if (unlikely(nslots > pool->area_nslabs - area->used))
 		goto not_found;
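For the arithmetic itself: the deleted branch forced the stride to PAGE_SHIFT - IO_TLB_SHIFT + 1 slots, while the new default feeds PAGE_SIZE - 1 into the same mask-to-stride conversion used for explicit alignment requests. A minimal, standalone sketch of why the two agree in the common 4 KiB-page configuration assumed here (2 KiB swiotlb slots, and a simplified stand-in for the kernel's get_max_slots()):

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel constants (assumed values). */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* 4 KiB pages */
#define IO_TLB_SHIFT	11
#define IO_TLB_SIZE	(1UL << IO_TLB_SHIFT)	/* 2 KiB swiotlb slots */

/* Rough model of get_max_slots(): slots needed to cover 'mask + 1' bytes. */
static unsigned long get_max_slots(unsigned long mask)
{
	return ((mask + 1) + IO_TLB_SIZE - 1) / IO_TLB_SIZE;
}

int main(void)
{
	/* Deleted branch: bump the stride directly for allocations >= PAGE_SIZE. */
	unsigned long old_stride = PAGE_SHIFT - IO_TLB_SHIFT + 1;	/* = 2 */

	/* New default: treat the mapping as if alloc_align_mask were
	 * PAGE_SIZE - 1 and let the generic mask-to-stride conversion run. */
	unsigned long alloc_align_mask = PAGE_SIZE - 1;
	unsigned long new_stride = get_max_slots(alloc_align_mask);	/* = 2 */

	/* Equal for the 4 KiB-page, 2 KiB-slot configuration assumed above. */
	assert(old_stride == new_stride);
	printf("stride = %lu slots of %lu bytes\n", new_stride, IO_TLB_SIZE);
	return 0;
}

Note that the new code only applies this default when both alloc_align_mask and iotlb_align_mask are zero and alloc_size >= PAGE_SIZE, which is exactly the condition encoded in the added if-statement; callers that pass an explicit alignment are unaffected.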