author      Joerg Roedel <jroedel@suse.de>  2023-01-25 11:54:58 +0100
committer   Joerg Roedel <jroedel@suse.de>  2023-01-25 11:54:58 +0100
commit      ff489fe002ab20daf0985f119682474cfda83496 (patch)
tree        ee367a7703f1570680524e4fc6dafc7e11a0bc09 /drivers/iommu/s390-iommu.c
parent      iommu: dma: Use of_iommu_get_resv_regions() (diff)
parent      iommu/s390: Use GFP_KERNEL in sleepable contexts (diff)
Merge branch 'iommu-memory-accounting' into core
Merge patch set from Jason:
"Let iommufd charge IOPTE allocations to the memory cgroup"
Description:
IOMMUFD follows the same design as KVM and uses memory cgroups to limit
the amount of kernel memory an iommufd file descriptor can pin down. The
various internal data structures already use GFP_KERNEL_ACCOUNT to charge
their own memory.
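As a minimal illustration of the charging mechanism (the struct and function
names below are invented for the example, not taken from iommufd), an
allocation only needs GFP_KERNEL_ACCOUNT for the memory controller to bill it
to the allocating task's cgroup:

#include <linux/slab.h>
#include <linux/gfp.h>

/* Hypothetical per-file-descriptor bookkeeping object. */
struct example_ioas {
	unsigned long nr_mapped;
};

static struct example_ioas *example_ioas_alloc(void)
{
	/*
	 * GFP_KERNEL_ACCOUNT charges this kernel allocation to the memory
	 * cgroup of the current task, so a memcg limit bounds how much of
	 * this memory a single user can tie up.
	 */
	return kzalloc(sizeof(struct example_ioas), GFP_KERNEL_ACCOUNT);
}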
However, one of the biggest consumers of kernel memory is the IOPTEs
stored under the iommu_domain, and these allocations are not tracked.
This series is the first step in fixing that.
The iommu driver contract already includes a 'gfp' argument to the
map_pages op, so letting iommufd specify GFP_KERNEL_ACCOUNT and having the
driver allocate its IOPTE tables with that flag will capture a significant
amount of the allocations.
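On the driver side, honoring that contract reduces to using the op's gfp for
the page-table allocations. A rough sketch with an invented driver follows:
the prototype is modeled on the map_pages op (gfp before the mapped
out-parameter), while everything prefixed example_ is hypothetical.

#include <linux/iommu.h>
#include <linux/gfp.h>

static int example_map_pages(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t pgsize, size_t pgcount,
			     int prot, gfp_t gfp, size_t *mapped)
{
	unsigned long *table;

	/*
	 * Allocate the IOPTE backing with the caller-supplied gfp, so a
	 * GFP_KERNEL_ACCOUNT caller gets the table charged to its memory
	 * cgroup instead of the driver silently falling back to GFP_ATOMIC.
	 */
	table = (unsigned long *)get_zeroed_page(gfp);
	if (!table)
		return -ENOMEM;

	/* ... fill in the entries and link the table into the domain ... */
	*mapped = pgsize * pgcount;
	return 0;
}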
Update the iommu_map() API to pass in the GFP argument, and fix all call
sites. Replace iommu_map_atomic().
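The conversion at each call site is mechanical; a sketch assuming the
post-series iommu_map() prototype with a trailing gfp_t argument (the wrapper
function itself is invented):

#include <linux/types.h>
#include <linux/iommu.h>

static int example_convert_call_site(struct iommu_domain *domain,
				     unsigned long iova, phys_addr_t paddr,
				     size_t size, bool can_sleep)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	/* Before: iommu_map_atomic(domain, iova, paddr, size, prot); */

	/* After: the caller spells out its allocation context. */
	return iommu_map(domain, iova, paddr, size, prot,
			 can_sleep ? GFP_KERNEL : GFP_ATOMIC);
}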
Audit the "enterprise" iommu drivers to make sure they do the right thing.
Intel and S390 ignore the GFP argument and always use GFP_ATOMIC. This is
problematic for iommufd anyhow, so fix it. AMD and ARM SMMUv2/3 are
already correct.
A follow up series will be needed to capture the allocations made when the
iommu_domain itself is allocated, which will complete the job.
Link: https://lore.kernel.org/linux-iommu/0-v3-76b587fe28df+6e3-iommu_map_gfp_jgg@nvidia.com/
Diffstat (limited to 'drivers/iommu/s390-iommu.c')
-rw-r--r--  drivers/iommu/s390-iommu.c |  15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 5591dab99446..6849644e2892 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -52,7 +52,7 @@ static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
 	if (!s390_domain)
 		return NULL;
 
-	s390_domain->dma_table = dma_alloc_cpu_table();
+	s390_domain->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
 	if (!s390_domain->dma_table) {
 		kfree(s390_domain);
 		return NULL;
@@ -257,7 +257,8 @@ static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
 
 static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
 				     phys_addr_t pa, dma_addr_t dma_addr,
-				     unsigned long nr_pages, int flags)
+				     unsigned long nr_pages, int flags,
+				     gfp_t gfp)
 {
 	phys_addr_t page_addr = pa & PAGE_MASK;
 	unsigned long *entry;
@@ -265,7 +266,8 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
 	int rc;
 
 	for (i = 0; i < nr_pages; i++) {
-		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
+					   gfp);
 		if (unlikely(!entry)) {
 			rc = -ENOMEM;
 			goto undo_cpu_trans;
@@ -281,7 +283,7 @@ undo_cpu_trans:
 		while (i-- > 0) {
 			dma_addr -= PAGE_SIZE;
 			entry = dma_walk_cpu_trans(s390_domain->dma_table,
-						   dma_addr);
+						   dma_addr, gfp);
 			if (!entry)
 				break;
 			dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
@@ -298,7 +300,8 @@ static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
 	int rc = 0;
 
 	for (i = 0; i < nr_pages; i++) {
-		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
+					   GFP_ATOMIC);
 		if (unlikely(!entry)) {
 			rc = -EINVAL;
 			break;
@@ -336,7 +339,7 @@ static int s390_iommu_map_pages(struct iommu_domain *domain,
 		flags |= ZPCI_TABLE_PROTECTED;
 
 	rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
-				       pgcount, flags);
+				       pgcount, flags, gfp);
 	if (!rc)
 		*mapped = size;
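With the s390 changes above, the gfp passed to iommu_map() now reaches
dma_walk_cpu_trans() instead of being replaced by a hard-coded GFP_ATOMIC. A
usage sketch of the end result (the wrapper function is invented; the flag
plumbing is what this merge implements):

#include <linux/iommu.h>

static int example_map_accounted(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t size)
{
	/*
	 * GFP_KERNEL_ACCOUNT travels from iommu_map() into the driver's
	 * ->map_pages(), which on s390 forwards it down the DMA table walk,
	 * so the IOPTE tables built for this mapping are billed to the
	 * caller's memory cgroup.
	 */
	return iommu_map(domain, iova, paddr, size,
			 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL_ACCOUNT);
}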