author     Jason Gunthorpe <jgg@nvidia.com>	2024-08-30 02:06:15 +0200
committer  Joerg Roedel <jroedel@suse.de>	2024-09-04 11:38:32 +0200
commit     670b57796c5dc1ca58912132cad914cf4b3c0cdd
tree       3979146b2680a6def9a1d7bda18d461d72f62646 /drivers/iommu
parent     iommu/amd: Remove the amd_iommu_domain_set_pt_root() and related
iommu/amd: Rename struct amd_io_pgtable iop to pgtbl
struct protection_domain has a member named iop, and struct amd_io_pgtable
also has a member named iop. The next patches will want to write
domain.iop.iop.xx, which is quite unnatural to read.
Give one of them a different name; amd_io_pgtable has fewer references, so
rename its member to pgtbl, matching pgtbl_cfg.
Suggested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/6-v2-831cdc4d00f3+1a315-amd_iopgtbl_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
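For readers unfamiliar with the layout, here is a minimal, self-contained sketch of the nesting the message refers to. The struct bodies are simplified stand-ins (not the kernel definitions) and container_of() is redefined locally; only the member names and the access chain mirror the patch: struct protection_domain embeds struct amd_io_pgtable as iop, which in turn embeds struct io_pgtable, formerly also named iop. After the rename the chain reads domain.iop.pgtbl.xx, and io_pgtable_to_data() still recovers the containing amd_io_pgtable via container_of().

/*
 * Standalone sketch, not kernel code: simplified stand-ins for
 * struct io_pgtable, struct amd_io_pgtable and struct protection_domain,
 * trimmed to the members this commit touches.
 */
#include <stddef.h>
#include <stdio.h>

/* Local re-definition for the sketch; the kernel has its own container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct io_pgtable_ops { int dummy; };                 /* placeholder body */
struct io_pgtable_cfg { unsigned long pgsize_bitmap; };
struct io_pgtable {
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops ops;
};

struct amd_io_pgtable {
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable pgtbl;      /* was named "iop" before this commit */
	int mode;
};

struct protection_domain {
	struct amd_io_pgtable iop;    /* unchanged by this commit */
};

/* After the rename, the accessor macro resolves against the new member name. */
#define io_pgtable_to_data(x) \
	container_of((x), struct amd_io_pgtable, pgtbl)

int main(void)
{
	struct protection_domain domain = { 0 };

	/* Old spelling: domain.iop.iop.cfg -- two different "iop"s in a row.
	 * New spelling after the rename: */
	struct io_pgtable_cfg *cfg = &domain.iop.pgtbl.cfg;

	/* Recover the containing amd_io_pgtable from the embedded io_pgtable,
	 * as v1_free_pgtable()/v2_free_pgtable() do via io_pgtable_to_data(). */
	struct amd_io_pgtable *pgtable = io_pgtable_to_data(&domain.iop.pgtbl);

	printf("recovered container matches: %d\n", pgtable == &domain.iop);
	printf("cfg pgsize_bitmap: %lu\n", cfg->pgsize_bitmap);
	return 0;
}

Built as ordinary userspace C, the first line prints 1, confirming container_of() maps the embedded io_pgtable back to its amd_io_pgtable regardless of the member's name.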
Diffstat (limited to 'drivers/iommu')
 drivers/iommu/amd/amd_iommu_types.h |  4 ++--
 drivers/iommu/amd/io_pgtable.c      | 12 ++++++------
 drivers/iommu/amd/io_pgtable_v2.c   | 14 +++++++-------
 drivers/iommu/amd/iommu.c           | 14 +++++++-------
 4 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index c7432296bb90..ce7825b4d631 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -519,7 +519,7 @@ struct amd_irte_ops;
 #define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED (1 << 0)
 
 #define io_pgtable_to_data(x) \
-	container_of((x), struct amd_io_pgtable, iop)
+	container_of((x), struct amd_io_pgtable, pgtbl)
 
 #define io_pgtable_ops_to_data(x) \
 	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
@@ -540,7 +540,7 @@ struct gcr3_tbl_info {
 
 struct amd_io_pgtable {
 	struct io_pgtable_cfg pgtbl_cfg;
-	struct io_pgtable iop;
+	struct io_pgtable pgtbl;
 	int mode;
 	u64 *root;
 	u64 *pgd; /* v2 pgtable pgd pointer */
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index e0abcf38c314..53de1146928e 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -541,7 +541,7 @@ static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
  */
 static void v1_free_pgtable(struct io_pgtable *iop)
 {
-	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
+	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
 	LIST_HEAD(freelist);
 
 	if (pgtable->mode == PAGE_MODE_NONE)
@@ -569,12 +569,12 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
 	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
 	cfg->tlb = &v1_flush_ops;
 
-	pgtable->iop.ops.map_pages = iommu_v1_map_pages;
-	pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages;
-	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
-	pgtable->iop.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;
+	pgtable->pgtbl.ops.map_pages = iommu_v1_map_pages;
+	pgtable->pgtbl.ops.unmap_pages = iommu_v1_unmap_pages;
+	pgtable->pgtbl.ops.iova_to_phys = iommu_v1_iova_to_phys;
+	pgtable->pgtbl.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;
 
-	return &pgtable->iop;
+	return &pgtable->pgtbl;
 }
 
 struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index 6088822180e1..45a6bc332639 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -234,7 +234,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 			      int prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
-	struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg;
+	struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg;
 	u64 *pte;
 	unsigned long map_size;
 	unsigned long mapped_size = 0;
@@ -281,7 +281,7 @@ static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
 					   struct iommu_iotlb_gather *gather)
 {
 	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
-	struct io_pgtable_cfg *cfg = &pgtable->iop.cfg;
+	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
 	unsigned long unmap_size;
 	unsigned long unmapped = 0;
 	size_t size = pgcount << __ffs(pgsize);
@@ -346,7 +346,7 @@ static const struct iommu_flush_ops v2_flush_ops = {
 
 static void v2_free_pgtable(struct io_pgtable *iop)
 {
-	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
+	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
 
 	if (!pgtable || !pgtable->pgd)
 		return;
@@ -369,16 +369,16 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
 	if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
 		ias = 57;
 
-	pgtable->iop.ops.map_pages = iommu_v2_map_pages;
-	pgtable->iop.ops.unmap_pages = iommu_v2_unmap_pages;
-	pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;
+	pgtable->pgtbl.ops.map_pages = iommu_v2_map_pages;
+	pgtable->pgtbl.ops.unmap_pages = iommu_v2_unmap_pages;
+	pgtable->pgtbl.ops.iova_to_phys = iommu_v2_iova_to_phys;
 
 	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2,
 	cfg->ias = ias,
 	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
 	cfg->tlb = &v2_flush_ops;
 
-	return &pgtable->iop;
+	return &pgtable->pgtbl;
 }
 
 struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 05ae44f13ec8..14030adba3d0 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2258,7 +2258,7 @@ void protection_domain_free(struct protection_domain *domain)
 	WARN_ON(!list_empty(&domain->dev_list));
 
 	if (domain->iop.pgtbl_cfg.tlb)
-		free_io_pgtable_ops(&domain->iop.iop.ops);
+		free_io_pgtable_ops(&domain->iop.pgtbl.ops);
 
 	if (domain->id)
 		domain_id_free(domain->id);
@@ -2366,7 +2366,7 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 	domain->domain.geometry.aperture_start = 0;
 	domain->domain.geometry.aperture_end = dma_max_address();
 	domain->domain.geometry.force_aperture = true;
-	domain->domain.pgsize_bitmap = domain->iop.iop.cfg.pgsize_bitmap;
+	domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
 
 	if (iommu) {
 		domain->domain.type = type;
@@ -2510,7 +2510,7 @@ static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
 				    unsigned long iova, size_t size)
 {
 	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
 
 	if (ops->map_pages)
 		domain_flush_np_cache(domain, iova, size);
@@ -2522,7 +2522,7 @@ static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
 			       int iommu_prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
 	int prot = 0;
 	int ret = -EINVAL;
 
@@ -2569,7 +2569,7 @@ static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova
 				    struct iommu_iotlb_gather *gather)
 {
 	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
 	size_t r;
 
 	if ((domain->pd_mode == PD_MODE_V1) &&
@@ -2588,7 +2588,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 					  dma_addr_t iova)
 {
 	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
 
 	return ops->iova_to_phys(ops, iova);
 }
@@ -2666,7 +2666,7 @@ static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
 					   struct iommu_dirty_bitmap *dirty)
 {
 	struct protection_domain *pdomain = to_pdomain(domain);
-	struct io_pgtable_ops *ops = &pdomain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &pdomain->iop.pgtbl.ops;
 	unsigned long lflags;
 
 	if (!ops || !ops->read_and_clear_dirty)