Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu_v2.c        |   2
-rw-r--r--  drivers/iommu/arm-smmu-v3.c         |  48
-rw-r--r--  drivers/iommu/arm-smmu.c            | 112
-rw-r--r--  drivers/iommu/dma-iommu.c           | 283
-rw-r--r--  drivers/iommu/dmar.c                |  35
-rw-r--r--  drivers/iommu/exynos-iommu.c        |  32
-rw-r--r--  drivers/iommu/fsl_pamu.h            |   1
-rw-r--r--  drivers/iommu/intel-iommu.c         |  36
-rw-r--r--  drivers/iommu/intel_irq_remapping.c |  15
-rw-r--r--  drivers/iommu/iommu.c               |  50
-rw-r--r--  drivers/iommu/iova.c                |  89
-rw-r--r--  drivers/iommu/mtk_iommu_v1.c        |  25
-rw-r--r--  drivers/iommu/of_iommu.c            | 126
-rw-r--r--  drivers/iommu/omap-iommu.c          | 190
-rw-r--r--  drivers/iommu/omap-iommu.h          |  34
-rw-r--r--  drivers/iommu/rockchip-iommu.c      |  31
-rw-r--r--  drivers/iommu/tegra-smmu.c          |   1
17 files changed, 702 insertions(+), 408 deletions(-)
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 063343909b0d..6629c472eafd 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -696,9 +696,9 @@ out_clear_state: out_unregister: mmu_notifier_unregister(&pasid_state->mn, mm); + mmput(mm); out_free: - mmput(mm); free_pasid_state(pasid_state); out: diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 6ef9c3ed4344..380969aa60d5 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1912,6 +1912,8 @@ static void arm_smmu_get_resv_regions(struct device *dev, return; list_add_tail(®ion->list, head); + + iommu_dma_get_resv_regions(dev, head); } static void arm_smmu_put_resv_regions(struct device *dev, @@ -2780,51 +2782,9 @@ static struct platform_driver arm_smmu_driver = { .probe = arm_smmu_device_probe, .remove = arm_smmu_device_remove, }; +module_platform_driver(arm_smmu_driver); -static int __init arm_smmu_init(void) -{ - static bool registered; - int ret = 0; - - if (!registered) { - ret = platform_driver_register(&arm_smmu_driver); - registered = !ret; - } - return ret; -} - -static void __exit arm_smmu_exit(void) -{ - return platform_driver_unregister(&arm_smmu_driver); -} - -subsys_initcall(arm_smmu_init); -module_exit(arm_smmu_exit); - -static int __init arm_smmu_of_init(struct device_node *np) -{ - int ret = arm_smmu_init(); - - if (ret) - return ret; - - if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) - return -ENODEV; - - return 0; -} -IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init); - -#ifdef CONFIG_ACPI -static int __init acpi_smmu_v3_init(struct acpi_table_header *table) -{ - if (iort_node_match(ACPI_IORT_NODE_SMMU_V3)) - return arm_smmu_init(); - - return 0; -} -IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init); -#endif +IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL); MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations"); MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index a36e80abf084..7ec30b08b3bd 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1692,6 +1692,8 @@ static void arm_smmu_get_resv_regions(struct device *dev, return; list_add_tail(®ion->list, head); + + iommu_dma_get_resv_regions(dev, head); } static void arm_smmu_put_resv_regions(struct device *dev, @@ -2155,6 +2157,23 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev, return 0; } +static void arm_smmu_bus_init(void) +{ + /* Oh, for a proper bus abstraction */ + if (!iommu_present(&platform_bus_type)) + bus_set_iommu(&platform_bus_type, &arm_smmu_ops); +#ifdef CONFIG_ARM_AMBA + if (!iommu_present(&amba_bustype)) + bus_set_iommu(&amba_bustype, &arm_smmu_ops); +#endif +#ifdef CONFIG_PCI + if (!iommu_present(&pci_bus_type)) { + pci_request_acs(); + bus_set_iommu(&pci_bus_type, &arm_smmu_ops); + } +#endif +} + static int arm_smmu_device_probe(struct platform_device *pdev) { struct resource *res; @@ -2260,22 +2279,31 @@ static int arm_smmu_device_probe(struct platform_device *pdev) arm_smmu_device_reset(smmu); arm_smmu_test_smr_masks(smmu); - /* Oh, for a proper bus abstraction */ - if (!iommu_present(&platform_bus_type)) - bus_set_iommu(&platform_bus_type, &arm_smmu_ops); -#ifdef CONFIG_ARM_AMBA - if (!iommu_present(&amba_bustype)) - bus_set_iommu(&amba_bustype, &arm_smmu_ops); -#endif -#ifdef CONFIG_PCI - if (!iommu_present(&pci_bus_type)) { - pci_request_acs(); - 
bus_set_iommu(&pci_bus_type, &arm_smmu_ops); - } -#endif + /* + * For ACPI and generic DT bindings, an SMMU will be probed before + * any device which might need it, so we want the bus ops in place + * ready to handle default domain setup as soon as any SMMU exists. + */ + if (!using_legacy_binding) + arm_smmu_bus_init(); + return 0; } +/* + * With the legacy DT binding in play, though, we have no guarantees about + * probe order, but then we're also not doing default domains, so we can + * delay setting bus ops until we're sure every possible SMMU is ready, + * and that way ensure that no add_device() calls get missed. + */ +static int arm_smmu_legacy_bus_init(void) +{ + if (using_legacy_binding) + arm_smmu_bus_init(); + return 0; +} +device_initcall_sync(arm_smmu_legacy_bus_init); + static int arm_smmu_device_remove(struct platform_device *pdev) { struct arm_smmu_device *smmu = platform_get_drvdata(pdev); @@ -2299,56 +2327,14 @@ static struct platform_driver arm_smmu_driver = { .probe = arm_smmu_device_probe, .remove = arm_smmu_device_remove, }; - -static int __init arm_smmu_init(void) -{ - static bool registered; - int ret = 0; - - if (!registered) { - ret = platform_driver_register(&arm_smmu_driver); - registered = !ret; - } - return ret; -} - -static void __exit arm_smmu_exit(void) -{ - return platform_driver_unregister(&arm_smmu_driver); -} - -subsys_initcall(arm_smmu_init); -module_exit(arm_smmu_exit); - -static int __init arm_smmu_of_init(struct device_node *np) -{ - int ret = arm_smmu_init(); - - if (ret) - return ret; - - if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) - return -ENODEV; - - return 0; -} -IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init); -IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init); -IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init); -IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init); -IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init); -IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init); - -#ifdef CONFIG_ACPI -static int __init arm_smmu_acpi_init(struct acpi_table_header *table) -{ - if (iort_node_match(ACPI_IORT_NODE_SMMU)) - return arm_smmu_init(); - - return 0; -} -IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init); -#endif +module_platform_driver(arm_smmu_driver); + +IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL); +IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL); +IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL); +IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL); +IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL); +IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL); MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 48d36ce59efb..8348f366ddd1 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -61,15 +61,6 @@ static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) return PAGE_SIZE; } -static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain) -{ - struct iommu_dma_cookie *cookie = domain->iova_cookie; - - if (cookie->type == IOMMU_DMA_IOVA_COOKIE) - return &cookie->iovad; - return NULL; -} - static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) { struct iommu_dma_cookie *cookie; @@ -167,22 +158,99 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) } EXPORT_SYMBOL(iommu_put_dma_cookie); -static void 
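For reference, the module_platform_driver() one-liner that replaces the hand-rolled registration above expands, via the generic module_driver() helper, to essentially the following boilerplate; this is a simplified sketch, not the exact preprocessor output:

	static int __init arm_smmu_driver_init(void)
	{
		return platform_driver_register(&arm_smmu_driver);
	}
	module_init(arm_smmu_driver_init);

	static void __exit arm_smmu_driver_exit(void)
	{
		platform_driver_unregister(&arm_smmu_driver);
	}
	module_exit(arm_smmu_driver_exit);

One behavioural consequence worth noting: the old code registered at subsys_initcall time, whereas module_init() maps to device_initcall for built-in code, which fits with the separate device_initcall_sync() hook the legacy-binding path adds above to set the bus ops only once every possible SMMU has probed.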
iova_reserve_pci_windows(struct pci_dev *dev, - struct iova_domain *iovad) +/** + * iommu_dma_get_resv_regions - Reserved region driver helper + * @dev: Device from iommu_get_resv_regions() + * @list: Reserved region list from iommu_get_resv_regions() + * + * IOMMU drivers can use this to implement their .get_resv_regions callback + * for general non-IOMMU-specific reservations. Currently, this covers host + * bridge windows for PCI devices. + */ +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list) { - struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); + struct pci_host_bridge *bridge; struct resource_entry *window; - unsigned long lo, hi; + if (!dev_is_pci(dev)) + return; + + bridge = pci_find_host_bridge(to_pci_dev(dev)->bus); resource_list_for_each_entry(window, &bridge->windows) { - if (resource_type(window->res) != IORESOURCE_MEM && - resource_type(window->res) != IORESOURCE_IO) + struct iommu_resv_region *region; + phys_addr_t start; + size_t length; + + if (resource_type(window->res) != IORESOURCE_MEM) + continue; + + start = window->res->start - window->offset; + length = window->res->end - window->res->start + 1; + region = iommu_alloc_resv_region(start, length, 0, + IOMMU_RESV_RESERVED); + if (!region) + return; + + list_add_tail(®ion->list, list); + } +} +EXPORT_SYMBOL(iommu_dma_get_resv_regions); + +static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, + phys_addr_t start, phys_addr_t end) +{ + struct iova_domain *iovad = &cookie->iovad; + struct iommu_dma_msi_page *msi_page; + int i, num_pages; + + start -= iova_offset(iovad, start); + num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); + + msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL); + if (!msi_page) + return -ENOMEM; + + for (i = 0; i < num_pages; i++) { + msi_page[i].phys = start; + msi_page[i].iova = start; + INIT_LIST_HEAD(&msi_page[i].list); + list_add(&msi_page[i].list, &cookie->msi_page_list); + start += iovad->granule; + } + + return 0; +} + +static int iova_reserve_iommu_regions(struct device *dev, + struct iommu_domain *domain) +{ + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + struct iommu_resv_region *region; + LIST_HEAD(resv_regions); + int ret = 0; + + iommu_get_resv_regions(dev, &resv_regions); + list_for_each_entry(region, &resv_regions, list) { + unsigned long lo, hi; + + /* We ARE the software that manages these! */ + if (region->type == IOMMU_RESV_SW_MSI) continue; - lo = iova_pfn(iovad, window->res->start - window->offset); - hi = iova_pfn(iovad, window->res->end - window->offset); + lo = iova_pfn(iovad, region->start); + hi = iova_pfn(iovad, region->start + region->length - 1); reserve_iova(iovad, lo, hi); + + if (region->type == IOMMU_RESV_MSI) + ret = cookie_init_hw_msi_region(cookie, region->start, + region->start + region->length); + if (ret) + break; } + iommu_put_resv_regions(dev, &resv_regions); + + return ret; } /** @@ -203,7 +271,6 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iova_domain *iovad = &cookie->iovad; unsigned long order, base_pfn, end_pfn; - bool pci = dev && dev_is_pci(dev); if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) return -EINVAL; @@ -233,7 +300,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, * leave the cache limit at the top of their range to save an rb_last() * traversal on every allocation. 
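As a usage sketch of the new helper (the my_smmu names and MY_SMMU_MSI_BASE are hypothetical), a driver's .get_resv_regions callback adds its own hardware-specific regions and then chains into iommu_dma_get_resv_regions() for the generic host-bridge-window reservations, mirroring the arm-smmu hunks above:

	static void my_smmu_get_resv_regions(struct device *dev,
					     struct list_head *head)
	{
		int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
		struct iommu_resv_region *region;

		/* hypothetical MSI doorbell region managed in software */
		region = iommu_alloc_resv_region(MY_SMMU_MSI_BASE, SZ_1M,
						 prot, IOMMU_RESV_SW_MSI);
		if (!region)
			return;
		list_add_tail(&region->list, head);

		/* generic, non-IOMMU-specific reservations (PCI windows) */
		iommu_dma_get_resv_regions(dev, head);
	}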
*/ - if (pci) + if (dev && dev_is_pci(dev)) end_pfn &= DMA_BIT_MASK(32) >> order; /* start_pfn is always nonzero for an already-initialised domain */ @@ -248,12 +315,15 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, * area cache limit down for the benefit of the smaller one. */ iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn); - } else { - init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); - if (pci) - iova_reserve_pci_windows(to_pci_dev(dev), iovad); + + return 0; } - return 0; + + init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); + if (!dev) + return 0; + + return iova_reserve_iommu_regions(dev, domain); } EXPORT_SYMBOL(iommu_dma_init_domain); @@ -286,48 +356,67 @@ int dma_info_to_prot(enum dma_data_direction dir, bool coherent, } } -static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, - dma_addr_t dma_limit, struct device *dev) +static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, + size_t size, dma_addr_t dma_limit, struct device *dev) { - struct iova_domain *iovad = cookie_iovad(domain); - unsigned long shift = iova_shift(iovad); - unsigned long length = iova_align(iovad, size) >> shift; - struct iova *iova = NULL; + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + unsigned long shift, iova_len, iova = 0; + + if (cookie->type == IOMMU_DMA_MSI_COOKIE) { + cookie->msi_iova += size; + return cookie->msi_iova - size; + } + + shift = iova_shift(iovad); + iova_len = size >> shift; + /* + * Freeing non-power-of-two-sized allocations back into the IOVA caches + * will come back to bite us badly, so we have to waste a bit of space + * rounding up anything cacheable to make sure that can't happen. The + * order of the unadjusted size will still match upon freeing. + */ + if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1))) + iova_len = roundup_pow_of_two(iova_len); if (domain->geometry.force_aperture) dma_limit = min(dma_limit, domain->geometry.aperture_end); /* Try to get PCI devices a SAC address */ if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) - iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift, - true); - /* - * Enforce size-alignment to be safe - there could perhaps be an - * attribute to control this per-device, or at least per-domain... 
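A worked example of the rounding above, assuming a 4KiB IOVA granule and the stock IOVA_RANGE_CACHE_MAX_SIZE of 6: a five-page request is padded to eight pages, so that the free path, which recomputes the length from the same unadjusted size, arrives at the identical value and returns the range to the order-3 cache it was allocated from.

	unsigned long iova_len = 5;	/* pages, i.e. a 20KiB mapping */

	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))	/* 5 < 32 */
		iova_len = roundup_pow_of_two(iova_len);	/* -> 8 pages */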
- */ + iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift); + if (!iova) - iova = alloc_iova(iovad, length, dma_limit >> shift, true); + iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift); - return iova; + return (dma_addr_t)iova << shift; } -/* The IOVA allocator knows what we mapped, so just unmap whatever that was */ -static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr) +static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, + dma_addr_t iova, size_t size) { - struct iova_domain *iovad = cookie_iovad(domain); + struct iova_domain *iovad = &cookie->iovad; unsigned long shift = iova_shift(iovad); - unsigned long pfn = dma_addr >> shift; - struct iova *iova = find_iova(iovad, pfn); - size_t size; - if (WARN_ON(!iova)) - return; + /* The MSI case is only ever cleaning up its most recent allocation */ + if (cookie->type == IOMMU_DMA_MSI_COOKIE) + cookie->msi_iova -= size; + else + free_iova_fast(iovad, iova >> shift, size >> shift); +} + +static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr, + size_t size) +{ + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + size_t iova_off = iova_offset(iovad, dma_addr); + + dma_addr -= iova_off; + size = iova_align(iovad, size + iova_off); - size = iova_size(iova) << shift; - size -= iommu_unmap(domain, pfn << shift, size); - /* ...and if we can't, then something is horribly, horribly wrong */ - WARN_ON(size > 0); - __free_iova(iovad, iova); + WARN_ON(iommu_unmap(domain, dma_addr, size) != size); + iommu_dma_free_iova(cookie, dma_addr, size); } static void __iommu_dma_free_pages(struct page **pages, int count) @@ -409,7 +498,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, void iommu_dma_free(struct device *dev, struct page **pages, size_t size, dma_addr_t *handle) { - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle); + __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size); __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); *handle = DMA_ERROR_CODE; } @@ -437,11 +526,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, void (*flush_page)(struct device *, const void *, phys_addr_t)) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iova_domain *iovad = cookie_iovad(domain); - struct iova *iova; + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; struct page **pages; struct sg_table sgt; - dma_addr_t dma_addr; + dma_addr_t iova; unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; *handle = DMA_ERROR_CODE; @@ -461,11 +550,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, if (!pages) return NULL; - iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev); + size = iova_align(iovad, size); + iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev); if (!iova) goto out_free_pages; - size = iova_align(iovad, size); if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL)) goto out_free_iova; @@ -481,19 +570,18 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, sg_miter_stop(&miter); } - dma_addr = iova_dma_addr(iovad, iova); - if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot) + if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot) < size) goto out_free_sg; - *handle = dma_addr; + *handle = iova; sg_free_table(&sgt); return pages; out_free_sg: 
sg_free_table(&sgt); out_free_iova: - __free_iova(iovad, iova); + iommu_dma_free_iova(cookie, iova, size); out_free_pages: __iommu_dma_free_pages(pages, count); return NULL; @@ -527,22 +615,22 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma) static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, size_t size, int prot) { - dma_addr_t dma_addr; struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iova_domain *iovad = cookie_iovad(domain); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; size_t iova_off = iova_offset(iovad, phys); - size_t len = iova_align(iovad, size + iova_off); - struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev); + dma_addr_t iova; + size = iova_align(iovad, size + iova_off); + iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); if (!iova) return DMA_ERROR_CODE; - dma_addr = iova_dma_addr(iovad, iova); - if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) { - __free_iova(iovad, iova); + if (iommu_map(domain, iova, phys - iova_off, size, prot)) { + iommu_dma_free_iova(cookie, iova, size); return DMA_ERROR_CODE; } - return dma_addr + iova_off; + return iova + iova_off; } dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, @@ -554,7 +642,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, unsigned long attrs) { - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); + __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size); } /* @@ -643,10 +731,10 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int prot) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - struct iova_domain *iovad = cookie_iovad(domain); - struct iova *iova; + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; struct scatterlist *s, *prev = NULL; - dma_addr_t dma_addr; + dma_addr_t iova; size_t iova_len = 0; unsigned long mask = dma_get_seg_boundary(dev); int i; @@ -690,7 +778,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, prev = s; } - iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev); + iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev); if (!iova) goto out_restore_sg; @@ -698,14 +786,13 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, * We'll leave any physical concatenation to the IOMMU driver's * implementation - it knows better than we do. */ - dma_addr = iova_dma_addr(iovad, iova); - if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len) + if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len) goto out_free_iova; - return __finalise_sg(dev, sg, nents, dma_addr); + return __finalise_sg(dev, sg, nents, iova); out_free_iova: - __free_iova(iovad, iova); + iommu_dma_free_iova(cookie, iova, iova_len); out_restore_sg: __invalidate_sg(sg, nents); return 0; @@ -714,11 +801,21 @@ out_restore_sg: void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { + dma_addr_t start, end; + struct scatterlist *tmp; + int i; /* * The scatterlist segments are mapped into a single * contiguous IOVA allocation, so this is incredibly easy. 
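To make the offset arithmetic in __iommu_dma_map() concrete, here is an illustrative walk-through with hypothetical values and a 4KiB granule:

	phys_addr_t phys = 0x80001040;	/* hypothetical sub-page buffer */
	size_t size = 0x100;
	size_t iova_off = iova_offset(iovad, phys);	/* 0x040 */

	size = iova_align(iovad, size + iova_off);	/* 0x1000 */
	/*
	 * iommu_map() then maps phys - iova_off (0x80001000) for the full
	 * 0x1000 bytes, and the caller receives iova + iova_off so the
	 * sub-page offset survives in the returned DMA address.
	 */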
*/ - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg)); + start = sg_dma_address(sg); + for_each_sg(sg_next(sg), tmp, nents - 1, i) { + if (sg_dma_len(tmp) == 0) + break; + sg = tmp; + } + end = sg_dma_address(sg) + sg_dma_len(sg); + __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start); } dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, @@ -731,7 +828,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, unsigned long attrs) { - __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); + __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size); } int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) @@ -744,8 +841,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, { struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iommu_dma_msi_page *msi_page; - struct iova_domain *iovad = cookie_iovad(domain); - struct iova *iova; + dma_addr_t iova; int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; size_t size = cookie_msi_granule(cookie); @@ -758,29 +854,16 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, if (!msi_page) return NULL; - msi_page->phys = msi_addr; - if (iovad) { - iova = __alloc_iova(domain, size, dma_get_mask(dev), dev); - if (!iova) - goto out_free_page; - msi_page->iova = iova_dma_addr(iovad, iova); - } else { - msi_page->iova = cookie->msi_iova; - cookie->msi_iova += size; - } - - if (iommu_map(domain, msi_page->iova, msi_addr, size, prot)) - goto out_free_iova; + iova = __iommu_dma_map(dev, msi_addr, size, prot); + if (iommu_dma_mapping_error(dev, iova)) + goto out_free_page; INIT_LIST_HEAD(&msi_page->list); + msi_page->phys = msi_addr; + msi_page->iova = iova; list_add(&msi_page->list, &cookie->msi_page_list); return msi_page; -out_free_iova: - if (iovad) - __free_iova(iovad, iova); - else - cookie->msi_iova -= size; out_free_page: kfree(msi_page); return NULL; diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 36e3f430d265..cbf7763d8091 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -311,7 +311,7 @@ static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info) ((void *)drhd) + drhd->header.length, dmaru->segment, dmaru->devices, dmaru->devices_cnt); - if (ret != 0) + if (ret) break; } if (ret >= 0) @@ -391,7 +391,7 @@ static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_hardware_unit *drhd; struct dmar_drhd_unit *dmaru; - int ret = 0; + int ret; drhd = (struct acpi_dmar_hardware_unit *)header; dmaru = dmar_find_dmaru(drhd); @@ -551,17 +551,16 @@ static int __init dmar_table_detect(void) status = AE_NOT_FOUND; } - return (ACPI_SUCCESS(status) ? 1 : 0); + return ACPI_SUCCESS(status) ? 
0 : -ENOENT; } static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, size_t len, struct dmar_res_callback *cb) { - int ret = 0; struct acpi_dmar_header *iter, *next; struct acpi_dmar_header *end = ((void *)start) + len; - for (iter = start; iter < end && ret == 0; iter = next) { + for (iter = start; iter < end; iter = next) { next = (void *)iter + iter->length; if (iter->length == 0) { /* Avoid looping forever on bad ACPI tables */ @@ -570,8 +569,7 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, } else if (next > end) { /* Avoid passing table end */ pr_warn(FW_BUG "Record passes table end\n"); - ret = -EINVAL; - break; + return -EINVAL; } if (cb->print_entry) @@ -582,15 +580,19 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start, pr_debug("Unknown DMAR structure type %d\n", iter->type); } else if (cb->cb[iter->type]) { + int ret; + ret = cb->cb[iter->type](iter, cb->arg[iter->type]); + if (ret) + return ret; } else if (!cb->ignore_unhandled) { pr_warn("No handler for DMAR structure type %d\n", iter->type); - ret = -EINVAL; + return -EINVAL; } } - return ret; + return 0; } static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar, @@ -607,8 +609,8 @@ static int __init parse_dmar_table(void) { struct acpi_table_dmar *dmar; - int ret = 0; int drhd_count = 0; + int ret; struct dmar_res_callback cb = { .print_entry = true, .ignore_unhandled = true, @@ -891,17 +893,17 @@ int __init detect_intel_iommu(void) down_write(&dmar_global_lock); ret = dmar_table_detect(); - if (ret) - ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, - &validate_drhd_cb); - if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { + if (!ret) + ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl, + &validate_drhd_cb); + if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) { iommu_detected = 1; /* Make sure ACS will be enabled */ pci_request_acs(); } #ifdef CONFIG_X86 - if (ret) + if (!ret) x86_init.iommu.iommu_init = intel_iommu_init; #endif @@ -911,10 +913,9 @@ int __init detect_intel_iommu(void) } up_write(&dmar_global_lock); - return ret ? 1 : -ENODEV; + return ret ? 
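The net effect of the dmar.c churn is a consistent internal error convention, 0 on success and a negative errno on failure, with detect_intel_iommu() alone translating back to the positive "found" value its existing callers expect. A sketch of the resulting contract:

	int ret = dmar_table_detect();	/* 0, or -ENOENT if no DMAR table */

	if (!ret)
		ret = dmar_walk_dmar_table(dmar, &validate_drhd_cb);
	/* detect_intel_iommu() ultimately returns ret, or 1 on success */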
ret : 1; } - static void unmap_iommu(struct intel_iommu *iommu) { iounmap(iommu->reg); diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index c01bfcdb2383..2395478dde75 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -171,6 +171,9 @@ static u32 lv2ent_offset(sysmmu_iova_t iova) #define REG_V5_PT_BASE_PFN 0x00C #define REG_V5_MMU_FLUSH_ALL 0x010 #define REG_V5_MMU_FLUSH_ENTRY 0x014 +#define REG_V5_MMU_FLUSH_RANGE 0x018 +#define REG_V5_MMU_FLUSH_START 0x020 +#define REG_V5_MMU_FLUSH_END 0x024 #define REG_V5_INT_STATUS 0x060 #define REG_V5_INT_CLEAR 0x064 #define REG_V5_FAULT_AR_VA 0x070 @@ -319,14 +322,23 @@ static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, { unsigned int i; - for (i = 0; i < num_inv; i++) { - if (MMU_MAJ_VER(data->version) < 5) + if (MMU_MAJ_VER(data->version) < 5) { + for (i = 0; i < num_inv; i++) { writel((iova & SPAGE_MASK) | 1, data->sfrbase + REG_MMU_FLUSH_ENTRY); - else + iova += SPAGE_SIZE; + } + } else { + if (num_inv == 1) { writel((iova & SPAGE_MASK) | 1, data->sfrbase + REG_V5_MMU_FLUSH_ENTRY); - iova += SPAGE_SIZE; + } else { + writel((iova & SPAGE_MASK), + data->sfrbase + REG_V5_MMU_FLUSH_START); + writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE, + data->sfrbase + REG_V5_MMU_FLUSH_END); + writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE); + } } } @@ -747,16 +759,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) goto err_counter; /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */ - for (i = 0; i < NUM_LV1ENTRIES; i += 8) { - domain->pgtable[i + 0] = ZERO_LV2LINK; - domain->pgtable[i + 1] = ZERO_LV2LINK; - domain->pgtable[i + 2] = ZERO_LV2LINK; - domain->pgtable[i + 3] = ZERO_LV2LINK; - domain->pgtable[i + 4] = ZERO_LV2LINK; - domain->pgtable[i + 5] = ZERO_LV2LINK; - domain->pgtable[i + 6] = ZERO_LV2LINK; - domain->pgtable[i + 7] = ZERO_LV2LINK; - } + for (i = 0; i < NUM_LV1ENTRIES; i++) + domain->pgtable[i] = ZERO_LV2LINK; handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE, DMA_TO_DEVICE); diff --git a/drivers/iommu/fsl_pamu.h b/drivers/iommu/fsl_pamu.h index aab723f91f12..c3434f29c967 100644 --- a/drivers/iommu/fsl_pamu.h +++ b/drivers/iommu/fsl_pamu.h @@ -20,6 +20,7 @@ #define __FSL_PAMU_H #include <linux/iommu.h> +#include <linux/pci.h> #include <asm/fsl_pamu_stash.h> diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index d412a313a372..90ab0115d78e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -183,6 +183,7 @@ static int rwbf_quirk; * (used when kernel is launched w/ TXT) */ static int force_on = 0; +int intel_iommu_tboot_noforce; /* * 0: Present @@ -607,6 +608,10 @@ static int __init intel_iommu_setup(char *str) "Intel-IOMMU: enable pre-production PASID support\n"); intel_iommu_pasid28 = 1; iommu_identity_mapping |= IDENTMAP_GFX; + } else if (!strncmp(str, "tboot_noforce", 13)) { + printk(KERN_INFO + "Intel-IOMMU: not forcing on after tboot. 
This could expose security risk for tboot\n"); + intel_iommu_tboot_noforce = 1; } str += strcspn(str, ","); @@ -4730,6 +4735,15 @@ static int intel_iommu_cpu_dead(unsigned int cpu) return 0; } +static void intel_disable_iommus(void) +{ + struct intel_iommu *iommu = NULL; + struct dmar_drhd_unit *drhd; + + for_each_iommu(iommu, drhd) + iommu_disable_translation(iommu); +} + static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) { return container_of(dev, struct intel_iommu, iommu.dev); @@ -4840,8 +4854,28 @@ int __init intel_iommu_init(void) goto out_free_dmar; } - if (no_iommu || dmar_disabled) + if (no_iommu || dmar_disabled) { + /* + * We exit the function here to ensure IOMMU's remapping and + * mempool aren't setup, which means that the IOMMU's PMRs + * won't be disabled via the call to init_dmars(). So disable + * it explicitly here. The PMRs were setup by tboot prior to + * calling SENTER, but the kernel is expected to reset/tear + * down the PMRs. + */ + if (intel_iommu_tboot_noforce) { + for_each_iommu(iommu, drhd) + iommu_disable_protect_mem_regions(iommu); + } + + /* + * Make sure the IOMMUs are switched off, even when we + * boot into a kexec kernel and the previous kernel left + * them enabled + */ + intel_disable_iommus(); goto out_free_dmar; + } if (list_empty(&dmar_rmrr_units)) pr_info("No RMRR found\n"); diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index ac596928f6b4..a190cbd76ef7 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -408,14 +408,6 @@ static int iommu_load_old_irte(struct intel_iommu *iommu) size_t size; u64 irta; - if (!is_kdump_kernel()) { - pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n", - iommu->name); - clear_ir_pre_enabled(iommu); - iommu_disable_irq_remapping(iommu); - return -EINVAL; - } - /* Check whether the old ir-table has the same size as ours */ irta = dmar_readq(iommu->reg + DMAR_IRTA_REG); if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK) @@ -567,7 +559,12 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) init_ir_status(iommu); if (ir_pre_enabled(iommu)) { - if (iommu_load_old_irte(iommu)) + if (!is_kdump_kernel()) { + pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n", + iommu->name); + clear_ir_pre_enabled(iommu); + iommu_disable_irq_remapping(iommu); + } else if (iommu_load_old_irte(iommu)) pr_err("Failed to copy IR table for %s from previous kernel\n", iommu->name); else diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 770ba7e7ef4d..cf7ca7e70777 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1105,8 +1105,12 @@ static int iommu_bus_notifier(struct notifier_block *nb, * result in ADD/DEL notifiers to group->notifier */ if (action == BUS_NOTIFY_ADD_DEVICE) { - if (ops->add_device) - return ops->add_device(dev); + if (ops->add_device) { + int ret; + + ret = ops->add_device(dev); + return (ret) ? NOTIFY_DONE : NOTIFY_OK; + } } else if (action == BUS_NOTIFY_REMOVED_DEVICE) { if (ops->remove_device && dev->iommu_group) { ops->remove_device(dev); @@ -1674,6 +1678,48 @@ void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr) } EXPORT_SYMBOL_GPL(iommu_domain_window_disable); +/** + * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework + * @domain: the iommu domain where the fault has happened + * @dev: the device where the fault has happened + * @iova: the faulting address + * @flags: mmu fault flags (e.g. 
IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...) + * + * This function should be called by the low-level IOMMU implementations + * whenever IOMMU faults happen, to allow high-level users, that are + * interested in such events, to know about them. + * + * This event may be useful for several possible use cases: + * - mere logging of the event + * - dynamic TLB/PTE loading + * - if restarting of the faulting device is required + * + * Returns 0 on success and an appropriate error code otherwise (if dynamic + * PTE/TLB loading will one day be supported, implementations will be able + * to tell whether it succeeded or not according to this return value). + * + * Specifically, -ENOSYS is returned if a fault handler isn't installed + * (though fault handlers can also return -ENOSYS, in case they want to + * elicit the default behavior of the IOMMU drivers). + */ +int report_iommu_fault(struct iommu_domain *domain, struct device *dev, + unsigned long iova, int flags) +{ + int ret = -ENOSYS; + + /* + * if upper layers showed interest and installed a fault handler, + * invoke it. + */ + if (domain->handler) + ret = domain->handler(domain, dev, iova, flags, + domain->handler_token); + + trace_io_page_fault(dev, iova, flags); + return ret; +} +EXPORT_SYMBOL_GPL(report_iommu_fault); + static int __init iommu_init(void) { iommu_group_kset = kset_create_and_add("iommu_groups", diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index b7268a14184f..5c88ba70e4e0 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -100,6 +100,34 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) } } +/* Insert the iova into domain rbtree by holding writer lock */ +static void +iova_insert_rbtree(struct rb_root *root, struct iova *iova, + struct rb_node *start) +{ + struct rb_node **new, *parent = NULL; + + new = (start) ? &start : &(root->rb_node); + /* Figure out where to put new node */ + while (*new) { + struct iova *this = rb_entry(*new, struct iova, node); + + parent = *new; + + if (iova->pfn_lo < this->pfn_lo) + new = &((*new)->rb_left); + else if (iova->pfn_lo > this->pfn_lo) + new = &((*new)->rb_right); + else { + WARN_ON(1); /* this should not happen */ + return; + } + } + /* Add new node and rebalance tree. */ + rb_link_node(&iova->node, parent, new); + rb_insert_color(&iova->node, root); +} + /* * Computes the padding size required, to make the start address * naturally aligned on the power-of-two order of its size @@ -138,7 +166,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, break; /* found a free slot */ } adjust_limit_pfn: - limit_pfn = curr_iova->pfn_lo - 1; + limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0; move_left: prev = curr; curr = rb_prev(curr); @@ -157,35 +185,8 @@ move_left: new->pfn_lo = limit_pfn - (size + pad_size) + 1; new->pfn_hi = new->pfn_lo + size - 1; - /* Insert the new_iova into domain rbtree by holding writer lock */ - /* Add new node and rebalance tree. */ - { - struct rb_node **entry, *parent = NULL; - - /* If we have 'prev', it's a valid place to start the - insertion. Otherwise, start from the root. 
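As a usage sketch of the now-documented fault-reporting path (the my_ names are hypothetical): an interested upper layer installs a handler on the domain with iommu_set_fault_handler(), and the low-level driver's IRQ path then funnels faults through report_iommu_fault():

	static int my_fault_handler(struct iommu_domain *domain,
				    struct device *dev, unsigned long iova,
				    int flags, void *token)
	{
		dev_err(dev, "unhandled %s fault at IOVA 0x%lx\n",
			(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
		return -ENOSYS;	/* elicit the driver's default behaviour */
	}

	/* at domain setup time */
	iommu_set_fault_handler(domain, my_fault_handler, NULL);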
*/ - if (prev) - entry = &prev; - else - entry = &iovad->rbroot.rb_node; - - /* Figure out where to put new node */ - while (*entry) { - struct iova *this = rb_entry(*entry, struct iova, node); - parent = *entry; - - if (new->pfn_lo < this->pfn_lo) - entry = &((*entry)->rb_left); - else if (new->pfn_lo > this->pfn_lo) - entry = &((*entry)->rb_right); - else - BUG(); /* this should not happen */ - } - - /* Add new node and rebalance tree. */ - rb_link_node(&new->node, parent, entry); - rb_insert_color(&new->node, &iovad->rbroot); - } + /* If we have 'prev', it's a valid place to start the insertion. */ + iova_insert_rbtree(&iovad->rbroot, new, prev); __cached_rbnode_insert_update(iovad, saved_pfn, new); spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); @@ -194,28 +195,6 @@ move_left: return 0; } -static void -iova_insert_rbtree(struct rb_root *root, struct iova *iova) -{ - struct rb_node **new = &(root->rb_node), *parent = NULL; - /* Figure out where to put new node */ - while (*new) { - struct iova *this = rb_entry(*new, struct iova, node); - - parent = *new; - - if (iova->pfn_lo < this->pfn_lo) - new = &((*new)->rb_left); - else if (iova->pfn_lo > this->pfn_lo) - new = &((*new)->rb_right); - else - BUG(); /* this should not happen */ - } - /* Add new node and rebalance tree. */ - rb_link_node(&iova->node, parent, new); - rb_insert_color(&iova->node, root); -} - static struct kmem_cache *iova_cache; static unsigned int iova_cache_users; static DEFINE_MUTEX(iova_cache_mutex); @@ -505,7 +484,7 @@ __insert_new_range(struct iova_domain *iovad, iova = alloc_and_init_iova(pfn_lo, pfn_hi); if (iova) - iova_insert_rbtree(&iovad->rbroot, iova); + iova_insert_rbtree(&iovad->rbroot, iova, NULL); return iova; } @@ -612,11 +591,11 @@ split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, rb_erase(&iova->node, &iovad->rbroot); if (prev) { - iova_insert_rbtree(&iovad->rbroot, prev); + iova_insert_rbtree(&iovad->rbroot, prev, NULL); iova->pfn_lo = pfn_lo; } if (next) { - iova_insert_rbtree(&iovad->rbroot, next); + iova_insert_rbtree(&iovad->rbroot, next, NULL); iova->pfn_hi = pfn_hi; } spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 19e010083408..a27ef570c328 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -431,9 +431,10 @@ err_release_mapping: static int mtk_iommu_add_device(struct device *dev) { - struct iommu_group *group; struct of_phandle_args iommu_spec; struct of_phandle_iterator it; + struct mtk_iommu_data *data; + struct iommu_group *group; int err; of_for_each_phandle(&it, err, dev->of_node, "iommus", @@ -450,6 +451,9 @@ static int mtk_iommu_add_device(struct device *dev) if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) return -ENODEV; /* Not a iommu client device */ + data = dev->iommu_fwspec->iommu_priv; + iommu_device_link(&data->iommu, dev); + group = iommu_group_get_for_dev(dev); if (IS_ERR(group)) return PTR_ERR(group); @@ -460,9 +464,14 @@ static int mtk_iommu_add_device(struct device *dev) static void mtk_iommu_remove_device(struct device *dev) { + struct mtk_iommu_data *data; + if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) return; + data = dev->iommu_fwspec->iommu_priv; + iommu_device_unlink(&data->iommu, dev); + iommu_group_remove_device(dev); iommu_fwspec_free(dev); } @@ -627,6 +636,17 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (ret) return ret; + ret = 
iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, + dev_name(&pdev->dev)); + if (ret) + return ret; + + iommu_device_set_ops(&data->iommu, &mtk_iommu_ops); + + ret = iommu_device_register(&data->iommu); + if (ret) + return ret; + if (!iommu_present(&platform_bus_type)) bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); @@ -637,6 +657,9 @@ static int mtk_iommu_remove(struct platform_device *pdev) { struct mtk_iommu_data *data = platform_get_drvdata(pdev); + iommu_device_sysfs_remove(&data->iommu); + iommu_device_unregister(&data->iommu); + if (iommu_present(&platform_bus_type)) bus_set_iommu(&platform_bus_type, NULL); diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 2683e9fc0dcf..9f44ee8ea1bc 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -96,6 +96,49 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index, } EXPORT_SYMBOL_GPL(of_get_dma_window); +static bool of_iommu_driver_present(struct device_node *np) +{ + /* + * If the IOMMU still isn't ready by the time we reach init, assume + * it never will be. We don't want to defer indefinitely, nor attempt + * to dereference __iommu_of_table after it's been freed. + */ + if (system_state > SYSTEM_BOOTING) + return false; + + return of_match_node(&__iommu_of_table, np); +} + +static const struct iommu_ops +*of_iommu_xlate(struct device *dev, struct of_phandle_args *iommu_spec) +{ + const struct iommu_ops *ops; + struct fwnode_handle *fwnode = &iommu_spec->np->fwnode; + int err; + + ops = iommu_ops_from_fwnode(fwnode); + if ((ops && !ops->of_xlate) || + (!ops && !of_iommu_driver_present(iommu_spec->np))) + return NULL; + + err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops); + if (err) + return ERR_PTR(err); + /* + * The otherwise-empty fwspec handily serves to indicate the specific + * IOMMU device we're waiting for, which will be useful if we ever get + * a proper probe-ordering dependency mechanism in future. + */ + if (!ops) + return ERR_PTR(-EPROBE_DEFER); + + err = ops->of_xlate(dev, iommu_spec); + if (err) + return ERR_PTR(err); + + return ops; +} + static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) { struct of_phandle_args *iommu_spec = data; @@ -105,10 +148,11 @@ static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) } static const struct iommu_ops -*of_pci_iommu_configure(struct pci_dev *pdev, struct device_node *bridge_np) +*of_pci_iommu_init(struct pci_dev *pdev, struct device_node *bridge_np) { const struct iommu_ops *ops; struct of_phandle_args iommu_spec; + int err; /* * Start by tracing the RID alias down the PCI topology as @@ -123,56 +167,76 @@ static const struct iommu_ops * bus into the system beyond, and which IOMMU it ends up at. */ iommu_spec.np = NULL; - if (of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map", - "iommu-map-mask", &iommu_spec.np, iommu_spec.args)) - return NULL; + err = of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map", + "iommu-map-mask", &iommu_spec.np, + iommu_spec.args); + if (err) + return err == -ENODEV ? 
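The mtk_iommu_v1 hunk follows the registration pattern this series rolls out across drivers (the omap and rockchip hunks below do the same): publish the instance in sysfs, attach its ops, register it with the core, and tear down symmetrically in .remove. A condensed sketch, with my_iommu_ops standing in for the driver's ops structure:

	/* probe */
	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, dev_name(dev));
	if (ret)
		return ret;
	iommu_device_set_ops(&data->iommu, &my_iommu_ops);
	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	/* remove */
	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);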
NULL : ERR_PTR(err); - ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode); - if (!ops || !ops->of_xlate || - iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) || - ops->of_xlate(&pdev->dev, &iommu_spec)) - ops = NULL; + ops = of_iommu_xlate(&pdev->dev, &iommu_spec); of_node_put(iommu_spec.np); return ops; } -const struct iommu_ops *of_iommu_configure(struct device *dev, - struct device_node *master_np) +static const struct iommu_ops +*of_platform_iommu_init(struct device *dev, struct device_node *np) { struct of_phandle_args iommu_spec; - struct device_node *np; const struct iommu_ops *ops = NULL; int idx = 0; - if (dev_is_pci(dev)) - return of_pci_iommu_configure(to_pci_dev(dev), master_np); - /* * We don't currently walk up the tree looking for a parent IOMMU. * See the `Notes:' section of * Documentation/devicetree/bindings/iommu/iommu.txt */ - while (!of_parse_phandle_with_args(master_np, "iommus", - "#iommu-cells", idx, - &iommu_spec)) { - np = iommu_spec.np; - ops = iommu_ops_from_fwnode(&np->fwnode); - - if (!ops || !ops->of_xlate || - iommu_fwspec_init(dev, &np->fwnode, ops) || - ops->of_xlate(dev, &iommu_spec)) - goto err_put_node; - - of_node_put(np); + while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", + idx, &iommu_spec)) { + ops = of_iommu_xlate(dev, &iommu_spec); + of_node_put(iommu_spec.np); idx++; + if (IS_ERR_OR_NULL(ops)) + break; } return ops; +} + +const struct iommu_ops *of_iommu_configure(struct device *dev, + struct device_node *master_np) +{ + const struct iommu_ops *ops; + struct iommu_fwspec *fwspec = dev->iommu_fwspec; + + if (!master_np) + return NULL; + + if (fwspec) { + if (fwspec->ops) + return fwspec->ops; + + /* In the deferred case, start again from scratch */ + iommu_fwspec_free(dev); + } -err_put_node: - of_node_put(np); - return NULL; + if (dev_is_pci(dev)) + ops = of_pci_iommu_init(to_pci_dev(dev), master_np); + else + ops = of_platform_iommu_init(dev, master_np); + /* + * If we have reason to believe the IOMMU driver missed the initial + * add_device callback for dev, replay it to get things in order. + */ + if (!IS_ERR_OR_NULL(ops) && ops->add_device && + dev->bus && !dev->iommu_group) { + int err = ops->add_device(dev); + + if (err) + ops = ERR_PTR(err); + } + + return ops; } static int __init of_iommu_init(void) @@ -183,7 +247,7 @@ static int __init of_iommu_init(void) for_each_matching_node_and_match(np, matches, &match) { const of_iommu_init_fn init_fn = match->data; - if (init_fn(np)) + if (init_fn && init_fn(np)) pr_err("Failed to initialise IOMMU %s\n", of_node_full_name(np)); } diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index e2583cce2cc1..95dfca36ccb9 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -36,28 +36,14 @@ #include "omap-iopgtable.h" #include "omap-iommu.h" +static const struct iommu_ops omap_iommu_ops; + #define to_iommu(dev) \ ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))) /* bitmap of the page sizes currently supported */ #define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) -/** - * struct omap_iommu_domain - omap iommu domain - * @pgtable: the page table - * @iommu_dev: an omap iommu device attached to this domain. only a single - * iommu device can be attached for now. - * @dev: Device using this domain. 
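The practical upshot of the of_iommu changes is a probe-deferral contract: of_iommu_configure() can now return ERR_PTR(-EPROBE_DEFER) when the DT describes an IOMMU whose driver has not probed yet, instead of silently reporting no ops. A sketch of how a caller is expected to treat the result (assuming the DMA-configuration path is the consumer):

	const struct iommu_ops *ops = of_iommu_configure(dev, master_np);

	if (IS_ERR(ops))
		return PTR_ERR(ops);	/* -EPROBE_DEFER retries dev later */
	if (ops)
		/* set up IOMMU-backed DMA ops for dev */;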
- * @lock: domain lock, should be taken when attaching/detaching - */ -struct omap_iommu_domain { - u32 *pgtable; - struct omap_iommu *iommu_dev; - struct device *dev; - spinlock_t lock; - struct iommu_domain domain; -}; - #define MMU_LOCK_BASE_SHIFT 10 #define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT) #define MMU_LOCK_BASE(x) \ @@ -818,33 +804,14 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) return IRQ_NONE; } -static int device_match_by_alias(struct device *dev, void *data) -{ - struct omap_iommu *obj = to_iommu(dev); - const char *name = data; - - pr_debug("%s: %s %s\n", __func__, obj->name, name); - - return strcmp(obj->name, name) == 0; -} - /** * omap_iommu_attach() - attach iommu device to an iommu domain - * @name: name of target omap iommu device + * @obj: target omap iommu device * @iopgd: page table **/ -static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) +static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd) { int err; - struct device *dev; - struct omap_iommu *obj; - - dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name, - device_match_by_alias); - if (!dev) - return ERR_PTR(-ENODEV); - - obj = to_iommu(dev); spin_lock(&obj->iommu_lock); @@ -857,11 +824,13 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) spin_unlock(&obj->iommu_lock); dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); - return obj; + + return 0; err_enable: spin_unlock(&obj->iommu_lock); - return ERR_PTR(err); + + return err; } /** @@ -928,28 +897,26 @@ static int omap_iommu_probe(struct platform_device *pdev) int irq; struct omap_iommu *obj; struct resource *res; - struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device_node *of = pdev->dev.of_node; + if (!of) { + pr_err("%s: only DT-based devices are supported\n", __func__); + return -ENODEV; + } + obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); if (!obj) return -ENOMEM; - if (of) { - obj->name = dev_name(&pdev->dev); - obj->nr_tlb_entries = 32; - err = of_property_read_u32(of, "ti,#tlb-entries", - &obj->nr_tlb_entries); - if (err && err != -EINVAL) - return err; - if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) - return -EINVAL; - if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) - obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; - } else { - obj->nr_tlb_entries = pdata->nr_tlb_entries; - obj->name = pdata->name; - } + obj->name = dev_name(&pdev->dev); + obj->nr_tlb_entries = 32; + err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries); + if (err && err != -EINVAL) + return err; + if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) + return -EINVAL; + if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) + obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; obj->dev = &pdev->dev; obj->ctx = (void *)obj + sizeof(*obj); @@ -976,19 +943,46 @@ static int omap_iommu_probe(struct platform_device *pdev) return err; platform_set_drvdata(pdev, obj); + obj->group = iommu_group_alloc(); + if (IS_ERR(obj->group)) + return PTR_ERR(obj->group); + + err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL, obj->name); + if (err) + goto out_group; + + iommu_device_set_ops(&obj->iommu, &omap_iommu_ops); + + err = iommu_device_register(&obj->iommu); + if (err) + goto out_sysfs; + pm_runtime_irq_safe(obj->dev); pm_runtime_enable(obj->dev); omap_iommu_debugfs_add(obj); dev_info(&pdev->dev, "%s registered\n", obj->name); + return 0; + +out_sysfs: + 
iommu_device_sysfs_remove(&obj->iommu); +out_group: + iommu_group_put(obj->group); + return err; } static int omap_iommu_remove(struct platform_device *pdev) { struct omap_iommu *obj = platform_get_drvdata(pdev); + iommu_group_put(obj->group); + obj->group = NULL; + + iommu_device_sysfs_remove(&obj->iommu); + iommu_device_unregister(&obj->iommu); + omap_iommu_debugfs_remove(obj); pm_runtime_disable(obj->dev); @@ -1077,11 +1071,11 @@ static int omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) { struct omap_iommu_domain *omap_domain = to_omap_domain(domain); - struct omap_iommu *oiommu; struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; + struct omap_iommu *oiommu; int ret = 0; - if (!arch_data || !arch_data->name) { + if (!arch_data || !arch_data->iommu_dev) { dev_err(dev, "device doesn't have an associated iommu\n"); return -EINVAL; } @@ -1095,15 +1089,16 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) goto out; } + oiommu = arch_data->iommu_dev; + /* get a handle to and enable the omap iommu */ - oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable); - if (IS_ERR(oiommu)) { - ret = PTR_ERR(oiommu); + ret = omap_iommu_attach(oiommu, omap_domain->pgtable); + if (ret) { dev_err(dev, "can't get omap iommu: %d\n", ret); goto out; } - omap_domain->iommu_dev = arch_data->iommu_dev = oiommu; + omap_domain->iommu_dev = oiommu; omap_domain->dev = dev; oiommu->domain = domain; @@ -1116,7 +1111,6 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, struct device *dev) { struct omap_iommu *oiommu = dev_to_omap_iommu(dev); - struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; /* only a single device is supported per domain for now */ if (omap_domain->iommu_dev != oiommu) { @@ -1128,7 +1122,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, omap_iommu_detach(oiommu); - omap_domain->iommu_dev = arch_data->iommu_dev = NULL; + omap_domain->iommu_dev = NULL; omap_domain->dev = NULL; oiommu->domain = NULL; } @@ -1232,8 +1226,11 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, static int omap_iommu_add_device(struct device *dev) { struct omap_iommu_arch_data *arch_data; + struct omap_iommu *oiommu; + struct iommu_group *group; struct device_node *np; struct platform_device *pdev; + int ret; /* * Allocate the archdata iommu structure for DT-based devices. 
@@ -1254,15 +1251,41 @@ static int omap_iommu_add_device(struct device *dev) return -EINVAL; } + oiommu = platform_get_drvdata(pdev); + if (!oiommu) { + of_node_put(np); + return -EINVAL; + } + arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); if (!arch_data) { of_node_put(np); return -ENOMEM; } - arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL); + ret = iommu_device_link(&oiommu->iommu, dev); + if (ret) { + kfree(arch_data); + of_node_put(np); + return ret; + } + + arch_data->iommu_dev = oiommu; dev->archdata.iommu = arch_data; + /* + * IOMMU group initialization calls into omap_iommu_device_group, which + * needs a valid dev->archdata.iommu pointer + */ + group = iommu_group_get_for_dev(dev); + if (IS_ERR(group)) { + iommu_device_unlink(&oiommu->iommu, dev); + dev->archdata.iommu = NULL; + kfree(arch_data); + return PTR_ERR(group); + } + iommu_group_put(group); + of_node_put(np); return 0; @@ -1275,8 +1298,23 @@ static void omap_iommu_remove_device(struct device *dev) if (!dev->of_node || !arch_data) return; - kfree(arch_data->name); + iommu_device_unlink(&arch_data->iommu_dev->iommu, dev); + iommu_group_remove_device(dev); + + dev->archdata.iommu = NULL; kfree(arch_data); + +} + +static struct iommu_group *omap_iommu_device_group(struct device *dev) +{ + struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; + struct iommu_group *group = NULL; + + if (arch_data->iommu_dev) + group = arch_data->iommu_dev->group; + + return group; } static const struct iommu_ops omap_iommu_ops = { @@ -1290,6 +1328,7 @@ static const struct iommu_ops omap_iommu_ops = { .iova_to_phys = omap_iommu_iova_to_phys, .add_device = omap_iommu_add_device, .remove_device = omap_iommu_remove_device, + .device_group = omap_iommu_device_group, .pgsize_bitmap = OMAP_IOMMU_PGSIZES, }; @@ -1299,6 +1338,7 @@ static int __init omap_iommu_init(void) const unsigned long flags = SLAB_HWCACHE_ALIGN; size_t align = 1 << 10; /* L2 pagetable alignement */ struct device_node *np; + int ret; np = of_find_matching_node(NULL, omap_iommu_of_match); if (!np) @@ -1312,11 +1352,25 @@ static int __init omap_iommu_init(void) return -ENOMEM; iopte_cachep = p; - bus_set_iommu(&platform_bus_type, &omap_iommu_ops); - omap_iommu_debugfs_init(); - return platform_driver_register(&omap_iommu_driver); + ret = platform_driver_register(&omap_iommu_driver); + if (ret) { + pr_err("%s: failed to register driver\n", __func__); + goto fail_driver; + } + + ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops); + if (ret) + goto fail_bus; + + return 0; + +fail_bus: + platform_driver_unregister(&omap_iommu_driver); +fail_driver: + kmem_cache_destroy(iopte_cachep); + return ret; } subsys_initcall(omap_iommu_init); /* must be ready before omap3isp is probed */ diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 59628e5017b4..6e70515e6038 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -14,6 +14,7 @@ #define _OMAP_IOMMU_H #include <linux/bitops.h> +#include <linux/iommu.h> #define for_each_iotlb_cr(obj, n, __i, cr) \ for (__i = 0; \ @@ -27,6 +28,23 @@ struct iotlb_entry { u32 endian, elsz, mixed; }; +/** + * struct omap_iommu_domain - omap iommu domain + * @pgtable: the page table + * @iommu_dev: an omap iommu device attached to this domain. only a single + * iommu device can be attached for now. + * @dev: Device using this domain. 
+ * @lock: domain lock, should be taken when attaching/detaching + * @domain: generic domain handle used by iommu core code + */ +struct omap_iommu_domain { + u32 *pgtable; + struct omap_iommu *iommu_dev; + struct device *dev; + spinlock_t lock; + struct iommu_domain domain; +}; + struct omap_iommu { const char *name; void __iomem *regbase; @@ -50,6 +68,22 @@ struct omap_iommu { int has_bus_err_back; u32 id; + + struct iommu_device iommu; + struct iommu_group *group; +}; + +/** + * struct omap_iommu_arch_data - omap iommu private data + * @iommu_dev: handle of the iommu device + * + * This is an omap iommu private data object, which binds an iommu user + * to its iommu device. This object should be placed at the iommu user's + * dev_archdata so generic IOMMU API can be used without having to + * utilize omap-specific plumbing anymore. + */ +struct omap_iommu_arch_data { + struct omap_iommu *iommu_dev; }; struct cr_regs { diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 9afcbf79f0b0..4ba48a26b389 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -8,6 +8,7 @@ #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-iommu.h> +#include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -90,6 +91,7 @@ struct rk_iommu { void __iomem **bases; int num_mmu; int irq; + struct iommu_device iommu; struct list_head node; /* entry in rk_iommu_domain.iommus */ struct iommu_domain *domain; /* domain to which iommu is attached */ }; @@ -1032,6 +1034,7 @@ static int rk_iommu_group_set_iommudata(struct iommu_group *group, static int rk_iommu_add_device(struct device *dev) { struct iommu_group *group; + struct rk_iommu *iommu; int ret; if (!rk_iommu_is_dev_iommu_master(dev)) @@ -1054,6 +1057,10 @@ static int rk_iommu_add_device(struct device *dev) if (ret) goto err_remove_device; + iommu = rk_iommu_from_dev(dev); + if (iommu) + iommu_device_link(&iommu->iommu, dev); + iommu_group_put(group); return 0; @@ -1067,9 +1074,15 @@ err_put_group: static void rk_iommu_remove_device(struct device *dev) { + struct rk_iommu *iommu; + if (!rk_iommu_is_dev_iommu_master(dev)) return; + iommu = rk_iommu_from_dev(dev); + if (iommu) + iommu_device_unlink(&iommu->iommu, dev); + iommu_group_remove_device(dev); } @@ -1117,7 +1130,7 @@ static int rk_iommu_probe(struct platform_device *pdev) struct rk_iommu *iommu; struct resource *res; int num_res = pdev->num_resources; - int i; + int err, i; iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); if (!iommu) @@ -1150,11 +1163,25 @@ static int rk_iommu_probe(struct platform_device *pdev) return -ENXIO; } - return 0; + err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev)); + if (err) + return err; + + iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops); + err = iommu_device_register(&iommu->iommu); + + return err; } static int rk_iommu_remove(struct platform_device *pdev) { + struct rk_iommu *iommu = platform_get_drvdata(pdev); + + if (iommu) { + iommu_device_sysfs_remove(&iommu->iommu); + iommu_device_unregister(&iommu->iommu); + } + return 0; } diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 9305964250ac..eeb19f560a05 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -15,6 +15,7 @@ #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/dma-mapping.h> #include <soc/tegra/ahb.h> #include <soc/tegra/mc.h> |
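Taken together, the omap changes replace the old name-based device_match_by_alias() lookup with a cached pointer: add_device() resolves the "iommus" phandle once and stores the omap_iommu in dev->archdata.iommu, which the attach and device_group callbacks then simply dereference. A condensed sketch of the resulting attach path (error handling omitted):

	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	struct omap_iommu *oiommu = arch_data->iommu_dev;

	/* no driver_find_device() walk by name at attach time any more */
	ret = omap_iommu_attach(oiommu, omap_domain->pgtable);
	omap_domain->iommu_dev = oiommu;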