Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig                |   2
-rw-r--r--  drivers/iommu/Makefile               |   1
-rw-r--r--  drivers/iommu/arm-smmu.c             | 140
-rw-r--r--  drivers/iommu/dmar.c                 |   6
-rw-r--r--  drivers/iommu/intel-iommu.c          |  12
-rw-r--r--  drivers/iommu/intel_irq_remapping.c  |  21
-rw-r--r--  drivers/iommu/iommu-traces.c         |  27
-rw-r--r--  drivers/iommu/iommu.c                |  21
-rw-r--r--  drivers/iommu/omap-iopgtable.h       |   2
-rw-r--r--  drivers/iommu/tegra-gart.c           |  27
-rw-r--r--  drivers/iommu/tegra-smmu.c           |   4
11 files changed, 145 insertions, 118 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c880ebaf1553..3e7fdbb4916b 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -206,7 +206,7 @@ config SHMOBILE_IPMMU_TLB
 config SHMOBILE_IOMMU
 	bool "IOMMU for Renesas IPMMU/IPMMUI"
 	default n
-	depends on (ARM && ARCH_SHMOBILE)
+	depends on ARM
 	select IOMMU_API
 	select ARM_DMA_USE_IOMMU
 	select SHMOBILE_IPMMU
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 14c1f474cf11..5d58bf16e9e3 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,4 +1,5 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 181c9ba929cd..e46a88700b68 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -392,7 +392,7 @@ struct arm_smmu_domain {
 	struct arm_smmu_cfg		root_cfg;
 	phys_addr_t			output_mask;
 
-	spinlock_t			lock;
+	struct mutex			lock;
 };
 
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -590,6 +590,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 		ret = IRQ_HANDLED;
 		resume = RESUME_RETRY;
 	} else {
+		dev_err_ratelimited(smmu->dev,
+		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
+		    iova, fsynr, root_cfg->cbndx);
 		ret = IRQ_NONE;
 		resume = RESUME_TERMINATE;
 	}
@@ -778,7 +781,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
 #ifdef __BIG_ENDIAN
 	reg |= SCTLR_E;
 #endif
-	writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
 }
 
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
@@ -897,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
 		goto out_free_domain;
 	smmu_domain->root_cfg.pgd = pgd;
 
-	spin_lock_init(&smmu_domain->lock);
+	mutex_init(&smmu_domain->lock);
 	domain->priv = smmu_domain;
 	return 0;
 
@@ -1134,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	 * Sanity check the domain. We don't currently support domains
 	 * that cross between different SMMU chains.
 	 */
-	spin_lock(&smmu_domain->lock);
+	mutex_lock(&smmu_domain->lock);
 	if (!smmu_domain->leaf_smmu) {
 		/* Now that we have a master, we can finalise the domain */
 		ret = arm_smmu_init_domain_context(domain, dev);
@@ -1149,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 			dev_name(device_smmu->dev));
 		goto err_unlock;
 	}
-	spin_unlock(&smmu_domain->lock);
+	mutex_unlock(&smmu_domain->lock);
 
 	/* Looks ok, so add the device to the domain */
 	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1159,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	return arm_smmu_domain_add_master(smmu_domain, master);
 
 err_unlock:
-	spin_unlock(&smmu_domain->lock);
+	mutex_unlock(&smmu_domain->lock);
 	return ret;
 }
 
@@ -1212,7 +1215,10 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 
 		arm_smmu_flush_pgtable(smmu, page_address(table),
 				       ARM_SMMU_PTE_HWTABLE_SIZE);
-		pgtable_page_ctor(table);
+		if (!pgtable_page_ctor(table)) {
+			__free_page(table);
+			return -ENOMEM;
+		}
 		pmd_populate(NULL, pmd, table);
 		arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
 	}
@@ -1388,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	if (paddr & ~output_mask)
 		return -ERANGE;
 
-	spin_lock(&smmu_domain->lock);
+	mutex_lock(&smmu_domain->lock);
 	pgd += pgd_index(iova);
 	end = iova + size;
 	do {
@@ -1404,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
 	} while (pgd++, iova != end);
 
 out_unlock:
-	spin_unlock(&smmu_domain->lock);
+	mutex_unlock(&smmu_domain->lock);
 
 	/* Ensure new page tables are visible to the hardware walker */
 	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
@@ -1417,9 +1423,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
 			phys_addr_t paddr, size_t size, int flags)
 {
 	struct arm_smmu_domain *smmu_domain = domain->priv;
-	struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;
 
-	if (!smmu_domain || !smmu)
+	if (!smmu_domain)
 		return -ENODEV;
 
 	/* Check for silent address truncation up the SMMU chain. */
@@ -1443,44 +1448,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 					 dma_addr_t iova)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
+	pgd_t *pgdp, pgd;
+	pud_t pud;
+	pmd_t pmd;
+	pte_t pte;
 	struct arm_smmu_domain *smmu_domain = domain->priv;
 	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
-	struct arm_smmu_device *smmu = root_cfg->smmu;
 
-	spin_lock(&smmu_domain->lock);
-	pgd = root_cfg->pgd;
-	if (!pgd)
-		goto err_unlock;
+	pgdp = root_cfg->pgd;
+	if (!pgdp)
+		return 0;
 
-	pgd += pgd_index(iova);
-	if (pgd_none_or_clear_bad(pgd))
-		goto err_unlock;
+	pgd = *(pgdp + pgd_index(iova));
+	if (pgd_none(pgd))
+		return 0;
 
-	pud = pud_offset(pgd, iova);
-	if (pud_none_or_clear_bad(pud))
-		goto err_unlock;
+	pud = *pud_offset(&pgd, iova);
+	if (pud_none(pud))
+		return 0;
 
-	pmd = pmd_offset(pud, iova);
-	if (pmd_none_or_clear_bad(pmd))
-		goto err_unlock;
+	pmd = *pmd_offset(&pud, iova);
+	if (pmd_none(pmd))
+		return 0;
 
-	pte = pmd_page_vaddr(*pmd) + pte_index(iova);
+	pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
 	if (pte_none(pte))
-		goto err_unlock;
-
-	spin_unlock(&smmu_domain->lock);
-	return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
+		return 0;
 
-err_unlock:
-	spin_unlock(&smmu_domain->lock);
-	dev_warn(smmu->dev,
-		 "invalid (corrupt?) page tables detected for iova 0x%llx\n",
-		 (unsigned long long)iova);
-	return -EINVAL;
+	return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
 }
 
 static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
@@ -1559,9 +1554,13 @@ static struct iommu_ops arm_smmu_ops = {
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 {
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-	void __iomem *sctlr_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB_SCTLR;
+	void __iomem *cb_base;
 	int i = 0;
-	u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+	u32 reg;
+
+	/* Clear Global FSR */
+	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+	writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR);
 
 	/* Mark all SMRn as invalid and all S2CRn as bypass */
 	for (i = 0; i < smmu->num_mapping_groups; ++i) {
@@ -1569,33 +1568,38 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 		writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
 	}
 
-	/* Make sure all context banks are disabled */
-	for (i = 0; i < smmu->num_context_banks; ++i)
-		writel_relaxed(0, sctlr_base + ARM_SMMU_CB(smmu, i));
+	/* Make sure all context banks are disabled and clear CB_FSR */
+	for (i = 0; i < smmu->num_context_banks; ++i) {
+		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
+		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
+		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
+	}
 
 	/* Invalidate the TLB, just in case */
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
 
+	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+
 	/* Enable fault reporting */
-	scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
 
 	/* Disable TLB broadcasting. */
-	scr0 |= (sCR0_VMIDPNE | sCR0_PTM);
+	reg |= (sCR0_VMIDPNE | sCR0_PTM);
 
 	/* Enable client access, but bypass when no mapping is found */
-	scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+	reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
 
 	/* Disable forced broadcasting */
-	scr0 &= ~sCR0_FB;
+	reg &= ~sCR0_FB;
 
 	/* Don't upgrade barriers */
-	scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
 
 	/* Push the button */
 	arm_smmu_tlb_sync(smmu);
-	writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0);
+	writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0);
 }
 
 static int arm_smmu_id_size_to_bits(int size)
@@ -1700,13 +1704,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
 	smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
 
-	/* Check that we ioremapped enough */
+	/* Check for size mismatch of SMMU address space from mapped region */
 	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
 	size *= (smmu->pagesize << 1);
-	if (smmu->size < size)
-		dev_warn(smmu->dev,
-			 "device is 0x%lx bytes but only mapped 0x%lx!\n",
-			 size, smmu->size);
+	if (smmu->size != size)
+		dev_warn(smmu->dev, "SMMU address space size (0x%lx) differs "
+			"from mapped region size (0x%lx)!\n", size, smmu->size);
 
 	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
@@ -1781,15 +1784,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	smmu->dev = dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "missing base address/size\n");
-		return -ENODEV;
-	}
-
+	smmu->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(smmu->base))
+		return PTR_ERR(smmu->base);
 	smmu->size = resource_size(res);
-	smmu->base = devm_request_and_ioremap(dev, res);
-	if (!smmu->base)
-		return -EADDRNOTAVAIL;
 
 	if (of_property_read_u32(dev->of_node, "#global-interrupts",
 				 &smmu->num_global_irqs)) {
@@ -1804,12 +1802,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 			smmu->num_context_irqs++;
 	}
 
-	if (num_irqs < smmu->num_global_irqs) {
-		dev_warn(dev, "found %d interrupts but expected at least %d\n",
-			 num_irqs, smmu->num_global_irqs);
-		smmu->num_global_irqs = num_irqs;
+	if (!smmu->num_context_irqs) {
+		dev_err(dev, "found %d interrupts but expected at least %d\n",
+			num_irqs, smmu->num_global_irqs + 1);
+		return -ENODEV;
 	}
-	smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;
 
 	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
 				  GFP_KERNEL);
@@ -1855,6 +1852,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		dev_err(dev,
 			"found only %d context interrupt(s) but %d required\n",
 			smmu->num_context_irqs, smmu->num_context_banks);
+		err = -ENODEV;
 		goto out_put_parent;
 	}
 
@@ -1933,7 +1931,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 		free_irq(smmu->irqs[i], smmu);
 
 	/* Turn the thing off */
-	writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+	writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
 	return 0;
 }
 
@@ -1981,7 +1979,7 @@ static void __exit arm_smmu_exit(void)
 	return platform_driver_unregister(&arm_smmu_driver);
 }
 
-module_init(arm_smmu_init);
+subsys_initcall(arm_smmu_init);
 module_exit(arm_smmu_exit);
 
 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 785675a56a10..8b452c9676d9 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -88,7 +88,7 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
 			pr_warn("Device scope bus [%d] not found\n", scope->bus);
 			break;
 		}
-		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
+		pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function));
 		if (!pdev) {
 			/* warning will be printed below */
 			break;
@@ -99,7 +99,7 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
 	}
 	if (!pdev) {
 		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
-			segment, scope->bus, path->dev, path->fn);
+			segment, scope->bus, path->device, path->function);
 		*dev = NULL;
 		return 0;
 	}
@@ -403,7 +403,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
 
 	dev = pci_physfn(dev);
 
-	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
+	for_each_drhd_unit(dmaru) {
 		drhd = container_of(dmaru->hdr,
 				    struct acpi_dmar_hardware_unit,
 				    header);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 15e9b57e9cf0..43b9bfea48fa 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -782,7 +782,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 	int offset;
 
 	BUG_ON(!domain->pgd);
-	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
+
+	if (addr_width < BITS_PER_LONG && pfn >> addr_width)
+		/* Address beyond IOMMU's addressing capabilities. */
+		return NULL;
+
 	parent = domain->pgd;
 
 	while (level > 0) {
@@ -3777,11 +3781,10 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
 static void domain_remove_one_dev_info(struct dmar_domain *domain,
 					  struct pci_dev *pdev)
 {
-	struct device_domain_info *info;
+	struct device_domain_info *info, *tmp;
 	struct intel_iommu *iommu;
 	unsigned long flags;
 	int found = 0;
-	struct list_head *entry, *tmp;
 
 	iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number,
 				pdev->devfn);
@@ -3789,8 +3792,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
 		return;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
-	list_for_each_safe(entry, tmp, &domain->devices) {
-		info = list_entry(entry, struct device_domain_info, link);
+	list_for_each_entry_safe(info, tmp, &domain->devices, link) {
 		if (info->segment == pci_domain_nr(pdev->bus) &&
 		    info->bus == pdev->bus->number &&
 		    info->devfn == pdev->devfn) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index f71673dbb23d..bab10b1002fb 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -525,12 +525,13 @@ static int __init intel_irq_remapping_supported(void)
 	if (disable_irq_remap)
 		return 0;
 	if (irq_remap_broken) {
-		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
-			   "This system BIOS has enabled interrupt remapping\n"
-			   "on a chipset that contains an erratum making that\n"
-			   "feature unstable. To maintain system stability\n"
-			   "interrupt remapping is being disabled. Please\n"
-			   "contact your BIOS vendor for an update\n");
+		printk(KERN_WARNING
+			"This system BIOS has enabled interrupt remapping\n"
+			"on a chipset that contains an erratum making that\n"
+			"feature unstable. To maintain system stability\n"
+			"interrupt remapping is being disabled. Please\n"
+			"contact your BIOS vendor for an update\n");
+		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 		disable_irq_remap = 1;
 		return 0;
 	}
@@ -686,12 +687,12 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
 		 * Access PCI directly due to the PCI
 		 * subsystem isn't initialized yet.
 		 */
-		bus = read_pci_config_byte(bus, path->dev, path->fn,
+		bus = read_pci_config_byte(bus, path->device, path->function,
 					   PCI_SECONDARY_BUS);
 		path++;
 	}
 	ir_hpet[ir_hpet_num].bus = bus;
-	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
+	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
 	ir_hpet[ir_hpet_num].iommu = iommu;
 	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
 	ir_hpet_num++;
@@ -714,13 +715,13 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
 		 * Access PCI directly due to the PCI
 		 * subsystem isn't initialized yet.
 		 */
-		bus = read_pci_config_byte(bus, path->dev, path->fn,
+		bus = read_pci_config_byte(bus, path->device, path->function,
 					   PCI_SECONDARY_BUS);
 		path++;
 	}
 
 	ir_ioapic[ir_ioapic_num].bus = bus;
-	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
+	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
 	ir_ioapic[ir_ioapic_num].iommu = iommu;
 	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
 	ir_ioapic_num++;
diff --git a/drivers/iommu/iommu-traces.c b/drivers/iommu/iommu-traces.c
new file mode 100644
index 000000000000..bf3b317ff0c1
--- /dev/null
+++ b/drivers/iommu/iommu-traces.c
@@ -0,0 +1,27 @@
+/*
+ * iommu trace points
+ *
+ * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/iommu.h>
+
+/* iommu_group_event */
+EXPORT_TRACEPOINT_SYMBOL_GPL(add_device_to_group);
+EXPORT_TRACEPOINT_SYMBOL_GPL(remove_device_from_group);
+
+/* iommu_device_event */
+EXPORT_TRACEPOINT_SYMBOL_GPL(attach_device_to_domain);
+EXPORT_TRACEPOINT_SYMBOL_GPL(detach_device_from_domain);
+
+/* iommu_map_unmap */
+EXPORT_TRACEPOINT_SYMBOL_GPL(map);
+EXPORT_TRACEPOINT_SYMBOL_GPL(unmap);
+
+/* iommu_error */
+EXPORT_TRACEPOINT_SYMBOL_GPL(io_page_fault);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index fbe9ca734f8f..e5555fcfe703 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -29,6 +29,7 @@
 #include <linux/idr.h>
 #include <linux/notifier.h>
 #include <linux/err.h>
+#include <trace/events/iommu.h>
 
 static struct kset *iommu_group_kset;
 static struct ida iommu_group_ida;
@@ -363,6 +364,8 @@ rename:
 	/* Notify any listeners about change to group. */
 	blocking_notifier_call_chain(&group->notifier,
 				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
+
+	trace_add_device_to_group(group->id, dev);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -399,6 +402,8 @@ void iommu_group_remove_device(struct device *dev)
 	sysfs_remove_link(group->devices_kobj, device->name);
 	sysfs_remove_link(&dev->kobj, "iommu_group");
 
+	trace_remove_device_from_group(group->id, dev);
+
 	kfree(device->name);
 	kfree(device);
 	dev->iommu_group = NULL;
@@ -680,10 +685,14 @@ EXPORT_SYMBOL_GPL(iommu_domain_free);
 
 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
 {
+	int ret;
 	if (unlikely(domain->ops->attach_dev == NULL))
 		return -ENODEV;
 
-	return domain->ops->attach_dev(domain, dev);
+	ret = domain->ops->attach_dev(domain, dev);
+	if (!ret)
+		trace_attach_device_to_domain(dev);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_attach_device);
 
@@ -693,6 +702,7 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 		return;
 
 	domain->ops->detach_dev(domain, dev);
+	trace_detach_device_from_domain(dev);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
@@ -807,17 +817,17 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	 * size of the smallest page supported by the hardware
 	 */
 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-		pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n",
+		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
 		       iova, &paddr, size, min_pagesz);
 		return -EINVAL;
 	}
 
-	pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size);
+	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
 		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
 
-		pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n",
+		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
 			 iova, &paddr, pgsize);
 
 		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
@@ -832,6 +842,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	/* unroll mapping in case something went wrong */
 	if (ret)
 		iommu_unmap(domain, orig_iova, orig_size - size);
+	else
+		trace_map(iova, paddr, size);
 
 	return ret;
 }
@@ -880,6 +892,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 		unmapped += unmapped_page;
 	}
 
+	trace_unmap(iova, 0, size);
 	return unmapped;
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index f4003d568a92..b6f9a51746ca 100644
--- a/drivers/iommu/omap-iopgtable.h
+++ b/drivers/iommu/omap-iopgtable.h
@@ -95,4 +95,4 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 #define iopte_offset(iopgd, da)	(iopgd_page_vaddr(iopgd) + iopte_index(da))
 
 #define to_iommu(dev)						\
-	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
+	(platform_get_drvdata(to_platform_device(dev)))
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 108c0e9c24d9..dba1a9fd5070 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -252,7 +252,7 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
 	spin_lock_irqsave(&gart->pte_lock, flags);
 	pfn = __phys_to_pfn(pa);
 	if (!pfn_valid(pfn)) {
-		dev_err(gart->dev, "Invalid page: %08x\n", pa);
+		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
 		spin_unlock_irqrestore(&gart->pte_lock, flags);
 		return -EINVAL;
 	}
@@ -295,8 +295,8 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
 
 	pa = (pte & GART_PAGE_MASK);
 	if (!pfn_valid(__phys_to_pfn(pa))) {
-		dev_err(gart->dev, "No entry for %08llx:%08x\n",
-			(unsigned long long)iova, pa);
+		dev_err(gart->dev, "No entry for %08llx:%pa\n",
+			(unsigned long long)iova, &pa);
 		gart_dump_table(gart);
 		return -EINVAL;
 	}
@@ -351,7 +351,6 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	struct gart_device *gart;
 	struct resource *res, *res_remap;
 	void __iomem *gart_regs;
-	int err;
 	struct device *dev = &pdev->dev;
 
 	if (gart_handle)
@@ -376,8 +375,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
 	if (!gart_regs) {
 		dev_err(dev, "failed to remap GART registers\n");
-		err = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
 
 	gart->dev = &pdev->dev;
@@ -391,8 +389,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
 	if (!gart->savedata) {
 		dev_err(dev, "failed to allocate context save area\n");
-		err = -ENOMEM;
-		goto fail;
+		return -ENOMEM;
 	}
 
 	platform_set_drvdata(pdev, gart);
@@ -401,32 +398,20 @@ static int tegra_gart_probe(struct platform_device *pdev)
 	gart_handle = gart;
 	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
 	return 0;
-
-fail:
-	if (gart_regs)
-		devm_iounmap(dev, gart_regs);
-	if (gart && gart->savedata)
-		vfree(gart->savedata);
-	devm_kfree(dev, gart);
-	return err;
 }
 
 static int tegra_gart_remove(struct platform_device *pdev)
 {
 	struct gart_device *gart = platform_get_drvdata(pdev);
-	struct device *dev = gart->dev;
 
 	writel(0, gart->regs + GART_CONFIG);
 	if (gart->savedata)
 		vfree(gart->savedata);
-	if (gart->regs)
-		devm_iounmap(dev, gart->regs);
-	devm_kfree(dev, gart);
 	gart_handle = NULL;
 	return 0;
 }
 
-const struct dev_pm_ops tegra_gart_pm_ops = {
+static const struct dev_pm_ops tegra_gart_pm_ops = {
 	.suspend	= tegra_gart_suspend,
 	.resume		= tegra_gart_resume,
 };
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e0665603afd9..605b5b46a903 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -731,7 +731,7 @@ static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova,
 	unsigned long pfn = __phys_to_pfn(pa);
 	unsigned long flags;
 
-	dev_dbg(as->smmu->dev, "[%d] %08lx:%08x\n", as->asid, iova, pa);
+	dev_dbg(as->smmu->dev, "[%d] %08lx:%pa\n", as->asid, iova, &pa);
 
 	if (!pfn_valid(pfn))
 		return -ENOMEM;
@@ -1254,7 +1254,7 @@ static int tegra_smmu_remove(struct platform_device *pdev)
 	return 0;
 }
 
-const struct dev_pm_ops tegra_smmu_pm_ops = {
+static const struct dev_pm_ops tegra_smmu_pm_ops = {
 	.suspend	= tegra_smmu_suspend,
 	.resume		= tegra_smmu_resume,
 };