Diffstat (limited to 'drivers/iommu')

 -rw-r--r--   drivers/iommu/Kconfig                 |   37
 -rw-r--r--   drivers/iommu/Makefile                |    2
 -rw-r--r--   drivers/iommu/amd_iommu.c             |    3
 -rw-r--r--   drivers/iommu/amd_iommu_v2.c          |   92
 -rw-r--r--   drivers/iommu/arm-smmu.c              |  134
 -rw-r--r--   drivers/iommu/dmar.c                  |  532
 -rw-r--r--   drivers/iommu/exynos-iommu.c          |    1
 -rw-r--r--   drivers/iommu/intel-iommu.c           |  308
 -rw-r--r--   drivers/iommu/intel_irq_remapping.c   |  249
 -rw-r--r--   drivers/iommu/iommu.c                 |   60
 -rw-r--r--   drivers/iommu/ipmmu-vmsa.c            |    3
 -rw-r--r--   drivers/iommu/irq_remapping.c         |    8
 -rw-r--r--   drivers/iommu/msm_iommu.c             |    4
 -rw-r--r--   drivers/iommu/msm_iommu_dev.c         |   10
 -rw-r--r--   drivers/iommu/omap-iommu-debug.c      |  242
 -rw-r--r--   drivers/iommu/omap-iommu.c            |  313
 -rw-r--r--   drivers/iommu/omap-iommu.h            |   98
 -rw-r--r--   drivers/iommu/omap-iommu2.c           |  337
 -rw-r--r--   drivers/iommu/rockchip-iommu.c        | 1038
 -rw-r--r--   drivers/iommu/shmobile-iommu.c        |    1
 -rw-r--r--   drivers/iommu/tegra-smmu.c            | 1609
 21 files changed, 2978 insertions, 2103 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index dd5112265cc9..30f0e61341c5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -144,13 +144,26 @@ config OMAP_IOMMU
 	select IOMMU_API
 
 config OMAP_IOMMU_DEBUG
-	tristate "Export OMAP IOMMU internals in DebugFS"
-	depends on OMAP_IOMMU && DEBUG_FS
-	help
-	  Select this to see extensive information about
-	  the internal state of OMAP IOMMU in debugfs.
+	bool "Export OMAP IOMMU internals in DebugFS"
+	depends on OMAP_IOMMU && DEBUG_FS
+	---help---
+	  Select this to see extensive information about
+	  the internal state of OMAP IOMMU in debugfs.
 
-	  Say N unless you know you need this.
+	  Say N unless you know you need this.
+
+config ROCKCHIP_IOMMU
+	bool "Rockchip IOMMU Support"
+	depends on ARM
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
+	select IOMMU_API
+	select ARM_DMA_USE_IOMMU
+	help
+	  Support for IOMMUs found on Rockchip rk32xx SOCs.
+	  These IOMMUs allow virtualization of the address space used by most
+	  cores within the multimedia subsystem.
+	  Say Y here if you are using a Rockchip SoC that includes an IOMMU
+	  device.
 
 config TEGRA_IOMMU_GART
 	bool "Tegra GART IOMMU Support"
@@ -163,14 +176,14 @@ config TEGRA_IOMMU_GART
 	  hardware included on Tegra SoCs.
 
 config TEGRA_IOMMU_SMMU
-	bool "Tegra SMMU IOMMU Support"
-	depends on ARCH_TEGRA && TEGRA_AHB
+	bool "NVIDIA Tegra SMMU Support"
+	depends on ARCH_TEGRA
+	depends on TEGRA_AHB
+	depends on TEGRA_MC
 	select IOMMU_API
 	help
-	  Enables support for remapping discontiguous physical memory
-	  shared with the operating system into contiguous I/O virtual
-	  space through the SMMU (System Memory Management Unit)
-	  hardware included on Tegra SoCs.
+	  This driver supports the IOMMU hardware (SMMU) found on NVIDIA Tegra
+	  SoCs (Tegra30 up to Tegra124).
 
 config EXYNOS_IOMMU
 	bool "Exynos IOMMU Support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 16edef74b8ee..7b976f294a69 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -11,8 +11,8 @@ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
 obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
-obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
+obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
 obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 505a9adac2d5..b205f76d7129 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3411,6 +3411,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
 		return true;
 	case IOMMU_CAP_INTR_REMAP:
 		return (irq_remapping_enabled == 1);
+	case IOMMU_CAP_NOEXEC:
+		return false;
 	}
 
 	return false;
@@ -3424,6 +3426,7 @@ static const struct iommu_ops amd_iommu_ops = {
 	.detach_dev = amd_iommu_detach_device,
 	.map = amd_iommu_map,
 	.unmap = amd_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
 };
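[The IOMMU_CAP_NOEXEC case added above (returning false for AMD) pairs with the new IOMMU_NOEXEC mapping flag: the intended pattern appears to be that a caller probes the capability before asking for execute-never mappings. A minimal sketch of that pattern, using the generic iommu_capable()/iommu_map() API from this same series; the function and variable names are illustrative, not from this patch:]

    #include <linux/iommu.h>
    #include <linux/pci.h>

    /* Map one page, execute-never where the hardware supports it. */
    static int map_noexec_page(struct iommu_domain *domain,
    			   unsigned long iova, phys_addr_t paddr)
    {
    	int prot = IOMMU_READ | IOMMU_WRITE;

    	/* AMD IOMMU answers false here, so the flag is simply left out */
    	if (iommu_capable(&pci_bus_type, IOMMU_CAP_NOEXEC))
    		prot |= IOMMU_NOEXEC;

    	return iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
    }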
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 90d734bbf467..bea878f8e7d3 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -279,10 +279,8 @@ static void free_pasid_state(struct pasid_state *pasid_state)
 
 static void put_pasid_state(struct pasid_state *pasid_state)
 {
-	if (atomic_dec_and_test(&pasid_state->count)) {
-		put_device_state(pasid_state->device_state);
+	if (atomic_dec_and_test(&pasid_state->count))
 		wake_up(&pasid_state->wq);
-	}
 }
 
 static void put_pasid_state_wait(struct pasid_state *pasid_state)
@@ -291,9 +289,7 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state)
 
 	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
 
-	if (atomic_dec_and_test(&pasid_state->count))
-		put_device_state(pasid_state->device_state);
-	else
+	if (!atomic_dec_and_test(&pasid_state->count))
 		schedule();
 
 	finish_wait(&pasid_state->wq, &wait);
@@ -513,45 +509,67 @@ static void finish_pri_tag(struct device_state *dev_state,
 	spin_unlock_irqrestore(&pasid_state->lock, flags);
 }
 
+static void handle_fault_error(struct fault *fault)
+{
+	int status;
+
+	if (!fault->dev_state->inv_ppr_cb) {
+		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+		return;
+	}
+
+	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
+					      fault->pasid,
+					      fault->address,
+					      fault->flags);
+	switch (status) {
+	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
+		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
+		break;
+	case AMD_IOMMU_INV_PRI_RSP_INVALID:
+		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+		break;
+	case AMD_IOMMU_INV_PRI_RSP_FAIL:
+		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
+		break;
+	default:
+		BUG();
+	}
+}
+
 static void do_fault(struct work_struct *work)
 {
 	struct fault *fault = container_of(work, struct fault, work);
-	int npages, write;
-	struct page *page;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	u64 address;
+	int ret, write;
 
 	write = !!(fault->flags & PPR_FAULT_WRITE);
 
-	down_read(&fault->state->mm->mmap_sem);
-	npages = get_user_pages(NULL, fault->state->mm,
-				fault->address, 1, write, 0, &page, NULL);
-	up_read(&fault->state->mm->mmap_sem);
-
-	if (npages == 1) {
-		put_page(page);
-	} else if (fault->dev_state->inv_ppr_cb) {
-		int status;
-
-		status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
-						      fault->pasid,
-						      fault->address,
-						      fault->flags);
-		switch (status) {
-		case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
-			set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
-			break;
-		case AMD_IOMMU_INV_PRI_RSP_INVALID:
-			set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
-			break;
-		case AMD_IOMMU_INV_PRI_RSP_FAIL:
-			set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
-			break;
-		default:
-			BUG();
-		}
-	} else {
-		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+	mm = fault->state->mm;
+	address = fault->address;
+
+	down_read(&mm->mmap_sem);
+	vma = find_extend_vma(mm, address);
+	if (!vma || address < vma->vm_start) {
+		/* failed to get a vma in the right range */
+		up_read(&mm->mmap_sem);
+		handle_fault_error(fault);
+		goto out;
 	}
 
+	ret = handle_mm_fault(mm, vma, address, write);
+	if (ret & VM_FAULT_ERROR) {
+		/* failed to service fault */
+		up_read(&mm->mmap_sem);
+		handle_fault_error(fault);
+		goto out;
+	}
+
+	up_read(&mm->mmap_sem);
+
+out:
 	finish_pri_tag(fault->dev_state, fault->state, fault->tag);
 
 	put_pasid_state(fault->state);
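[For context on the handle_fault_error() path factored out above: inv_ppr_cb is the callback a device driver can register, via amd_iommu_set_invalid_ppr_cb(), to decide how an unserviceable PPR fault is answered. A hedged, driver-side sketch under the assumption of the amd-iommu v2 callback signature; the names and the policy chosen are illustrative:]

    #include <linux/amd-iommu.h>
    #include <linux/pci.h>

    /* Answer faults the IOMMU core could not service with PRI "invalid". */
    static int my_inv_ppr_cb(struct pci_dev *pdev, int pasid,
    			 unsigned long address, u16 flags)
    {
    	dev_warn(&pdev->dev, "unhandled PPR fault: pasid %d addr %lx\n",
    		 pasid, address);
    	return AMD_IOMMU_INV_PRI_RSP_INVALID;
    }

    /* Registered once in the driver's probe path (illustrative):
     *	amd_iommu_set_invalid_ppr_cb(pdev, my_inv_ppr_cb);
     */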
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 60558f794922..b8aac1389a96 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -404,9 +404,16 @@ struct arm_smmu_cfg {
 #define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
 #define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)
 
+enum arm_smmu_domain_stage {
+	ARM_SMMU_DOMAIN_S1 = 0,
+	ARM_SMMU_DOMAIN_S2,
+	ARM_SMMU_DOMAIN_NESTED,
+};
+
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct arm_smmu_cfg		cfg;
+	enum arm_smmu_domain_stage	stage;
 	spinlock_t			lock;
 };
 
@@ -906,19 +913,46 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (smmu_domain->smmu)
 		goto out_unlock;
 
-	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+	/*
+	 * Mapping the requested stage onto what we support is surprisingly
+	 * complicated, mainly because the spec allows S1+S2 SMMUs without
+	 * support for nested translation. That means we end up with the
+	 * following table:
+	 *
+	 * Requested        Supported        Actual
+	 * S1               N                S1
+	 * S1               S1+S2            S1
+	 * S1               S2               S2
+	 * S1               S1               S1
+	 * N                N                N
+	 * N                S1+S2            S2
+	 * N                S2               S2
+	 * N                S1               S1
+	 *
+	 * Note that you can't actually request stage-2 mappings.
+	 */
+	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
+		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
+		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+	switch (smmu_domain->stage) {
+	case ARM_SMMU_DOMAIN_S1:
+		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+		start = smmu->num_s2_context_banks;
+		break;
+	case ARM_SMMU_DOMAIN_NESTED:
 		/*
 		 * We will likely want to change this if/when KVM gets
 		 * involved.
 		 */
-		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
-		start = smmu->num_s2_context_banks;
-	} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) {
-		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
-		start = smmu->num_s2_context_banks;
-	} else {
+	case ARM_SMMU_DOMAIN_S2:
 		cfg->cbar = CBAR_TYPE_S2_TRANS;
 		start = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		goto out_unlock;
 	}
 
 	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
@@ -1281,7 +1315,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 				   unsigned long pfn, int prot, int stage)
 {
 	pte_t *pte, *start;
-	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
+	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
 
 	if (pmd_none(*pmd)) {
 		/* Allocate a new set of tables */
@@ -1315,10 +1349,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 			pteval |= ARM_SMMU_PTE_MEMATTR_NC;
 	}
 
+	if (prot & IOMMU_NOEXEC)
+		pteval |= ARM_SMMU_PTE_XN;
+
 	/* If no access, create a faulting entry to avoid TLB fills */
-	if (prot & IOMMU_EXEC)
-		pteval &= ~ARM_SMMU_PTE_XN;
-	else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
 		pteval &= ~ARM_SMMU_PTE_PAGE;
 
 	pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1568,6 +1603,8 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 		return true;
 	case IOMMU_CAP_INTR_REMAP:
 		return true; /* MSIs are just memory writes */
+	case IOMMU_CAP_NOEXEC:
+		return true;
 	default:
 		return false;
 	}
@@ -1644,20 +1681,57 @@ static void arm_smmu_remove_device(struct device *dev)
 	iommu_group_remove_device(dev);
 }
 
+static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
+				    enum iommu_attr attr, void *data)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+
+	switch (attr) {
+	case DOMAIN_ATTR_NESTING:
+		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+		return 0;
+	default:
+		return -ENODEV;
+	}
+}
+
+static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
+				    enum iommu_attr attr, void *data)
+{
+	struct arm_smmu_domain *smmu_domain = domain->priv;
+
+	switch (attr) {
+	case DOMAIN_ATTR_NESTING:
+		if (smmu_domain->smmu)
+			return -EPERM;
+		if (*(int *)data)
+			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+		else
+			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+		return 0;
+	default:
+		return -ENODEV;
+	}
+}
+
 static const struct iommu_ops arm_smmu_ops = {
-	.capable	= arm_smmu_capable,
-	.domain_init	= arm_smmu_domain_init,
-	.domain_destroy	= arm_smmu_domain_destroy,
-	.attach_dev	= arm_smmu_attach_dev,
-	.detach_dev	= arm_smmu_detach_dev,
-	.map		= arm_smmu_map,
-	.unmap		= arm_smmu_unmap,
-	.iova_to_phys	= arm_smmu_iova_to_phys,
-	.add_device	= arm_smmu_add_device,
-	.remove_device	= arm_smmu_remove_device,
-	.pgsize_bitmap	= (SECTION_SIZE |
-			   ARM_SMMU_PTE_CONT_SIZE |
-			   PAGE_SIZE),
+	.capable		= arm_smmu_capable,
+	.domain_init		= arm_smmu_domain_init,
+	.domain_destroy		= arm_smmu_domain_destroy,
+	.attach_dev		= arm_smmu_attach_dev,
+	.detach_dev		= arm_smmu_detach_dev,
+	.map			= arm_smmu_map,
+	.unmap			= arm_smmu_unmap,
+	.map_sg			= default_iommu_map_sg,
+	.iova_to_phys		= arm_smmu_iova_to_phys,
+	.add_device		= arm_smmu_add_device,
+	.remove_device		= arm_smmu_remove_device,
+	.domain_get_attr	= arm_smmu_domain_get_attr,
+	.domain_set_attr	= arm_smmu_domain_set_attr,
+	.pgsize_bitmap		= (SECTION_SIZE |
+				   ARM_SMMU_PTE_CONT_SIZE |
+				   PAGE_SIZE),
 };
 
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -2072,8 +2146,20 @@ static struct platform_driver arm_smmu_driver = {
 
 static int __init arm_smmu_init(void)
 {
+	struct device_node *np;
 	int ret;
 
+	/*
+	 * Play nice with systems that don't have an ARM SMMU by checking that
+	 * an ARM SMMU exists in the system before proceeding with the driver
+	 * and IOMMU bus operation registration.
+	 */
+	np = of_find_matching_node(NULL, arm_smmu_of_match);
+	if (!np)
+		return 0;
+
+	of_node_put(np);
+
 	ret = platform_driver_register(&arm_smmu_driver);
 	if (ret)
 		return ret;
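[The domain_get_attr/domain_set_attr hooks added above are what let a user such as VFIO ask for nested translation, and the -EPERM check means this must happen before the domain is first attached. A minimal usage sketch through the generic IOMMU API; this caller is illustrative, not part of the patch:]

    #include <linux/iommu.h>

    /* Request a nested (stage-1 over stage-2) domain before first attach. */
    static int make_nested_domain(struct bus_type *bus, struct device *dev,
    			      struct iommu_domain **out)
    {
    	struct iommu_domain *domain;
    	int nesting = 1;
    	int ret;

    	domain = iommu_domain_alloc(bus);
    	if (!domain)
    		return -ENOMEM;

    	/* Must precede iommu_attach_device(), see the -EPERM case above */
    	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
    	if (!ret)
    		ret = iommu_attach_device(domain, dev);
    	if (ret) {
    		iommu_domain_free(domain);
    		return ret;
    	}

    	*out = domain;
    	return 0;
    }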
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index c5c61cabd6e3..9847613085e1 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -44,6 +44,14 @@
 
 #include "irq_remapping.h"
 
+typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
+struct dmar_res_callback {
+	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
+	void			*arg[ACPI_DMAR_TYPE_RESERVED];
+	bool			ignore_unhandled;
+	bool			print_entry;
+};
+
 /*
  * Assumptions:
  * 1) The hotplug framework guarentees that DMAR unit will be hot-added
@@ -62,11 +70,12 @@ LIST_HEAD(dmar_drhd_units);
 struct acpi_table_header * __initdata dmar_tbl;
 static acpi_size dmar_tbl_size;
 static int dmar_dev_scope_status = 1;
+static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
 
 static int alloc_iommu(struct dmar_drhd_unit *drhd);
 static void free_iommu(struct intel_iommu *iommu);
 
-static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
+static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
 	/*
 	 * add INCLUDE_ALL at the tail, so scan the list will find it at
@@ -344,24 +353,45 @@ static struct notifier_block dmar_pci_bus_nb = {
 	.priority = INT_MIN,
 };
 
+static struct dmar_drhd_unit *
+dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
+{
+	struct dmar_drhd_unit *dmaru;
+
+	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
+		if (dmaru->segment == drhd->segment &&
+		    dmaru->reg_base_addr == drhd->address)
+			return dmaru;
+
+	return NULL;
+}
+
 /**
  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
  * structure which uniquely represent one DMA remapping hardware unit
  * present in the platform
  */
-static int __init
-dmar_parse_one_drhd(struct acpi_dmar_header *header)
+static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
 {
 	struct acpi_dmar_hardware_unit *drhd;
 	struct dmar_drhd_unit *dmaru;
 	int ret = 0;
 
 	drhd = (struct acpi_dmar_hardware_unit *)header;
-	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
+	dmaru = dmar_find_dmaru(drhd);
+	if (dmaru)
+		goto out;
+
+	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
 	if (!dmaru)
 		return -ENOMEM;
 
-	dmaru->hdr = header;
+	/*
+	 * If header is allocated from slab by ACPI _DSM method, we need to
+	 * copy the content because the memory buffer will be freed on return.
+	 */
+	dmaru->hdr = (void *)(dmaru + 1);
+	memcpy(dmaru->hdr, header, header->length);
 	dmaru->reg_base_addr = drhd->address;
 	dmaru->segment = drhd->segment;
 	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
@@ -381,6 +411,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 		return ret;
 	}
 	dmar_register_drhd_unit(dmaru);
+
+out:
+	if (arg)
+		(*(int *)arg)++;
+
 	return 0;
 }
 
@@ -393,7 +428,8 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
 	kfree(dmaru);
 }
 
-static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
+static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
+				      void *arg)
 {
 	struct acpi_dmar_andd *andd = (void *)header;
 
@@ -414,8 +450,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
 }
 
 #ifdef CONFIG_ACPI_NUMA
-static int __init
-dmar_parse_one_rhsa(struct acpi_dmar_header *header)
+static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
 {
 	struct acpi_dmar_rhsa *rhsa;
 	struct dmar_drhd_unit *drhd;
@@ -442,6 +477,8 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header)
 
 	return 0;
 }
+#else
+#define	dmar_parse_one_rhsa		dmar_res_noop
 #endif
 
 static void __init
@@ -503,6 +540,52 @@ static int __init dmar_table_detect(void)
 	return (ACPI_SUCCESS(status) ? 1 : 0);
 }
 
+static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
+				       size_t len, struct dmar_res_callback *cb)
+{
+	int ret = 0;
+	struct acpi_dmar_header *iter, *next;
+	struct acpi_dmar_header *end = ((void *)start) + len;
+
+	for (iter = start; iter < end && ret == 0; iter = next) {
+		next = (void *)iter + iter->length;
+		if (iter->length == 0) {
+			/* Avoid looping forever on bad ACPI tables */
+			pr_debug(FW_BUG "Invalid 0-length structure\n");
+			break;
+		} else if (next > end) {
+			/* Avoid passing table end */
+			pr_warn(FW_BUG "record passes table end\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		if (cb->print_entry)
+			dmar_table_print_dmar_entry(iter);
+
+		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
+			/* continue for forward compatibility */
+			pr_debug("Unknown DMAR structure type %d\n",
+				 iter->type);
+		} else if (cb->cb[iter->type]) {
+			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
+		} else if (!cb->ignore_unhandled) {
+			pr_warn("No handler for DMAR structure type %d\n",
+				iter->type);
+			ret = -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
+				       struct dmar_res_callback *cb)
+{
+	return dmar_walk_remapping_entries((void *)(dmar + 1),
+			dmar->header.length - sizeof(*dmar), cb);
+}
+
 /**
  * parse_dmar_table - parses the DMA reporting table
  */
@@ -510,9 +593,18 @@ static int __init
 parse_dmar_table(void)
 {
 	struct acpi_table_dmar *dmar;
-	struct acpi_dmar_header *entry_header;
 	int ret = 0;
 	int drhd_count = 0;
+	struct dmar_res_callback cb = {
+		.print_entry = true,
+		.ignore_unhandled = true,
+		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
+		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
+		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
+		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
+		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
+		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
+	};
 
 	/*
 	 * Do it again, earlier dmar_tbl mapping could be mapped with
@@ -536,51 +628,10 @@ parse_dmar_table(void)
 	}
 
 	pr_info("Host address width %d\n", dmar->width + 1);
-
-	entry_header = (struct acpi_dmar_header *)(dmar + 1);
-	while (((unsigned long)entry_header) <
-			(((unsigned long)dmar) + dmar_tbl->length)) {
-		/* Avoid looping forever on bad ACPI tables */
-		if (entry_header->length == 0) {
-			pr_warn("Invalid 0-length structure\n");
-			ret = -EINVAL;
-			break;
-		}
-
-		dmar_table_print_dmar_entry(entry_header);
-
-		switch (entry_header->type) {
-		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
-			drhd_count++;
-			ret = dmar_parse_one_drhd(entry_header);
-			break;
-		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
-			ret = dmar_parse_one_rmrr(entry_header);
-			break;
-		case ACPI_DMAR_TYPE_ROOT_ATS:
-			ret = dmar_parse_one_atsr(entry_header);
-			break;
-		case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
-#ifdef CONFIG_ACPI_NUMA
-			ret = dmar_parse_one_rhsa(entry_header);
-#endif
-			break;
-		case ACPI_DMAR_TYPE_NAMESPACE:
-			ret = dmar_parse_one_andd(entry_header);
-			break;
-		default:
-			pr_warn("Unknown DMAR structure type %d\n",
-				entry_header->type);
-			ret = 0; /* for forward compatibility */
-			break;
-		}
-		if (ret)
-			break;
-
-		entry_header = ((void *)entry_header + entry_header->length);
-	}
-	if (drhd_count == 0)
+	ret = dmar_walk_dmar_table(dmar, &cb);
+	if (ret == 0 && drhd_count == 0)
 		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
+
 	return ret;
 }
 
@@ -778,76 +829,68 @@ static void warn_invalid_dmar(u64 addr, const char *message)
 		   dmi_get_system_info(DMI_PRODUCT_VERSION));
 }
 
-static int __init check_zero_address(void)
+static int __ref
+dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
 {
-	struct acpi_table_dmar *dmar;
-	struct acpi_dmar_header *entry_header;
 	struct acpi_dmar_hardware_unit *drhd;
+	void __iomem *addr;
+	u64 cap, ecap;
 
-	dmar = (struct acpi_table_dmar *)dmar_tbl;
-	entry_header = (struct acpi_dmar_header *)(dmar + 1);
-
-	while (((unsigned long)entry_header) <
-			(((unsigned long)dmar) + dmar_tbl->length)) {
-		/* Avoid looping forever on bad ACPI tables */
-		if (entry_header->length == 0) {
-			pr_warn("Invalid 0-length structure\n");
-			return 0;
-		}
+	drhd = (void *)entry;
+	if (!drhd->address) {
+		warn_invalid_dmar(0, "");
+		return -EINVAL;
+	}
 
-		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
-			void __iomem *addr;
-			u64 cap, ecap;
+	if (arg)
+		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
+	else
+		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
+	if (!addr) {
+		pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
+		return -EINVAL;
+	}
 
-			drhd = (void *)entry_header;
-			if (!drhd->address) {
-				warn_invalid_dmar(0, "");
-				goto failed;
-			}
+	cap = dmar_readq(addr + DMAR_CAP_REG);
+	ecap = dmar_readq(addr + DMAR_ECAP_REG);
 
-			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
-			if (!addr ) {
-				printk("IOMMU: can't validate: %llx\n", drhd->address);
-				goto failed;
-			}
-			cap = dmar_readq(addr + DMAR_CAP_REG);
-			ecap = dmar_readq(addr + DMAR_ECAP_REG);
-			early_iounmap(addr, VTD_PAGE_SIZE);
-			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
-				warn_invalid_dmar(drhd->address,
-						  " returns all ones");
-				goto failed;
-			}
-		}
+	if (arg)
+		iounmap(addr);
+	else
+		early_iounmap(addr, VTD_PAGE_SIZE);
 
-		entry_header = ((void *)entry_header + entry_header->length);
+	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
+		warn_invalid_dmar(drhd->address, " returns all ones");
+		return -EINVAL;
 	}
-	return 1;
 
-failed:
 	return 0;
 }
 
 int __init detect_intel_iommu(void)
 {
 	int ret;
+	struct dmar_res_callback validate_drhd_cb = {
+		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
+		.ignore_unhandled = true,
+	};
 
 	down_write(&dmar_global_lock);
 	ret = dmar_table_detect();
 	if (ret)
-		ret = check_zero_address();
-	{
-		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
-			iommu_detected = 1;
-			/* Make sure ACS will be enabled */
-			pci_request_acs();
-		}
+		ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
+					    &validate_drhd_cb);
+	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+		iommu_detected = 1;
+		/* Make sure ACS will be enabled */
+		pci_request_acs();
+	}
 
 #ifdef CONFIG_X86
-		if (ret)
-			x86_init.iommu.iommu_init = intel_iommu_init;
+	if (ret)
+		x86_init.iommu.iommu_init = intel_iommu_init;
 #endif
-	}
+
 	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
 	dmar_tbl = NULL;
 	up_write(&dmar_global_lock);
@@ -931,11 +974,32 @@ out:
 	return err;
 }
 
+static int dmar_alloc_seq_id(struct intel_iommu *iommu)
+{
+	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
+					    DMAR_UNITS_SUPPORTED);
+	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
+		iommu->seq_id = -1;
+	} else {
+		set_bit(iommu->seq_id, dmar_seq_ids);
+		sprintf(iommu->name, "dmar%d", iommu->seq_id);
+	}
+
+	return iommu->seq_id;
+}
+
+static void dmar_free_seq_id(struct intel_iommu *iommu)
+{
+	if (iommu->seq_id >= 0) {
+		clear_bit(iommu->seq_id, dmar_seq_ids);
+		iommu->seq_id = -1;
+	}
+}
+
 static int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
 	struct intel_iommu *iommu;
 	u32 ver, sts;
-	static int iommu_allocated = 0;
 	int agaw = 0;
 	int msagaw = 0;
 	int err;
@@ -949,13 +1013,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
 	if (!iommu)
 		return -ENOMEM;
 
-	iommu->seq_id = iommu_allocated++;
-	sprintf (iommu->name, "dmar%d", iommu->seq_id);
+	if (dmar_alloc_seq_id(iommu) < 0) {
+		pr_err("IOMMU: failed to allocate seq_id\n");
+		err = -ENOSPC;
+		goto error;
+	}
 
 	err = map_iommu(iommu, drhd->reg_base_addr);
 	if (err) {
 		pr_err("IOMMU: failed to map %s\n", iommu->name);
-		goto error;
+		goto error_free_seq_id;
 	}
 
 	err = -EINVAL;
@@ -1005,9 +1072,11 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
 
 	return 0;
 
- err_unmap:
+err_unmap:
 	unmap_iommu(iommu);
- error:
+error_free_seq_id:
+	dmar_free_seq_id(iommu);
+error:
 	kfree(iommu);
 	return err;
 }
@@ -1031,6 +1100,7 @@ static void free_iommu(struct intel_iommu *iommu)
 	if (iommu->reg)
 		unmap_iommu(iommu);
 
+	dmar_free_seq_id(iommu);
 	kfree(iommu);
 }
 
@@ -1661,12 +1731,17 @@ int __init dmar_ir_support(void)
 	return dmar->flags & 0x1;
 }
 
+/* Check whether DMAR units are in use */
+static inline bool dmar_in_use(void)
+{
+	return irq_remapping_enabled || intel_iommu_enabled;
+}
+
 static int __init dmar_free_unused_resources(void)
 {
 	struct dmar_drhd_unit *dmaru, *dmaru_n;
 
-	/* DMAR units are in use */
-	if (irq_remapping_enabled || intel_iommu_enabled)
+	if (dmar_in_use())
 		return 0;
 
 	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
@@ -1684,3 +1759,242 @@ static int __init dmar_free_unused_resources(void)
 
 late_initcall(dmar_free_unused_resources);
 IOMMU_INIT_POST(detect_intel_iommu);
+
+/*
+ * DMAR Hotplug Support
+ * For more details, please refer to Intel(R) Virtualization Technology
+ * for Directed-IO Architecture Specifiction, Rev 2.2, Section 8.8
+ * "Remapping Hardware Unit Hot Plug".
+ */
+static u8 dmar_hp_uuid[] = {
+	/* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
+	/* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
+};
+
+/*
+ * Currently there's only one revision and BIOS will not check the revision id,
+ * so use 0 for safety.
+ */
+#define	DMAR_DSM_REV_ID			0
+#define	DMAR_DSM_FUNC_DRHD		1
+#define	DMAR_DSM_FUNC_ATSR		2
+#define	DMAR_DSM_FUNC_RHSA		3
+
+static inline bool dmar_detect_dsm(acpi_handle handle, int func)
+{
+	return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
+}
+
+static int dmar_walk_dsm_resource(acpi_handle handle, int func,
+				  dmar_res_handler_t handler, void *arg)
+{
+	int ret = -ENODEV;
+	union acpi_object *obj;
+	struct acpi_dmar_header *start;
+	struct dmar_res_callback callback;
+	static int res_type[] = {
+		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
+		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
+		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
+	};
+
+	if (!dmar_detect_dsm(handle, func))
+		return 0;
+
+	obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
+				      func, NULL, ACPI_TYPE_BUFFER);
+	if (!obj)
+		return -ENODEV;
+
+	memset(&callback, 0, sizeof(callback));
+	callback.cb[res_type[func]] = handler;
+	callback.arg[res_type[func]] = arg;
+	start = (struct acpi_dmar_header *)obj->buffer.pointer;
+	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
+
+	ACPI_FREE(obj);
+
+	return ret;
+}
+
+static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
+{
+	int ret;
+	struct dmar_drhd_unit *dmaru;
+
+	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+	if (!dmaru)
+		return -ENODEV;
+
+	ret = dmar_ir_hotplug(dmaru, true);
+	if (ret == 0)
+		ret = dmar_iommu_hotplug(dmaru, true);
+
+	return ret;
+}
+
+static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
+{
+	int i, ret;
+	struct device *dev;
+	struct dmar_drhd_unit *dmaru;
+
+	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+	if (!dmaru)
+		return 0;
+
+	/*
+	 * All PCI devices managed by this unit should have been destroyed.
+	 */
+	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
+		for_each_active_dev_scope(dmaru->devices,
+					  dmaru->devices_cnt, i, dev)
+			return -EBUSY;
+
+	ret = dmar_ir_hotplug(dmaru, false);
+	if (ret == 0)
+		ret = dmar_iommu_hotplug(dmaru, false);
+
+	return ret;
+}
+
+static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
+{
+	struct dmar_drhd_unit *dmaru;
+
+	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+	if (dmaru) {
+		list_del_rcu(&dmaru->list);
+		synchronize_rcu();
+		dmar_free_drhd(dmaru);
+	}
+
+	return 0;
+}
+
+static int dmar_hotplug_insert(acpi_handle handle)
+{
+	int ret;
+	int drhd_count = 0;
+
+	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+				     &dmar_validate_one_drhd, (void *)1);
+	if (ret)
+		goto out;
+
+	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+				     &dmar_parse_one_drhd, (void *)&drhd_count);
+	if (ret == 0 && drhd_count == 0) {
+		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
+		goto out;
+	} else if (ret) {
+		goto release_drhd;
+	}
+
+	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
+				     &dmar_parse_one_rhsa, NULL);
+	if (ret)
+		goto release_drhd;
+
+	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+				     &dmar_parse_one_atsr, NULL);
+	if (ret)
+		goto release_atsr;
+
+	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+				     &dmar_hp_add_drhd, NULL);
+	if (!ret)
+		return 0;
+
+	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+			       &dmar_hp_remove_drhd, NULL);
+release_atsr:
+	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+			       &dmar_release_one_atsr, NULL);
+release_drhd:
+	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+			       &dmar_hp_release_drhd, NULL);
+out:
+	return ret;
+}
+
+static int dmar_hotplug_remove(acpi_handle handle)
+{
+	int ret;
+
+	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+				     &dmar_check_one_atsr, NULL);
+	if (ret)
+		return ret;
+
+	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+				     &dmar_hp_remove_drhd, NULL);
+	if (ret == 0) {
+		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+					       &dmar_release_one_atsr, NULL));
+		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+					       &dmar_hp_release_drhd, NULL));
+	} else {
+		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+				       &dmar_hp_add_drhd, NULL);
+	}
+
+	return ret;
+}
+
+static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
+				       void *context, void **retval)
+{
+	acpi_handle *phdl = retval;
+
+	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
+		*phdl = handle;
+		return AE_CTRL_TERMINATE;
+	}
+
+	return AE_OK;
+}
+
+static int dmar_device_hotplug(acpi_handle handle, bool insert)
+{
+	int ret;
+	acpi_handle tmp = NULL;
+	acpi_status status;
+
+	if (!dmar_in_use())
+		return 0;
+
+	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
+		tmp = handle;
+	} else {
+		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+					     ACPI_UINT32_MAX,
+					     dmar_get_dsm_handle,
+					     NULL, NULL, &tmp);
+		if (ACPI_FAILURE(status)) {
+			pr_warn("Failed to locate _DSM method.\n");
+			return -ENXIO;
+		}
+	}
+	if (tmp == NULL)
+		return 0;
+
+	down_write(&dmar_global_lock);
+	if (insert)
+		ret = dmar_hotplug_insert(tmp);
+	else
+		ret = dmar_hotplug_remove(tmp);
+	up_write(&dmar_global_lock);
+
+	return ret;
+}
+
+int dmar_device_add(acpi_handle handle)
+{
+	return dmar_device_hotplug(handle, true);
+}
+
+int dmar_device_remove(acpi_handle handle)
+{
+	return dmar_device_hotplug(handle, false);
+}
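[The dmar_res_callback structure introduced above turns the old switch-based DMAR parsing into table-driven dispatch, so one walker serves both the boot-time ACPI table and the buffers returned by the hotplug _DSM. A sketch of how a caller wires up a handler, mirroring the cb initializer in parse_dmar_table(); the counting handler itself is illustrative:]

    /* Count DRHD entries via the table-driven walker (illustrative). */
    static int count_drhd(struct acpi_dmar_header *header, void *arg)
    {
    	(*(int *)arg)++;		/* one more hardware unit seen */
    	return 0;			/* non-zero would stop the walk */
    }

    static int count_drhds_in_table(struct acpi_table_dmar *dmar)
    {
    	int drhd_count = 0;
    	struct dmar_res_callback cb = {
    		.ignore_unhandled = true,	/* skip RMRR, ATSR, ... */
    		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &count_drhd,
    		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
    	};

    	return dmar_walk_dmar_table(dmar, &cb) ? -1 : drhd_count;
    }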
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 74233186f6f7..28372b85d8da 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1178,6 +1178,7 @@ static const struct iommu_ops exynos_iommu_ops = {
 	.detach_dev = exynos_iommu_detach_device,
 	.map = exynos_iommu_map,
 	.unmap = exynos_iommu_unmap,
+	.map_sg = default_iommu_map_sg,
 	.iova_to_phys = exynos_iommu_iova_to_phys,
 	.add_device = exynos_iommu_add_device,
 	.remove_device = exynos_iommu_remove_device,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a27d6cb1a793..1232336b960e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root)
 }
 static inline void set_root_value(struct root_entry *root, unsigned long value)
 {
+	root->val &= ~VTD_PAGE_MASK;
 	root->val |= value & VTD_PAGE_MASK;
 }
 
@@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context,
 static inline void context_set_address_root(struct context_entry *context,
 					    unsigned long value)
 {
+	context->lo &= ~VTD_PAGE_MASK;
 	context->lo |= value & VTD_PAGE_MASK;
 }
 
@@ -328,17 +330,10 @@ static int hw_pass_through = 1;
 /* si_domain contains mulitple devices */
 #define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)
 
-/* define the limit of IOMMUs supported in each domain */
-#ifdef	CONFIG_X86
-# define	IOMMU_UNITS_SUPPORTED	MAX_IO_APICS
-#else
-# define	IOMMU_UNITS_SUPPORTED	64
-#endif
-
 struct dmar_domain {
 	int	id;			/* domain id */
 	int	nid;			/* node id */
-	DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
+	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
 					/* bitmap of iommus this domain uses*/
 
 	struct list_head devices;	/* all devices' list */
@@ -1132,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 	unsigned long flags;
 
 	root = (struct root_entry *)alloc_pgtable_page(iommu->node);
-	if (!root)
+	if (!root) {
+		pr_err("IOMMU: allocating root entry for %s failed\n",
+			iommu->name);
 		return -ENOMEM;
+	}
 
 	__iommu_flush_cache(iommu, root, ROOT_SIZE);
 
@@ -1473,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	return 0;
 }
 
-static void free_dmar_iommu(struct intel_iommu *iommu)
+static void disable_dmar_iommu(struct intel_iommu *iommu)
 {
 	struct dmar_domain *domain;
 	int i;
@@ -1497,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 
 	if (iommu->gcmd & DMA_GCMD_TE)
 		iommu_disable_translation(iommu);
+}
 
-	kfree(iommu->domains);
-	kfree(iommu->domain_ids);
-	iommu->domains = NULL;
-	iommu->domain_ids = NULL;
+static void free_dmar_iommu(struct intel_iommu *iommu)
+{
+	if ((iommu->domains) && (iommu->domain_ids)) {
+		kfree(iommu->domains);
+		kfree(iommu->domain_ids);
+		iommu->domains = NULL;
+		iommu->domain_ids = NULL;
+	}
 
 	g_iommus[iommu->seq_id] = NULL;
 
@@ -1983,7 +1986,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 {
 	struct dma_pte *first_pte = NULL, *pte = NULL;
 	phys_addr_t uninitialized_var(pteval);
-	unsigned long sg_res;
+	unsigned long sg_res = 0;
 	unsigned int largepage_lvl = 0;
 	unsigned long lvl_pages = 0;
 
@@ -1994,10 +1997,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
 	prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
 
-	if (sg)
-		sg_res = 0;
-	else {
-		sg_res = nr_pages + 1;
+	if (!sg) {
+		sg_res = nr_pages;
 		pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
 	}
 
@@ -2708,6 +2709,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
 	return 0;
 }
 
+static void intel_iommu_init_qi(struct intel_iommu *iommu)
+{
+	/*
+	 * Start from the sane iommu hardware state.
+	 * If the queued invalidation is already initialized by us
+	 * (for example, while enabling interrupt-remapping) then
+	 * we got the things already rolling from a sane state.
+	 */
+	if (!iommu->qi) {
+		/*
+		 * Clear any previous faults.
+		 */
+		dmar_fault(-1, iommu);
+		/*
+		 * Disable queued invalidation if supported and already enabled
+		 * before OS handover.
+		 */
+		dmar_disable_qi(iommu);
+	}
+
+	if (dmar_enable_qi(iommu)) {
+		/*
+		 * Queued Invalidate not enabled, use Register Based Invalidate
+		 */
+		iommu->flush.flush_context = __iommu_flush_context;
+		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+		pr_info("IOMMU: %s using Register based invalidation\n",
+			iommu->name);
+	} else {
+		iommu->flush.flush_context = qi_flush_context;
+		iommu->flush.flush_iotlb = qi_flush_iotlb;
+		pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
+	}
+}
+
 static int __init init_dmars(void)
 {
 	struct dmar_drhd_unit *drhd;
@@ -2728,14 +2764,18 @@ static int __init init_dmars(void)
 		 * threaded kernel __init code path all other access are read
 		 * only
 		 */
-		if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
+		if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
 			g_num_of_iommus++;
 			continue;
 		}
 		printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
-			    IOMMU_UNITS_SUPPORTED);
+			    DMAR_UNITS_SUPPORTED);
 	}
 
+	/* Preallocate enough resources for IOMMU hot-addition */
+	if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
+		g_num_of_iommus = DMAR_UNITS_SUPPORTED;
+
 	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
 			GFP_KERNEL);
 	if (!g_iommus) {
@@ -2764,58 +2804,14 @@ static int __init init_dmars(void)
 		 * among all IOMMU's. Need to Split it later.
 		 */
 		ret = iommu_alloc_root_entry(iommu);
-		if (ret) {
-			printk(KERN_ERR "IOMMU: allocate root entry failed\n");
+		if (ret)
 			goto free_iommu;
-		}
 		if (!ecap_pass_through(iommu->ecap))
 			hw_pass_through = 0;
 	}
 
-	/*
-	 * Start from the sane iommu hardware state.
-	 */
-	for_each_active_iommu(iommu, drhd) {
-		/*
-		 * If the queued invalidation is already initialized by us
-		 * (for example, while enabling interrupt-remapping) then
-		 * we got the things already rolling from a sane state.
-		 */
-		if (iommu->qi)
-			continue;
-
-		/*
-		 * Clear any previous faults.
-		 */
-		dmar_fault(-1, iommu);
-		/*
-		 * Disable queued invalidation if supported and already enabled
-		 * before OS handover.
-		 */
-		dmar_disable_qi(iommu);
-	}
-
-	for_each_active_iommu(iommu, drhd) {
-		if (dmar_enable_qi(iommu)) {
-			/*
-			 * Queued Invalidate not enabled, use Register Based
-			 * Invalidate
-			 */
-			iommu->flush.flush_context = __iommu_flush_context;
-			iommu->flush.flush_iotlb = __iommu_flush_iotlb;
-			printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
-			       "invalidation\n",
-				iommu->seq_id,
-			       (unsigned long long)drhd->reg_base_addr);
-		} else {
-			iommu->flush.flush_context = qi_flush_context;
-			iommu->flush.flush_iotlb = qi_flush_iotlb;
-			printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
-			       "invalidation\n",
-				iommu->seq_id,
-			       (unsigned long long)drhd->reg_base_addr);
-		}
-	}
+	for_each_active_iommu(iommu, drhd)
+		intel_iommu_init_qi(iommu);
 
 	if (iommu_pass_through)
 		iommu_identity_mapping |= IDENTMAP_ALL;
@@ -2901,8 +2897,10 @@ static int __init init_dmars(void)
 	return 0;
 
 free_iommu:
-	for_each_active_iommu(iommu, drhd)
+	for_each_active_iommu(iommu, drhd) {
+		disable_dmar_iommu(iommu);
 		free_dmar_iommu(iommu);
+	}
 	kfree(deferred_flush);
 free_g_iommus:
 	kfree(g_iommus);
@@ -3682,7 +3680,7 @@ static inline void init_iommu_pm_ops(void) {}
 #endif	/* CONFIG_PM */
 
-int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
 	struct dmar_rmrr_unit *rmrru;
@@ -3708,17 +3706,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
 	return 0;
 }
 
-int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
+{
+	struct dmar_atsr_unit *atsru;
+	struct acpi_dmar_atsr *tmp;
+
+	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+		tmp = (struct acpi_dmar_atsr *)atsru->hdr;
+		if (atsr->segment != tmp->segment)
+			continue;
+		if (atsr->header.length != tmp->header.length)
+			continue;
+		if (memcmp(atsr, tmp, atsr->header.length) == 0)
+			return atsru;
+	}
+
+	return NULL;
+}
+
+int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
 {
 	struct acpi_dmar_atsr *atsr;
 	struct dmar_atsr_unit *atsru;
 
+	if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
+		return 0;
+
 	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
-	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+	atsru = dmar_find_atsr(atsr);
+	if (atsru)
+		return 0;
+
+	atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
 	if (!atsru)
 		return -ENOMEM;
 
-	atsru->hdr = hdr;
+	/*
+	 * If memory is allocated from slab by ACPI _DSM method, we need to
+	 * copy the memory content because the memory buffer will be freed
+	 * on return.
+	 */
+	atsru->hdr = (void *)(atsru + 1);
+	memcpy(atsru->hdr, hdr, hdr->length);
 	atsru->include_all = atsr->flags & 0x1;
 	if (!atsru->include_all) {
 		atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
@@ -3741,6 +3770,138 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
 	kfree(atsru);
 }
 
+int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+	struct acpi_dmar_atsr *atsr;
+	struct dmar_atsr_unit *atsru;
+
+	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+	atsru = dmar_find_atsr(atsr);
+	if (atsru) {
+		list_del_rcu(&atsru->list);
+		synchronize_rcu();
+		intel_iommu_free_atsr(atsru);
+	}
+
+	return 0;
+}
+
+int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+	int i;
+	struct device *dev;
+	struct acpi_dmar_atsr *atsr;
+	struct dmar_atsr_unit *atsru;
+
+	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+	atsru = dmar_find_atsr(atsr);
+	if (!atsru)
+		return 0;
+
+	if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
+		for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
+					  i, dev)
+			return -EBUSY;
+
+	return 0;
+}
+
+static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
+{
+	int sp, ret = 0;
+	struct intel_iommu *iommu = dmaru->iommu;
+
+	if (g_iommus[iommu->seq_id])
+		return 0;
+
+	if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
+		pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
+			iommu->name);
+		return -ENXIO;
+	}
+	if (!ecap_sc_support(iommu->ecap) &&
+	    domain_update_iommu_snooping(iommu)) {
+		pr_warn("IOMMU: %s doesn't support snooping.\n",
+			iommu->name);
+		return -ENXIO;
+	}
+	sp = domain_update_iommu_superpage(iommu) - 1;
+	if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
+		pr_warn("IOMMU: %s doesn't support large page.\n",
+			iommu->name);
+		return -ENXIO;
+	}
+
+	/*
+	 * Disable translation if already enabled prior to OS handover.
+	 */
+	if (iommu->gcmd & DMA_GCMD_TE)
+		iommu_disable_translation(iommu);
+
+	g_iommus[iommu->seq_id] = iommu;
+	ret = iommu_init_domains(iommu);
+	if (ret == 0)
+		ret = iommu_alloc_root_entry(iommu);
+	if (ret)
+		goto out;
+
+	if (dmaru->ignored) {
+		/*
+		 * we always have to disable PMRs or DMA may fail on this device
+		 */
+		if (force_on)
+			iommu_disable_protect_mem_regions(iommu);
+		return 0;
+	}
+
+	intel_iommu_init_qi(iommu);
+	iommu_flush_write_buffer(iommu);
+	ret = dmar_set_interrupt(iommu);
+	if (ret)
+		goto disable_iommu;
+
+	iommu_set_root_entry(iommu);
+	iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+	iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+	iommu_enable_translation(iommu);
+
+	if (si_domain) {
+		ret = iommu_attach_domain(si_domain, iommu);
+		if (ret < 0 || si_domain->id != ret)
+			goto disable_iommu;
+		domain_attach_iommu(si_domain, iommu);
+	}
+
+	iommu_disable_protect_mem_regions(iommu);
+	return 0;
+
+disable_iommu:
+	disable_dmar_iommu(iommu);
+out:
+	free_dmar_iommu(iommu);
+	return ret;
+}
+
+int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+	int ret = 0;
+	struct intel_iommu *iommu = dmaru->iommu;
+
+	if (!intel_iommu_enabled)
+		return 0;
+	if (iommu == NULL)
+		return -EINVAL;
+
+	if (insert) {
+		ret = intel_iommu_add(dmaru);
+	} else {
+		disable_dmar_iommu(iommu);
+		free_dmar_iommu(iommu);
+	}
+
+	return ret;
+}
+
 static void intel_iommu_free_dmars(void)
 {
 	struct dmar_rmrr_unit *rmrru, *rmrr_n;
@@ -4467,6 +4628,7 @@ static const struct iommu_ops intel_iommu_ops = {
 	.detach_dev	= intel_iommu_detach_device,
 	.map		= intel_iommu_map,
 	.unmap		= intel_iommu_unmap,
+	.map_sg		= default_iommu_map_sg,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.add_device	= intel_iommu_add_device,
 	.remove_device	= intel_iommu_remove_device,
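[Why the one-line changes to set_root_value() and context_set_address_root() matter: with hotplug these helpers can now run on entries that already hold an address, and a plain OR merges stale page-frame bits with the new ones. A worked illustration of the failure mode, with made-up addresses; VTD_PAGE_MASK covers everything above the low 12 flag bits:]

    #include <linux/types.h>

    #define VTD_PAGE_SHIFT	12
    #define VTD_PAGE_MASK	(((u64)-1) << VTD_PAGE_SHIFT)

    static u64 repoint_root(u64 val, u64 new_addr)
    {
    	/* old code did only: val |= new_addr & VTD_PAGE_MASK;
    	 * 0x12345001 | 0x45678000 == 0x5777d001 -- neither table's address
    	 */
    	val &= ~VTD_PAGE_MASK;		 /* drop stale frame, keep flag bits */
    	val |= new_addr & VTD_PAGE_MASK; /* 0x45678001, as intended */
    	return val;
    }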
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 7c80661b35c1..27541d440849 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -36,7 +36,6 @@ struct hpet_scope {
 
 static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static struct hpet_scope ir_hpet[MAX_HPET_TBS];
-static int ir_ioapic_num, ir_hpet_num;
 
 /*
  * Lock ordering:
@@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
 	int i;
 
 	for (i = 0; i < MAX_HPET_TBS; i++)
-		if (ir_hpet[i].id == hpet_id)
+		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
 			return ir_hpet[i].iommu;
 	return NULL;
 }
@@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic)
 	int i;
 
 	for (i = 0; i < MAX_IO_APICS; i++)
-		if (ir_ioapic[i].id == apic)
+		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
 			return ir_ioapic[i].iommu;
 	return NULL;
 }
@@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
 
 	down_read(&dmar_global_lock);
 	for (i = 0; i < MAX_IO_APICS; i++) {
-		if (ir_ioapic[i].id == apic) {
+		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
 			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
 			break;
 		}
@@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id)
 
 	down_read(&dmar_global_lock);
 	for (i = 0; i < MAX_HPET_TBS; i++) {
-		if (ir_hpet[i].id == id) {
+		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
 			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
 			break;
 		}
@@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
 	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
-
-static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
+static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 {
 	struct ir_table *ir_table;
 	struct page *pages;
 	unsigned long *bitmap;
 
-	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
-					     GFP_ATOMIC);
+	if (iommu->ir_table)
+		return 0;
 
-	if (!iommu->ir_table)
+	ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
+	if (!ir_table)
 		return -ENOMEM;
 
 	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
@@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
 	if (!pages) {
 		pr_err("IR%d: failed to allocate pages of order %d\n",
 		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
-		kfree(iommu->ir_table);
-		return -ENOMEM;
+		goto out_free_table;
 	}
 
 	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
 			 sizeof(long), GFP_ATOMIC);
 	if (bitmap == NULL) {
 		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
-		__free_pages(pages, INTR_REMAP_PAGE_ORDER);
-		kfree(ir_table);
-		return -ENOMEM;
+		goto out_free_pages;
 	}
 
 	ir_table->base = page_address(pages);
 	ir_table->bitmap = bitmap;
-
-	iommu_set_irq_remapping(iommu, mode);
+	iommu->ir_table = ir_table;
 	return 0;
+
+out_free_pages:
+	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
+out_free_table:
+	kfree(ir_table);
+	return -ENOMEM;
+}
+
+static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
+{
+	if (iommu && iommu->ir_table) {
+		free_pages((unsigned long)iommu->ir_table->base,
+			   INTR_REMAP_PAGE_ORDER);
+		kfree(iommu->ir_table->bitmap);
+		kfree(iommu->ir_table);
+		iommu->ir_table = NULL;
+	}
 }
 
 /*
@@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void)
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
 
-		if (intel_setup_irq_remapping(iommu, eim))
+		if (intel_setup_irq_remapping(iommu))
 			goto error;
 
+		iommu_set_irq_remapping(iommu, eim);
 		setup = 1;
 	}
 
@@ -689,9 +702,11 @@ static int __init intel_enable_irq_remapping(void)
 	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
 
 error:
-	/*
-	 * handle error condition gracefully here!
-	 */
+	for_each_iommu(iommu, drhd)
+		if (ecap_ir_support(iommu->ecap)) {
+			iommu_disable_irq_remapping(iommu);
+			intel_teardown_irq_remapping(iommu);
+		}
 
 	if (x2apic_present)
 		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
@@ -699,12 +714,13 @@ error:
 	return -1;
 }
 
-static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
-				      struct intel_iommu *iommu)
+static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
+				   struct intel_iommu *iommu,
+				   struct acpi_dmar_hardware_unit *drhd)
 {
 	struct acpi_dmar_pci_path *path;
 	u8 bus;
-	int count;
+	int count, free = -1;
 
 	bus = scope->bus;
 	path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -720,19 +736,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
 					   PCI_SECONDARY_BUS);
 		path++;
 	}
-	ir_hpet[ir_hpet_num].bus   = bus;
-	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
-	ir_hpet[ir_hpet_num].iommu = iommu;
-	ir_hpet[ir_hpet_num].id    = scope->enumeration_id;
-	ir_hpet_num++;
+
+	for (count = 0; count < MAX_HPET_TBS; count++) {
+		if (ir_hpet[count].iommu == iommu &&
+		    ir_hpet[count].id == scope->enumeration_id)
+			return 0;
+		else if (ir_hpet[count].iommu == NULL && free == -1)
+			free = count;
+	}
+	if (free == -1) {
+		pr_warn("Exceeded Max HPET blocks\n");
+		return -ENOSPC;
+	}
+
+	ir_hpet[free].iommu = iommu;
+	ir_hpet[free].id    = scope->enumeration_id;
+	ir_hpet[free].bus   = bus;
+	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
+	pr_info("HPET id %d under DRHD base 0x%Lx\n",
+		scope->enumeration_id, drhd->address);
+
+	return 0;
 }
 
-static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
-				      struct intel_iommu *iommu)
+static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
+				     struct intel_iommu *iommu,
+				     struct acpi_dmar_hardware_unit *drhd)
 {
 	struct acpi_dmar_pci_path *path;
 	u8 bus;
-	int count;
+	int count, free = -1;
 
 	bus = scope->bus;
 	path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -749,54 +782,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
 		path++;
 	}
 
-	ir_ioapic[ir_ioapic_num].bus   = bus;
-	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
-	ir_ioapic[ir_ioapic_num].iommu = iommu;
-	ir_ioapic[ir_ioapic_num].id    = scope->enumeration_id;
-	ir_ioapic_num++;
+	for (count = 0; count < MAX_IO_APICS; count++) {
+		if (ir_ioapic[count].iommu == iommu &&
+		    ir_ioapic[count].id == scope->enumeration_id)
+			return 0;
+		else if (ir_ioapic[count].iommu == NULL && free == -1)
+			free = count;
+	}
+	if (free == -1) {
+		pr_warn("Exceeded Max IO APICS\n");
+		return -ENOSPC;
+	}
+
+	ir_ioapic[free].bus   = bus;
+	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
+	ir_ioapic[free].iommu = iommu;
+	ir_ioapic[free].id    = scope->enumeration_id;
+	pr_info("IOAPIC id %d under DRHD base  0x%Lx IOMMU %d\n",
+		scope->enumeration_id, drhd->address, iommu->seq_id);
+
+	return 0;
 }
 
 static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
 				      struct intel_iommu *iommu)
 {
+	int ret = 0;
 	struct acpi_dmar_hardware_unit *drhd;
 	struct acpi_dmar_device_scope *scope;
 	void *start, *end;
 
 	drhd = (struct acpi_dmar_hardware_unit *)header;
-
 	start = (void *)(drhd + 1);
 	end = ((void *)drhd) + header->length;
 
-	while (start < end) {
+	while (start < end && ret == 0) {
 		scope = start;
-		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
-			if (ir_ioapic_num == MAX_IO_APICS) {
-				printk(KERN_WARNING "Exceeded Max IO APICS\n");
-				return -1;
-			}
-
-			printk(KERN_INFO "IOAPIC id %d under DRHD base "
-			       " 0x%Lx IOMMU %d\n", scope->enumeration_id,
-			       drhd->address, iommu->seq_id);
+		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
+			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
+		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
+			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
+		start += scope->length;
+	}
 
-			ir_parse_one_ioapic_scope(scope, iommu);
-		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
-			if (ir_hpet_num == MAX_HPET_TBS) {
-				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
-				return -1;
-			}
+	return ret;
+}
 
-			printk(KERN_INFO "HPET id %d under DRHD base"
-			       " 0x%Lx\n", scope->enumeration_id,
-			       drhd->address);
+static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
+{
+	int i;
 
-			ir_parse_one_hpet_scope(scope, iommu);
-		}
-		start += scope->length;
-	}
+	for (i = 0; i < MAX_HPET_TBS; i++)
+		if (ir_hpet[i].iommu == iommu)
+			ir_hpet[i].iommu = NULL;
 
-	return 0;
+	for (i = 0; i < MAX_IO_APICS; i++)
+		if (ir_ioapic[i].iommu == iommu)
+			ir_ioapic[i].iommu = NULL;
 }
 
 /*
@@ -1171,3 +1213,86 @@ struct irq_remap_ops intel_irq_remap_ops = {
 	.msi_setup_irq		= intel_msi_setup_irq,
 	.alloc_hpet_msi		= intel_alloc_hpet_msi,
 };
+
+/*
+ * Support of Interrupt Remapping Unit Hotplug
+ */
+static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
+{
+	int ret;
+	int eim = x2apic_enabled();
+
+	if (eim && !ecap_eim_support(iommu->ecap)) {
+		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
+			iommu->reg_phys, iommu->ecap);
+		return -ENODEV;
+	}
+
+	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
+		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
+			iommu->reg_phys);
+		return -ENODEV;
+	}
+
+	/* TODO: check all IOAPICs are covered by IOMMU */
+
+	/* Setup Interrupt-remapping now. */
+	ret = intel_setup_irq_remapping(iommu);
+	if (ret) {
+		pr_err("DRHD %Lx: failed to allocate resource\n",
+		       iommu->reg_phys);
+		ir_remove_ioapic_hpet_scope(iommu);
+		return ret;
+	}
+
+	if (!iommu->qi) {
+		/* Clear previous faults. */
+		dmar_fault(-1, iommu);
+		iommu_disable_irq_remapping(iommu);
+		dmar_disable_qi(iommu);
+	}
+
+	/* Enable queued invalidation */
+	ret = dmar_enable_qi(iommu);
+	if (!ret) {
+		iommu_set_irq_remapping(iommu, eim);
+	} else {
+		pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
+		       iommu->reg_phys, iommu->ecap, ret);
+		intel_teardown_irq_remapping(iommu);
+		ir_remove_ioapic_hpet_scope(iommu);
+	}
+
+	return ret;
+}
+
+int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+	int ret = 0;
+	struct intel_iommu *iommu = dmaru->iommu;
+
+	if (!irq_remapping_enabled)
+		return 0;
+	if (iommu == NULL)
+		return -EINVAL;
+	if (!ecap_ir_support(iommu->ecap))
+		return 0;
+
+	if (insert) {
+		if (!iommu->ir_table)
+			ret = dmar_ir_add(dmaru, iommu);
+	} else {
+		if (iommu->ir_table) {
+			if (!bitmap_empty(iommu->ir_table->bitmap,
+					  INTR_REMAP_TABLE_ENTRIES)) {
+				ret = -EBUSY;
+			} else {
+				iommu_disable_irq_remapping(iommu);
+				intel_teardown_irq_remapping(iommu);
+				ir_remove_ioapic_hpet_scope(iommu);
+			}
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ed8b04867b1f..1bd63352ab17 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -818,7 +818,15 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
 		kfree(nb);
 		return err;
 	}
-	return bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+
+	err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+	if (err) {
+		bus_unregister_notifier(bus, nb);
+		kfree(nb);
+		return err;
+	}
+
+	return 0;
 }
 
 /**
@@ -836,13 +844,19 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
  */
 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
 {
+	int err;
+
 	if (bus->iommu_ops != NULL)
 		return -EBUSY;
 
 	bus->iommu_ops = ops;
 
 	/* Do IOMMU specific setup for this bus-type */
-	return iommu_bus_init(bus, ops);
+	err = iommu_bus_init(bus, ops);
+	if (err)
+		bus->iommu_ops = NULL;
+
+	return err;
 }
 EXPORT_SYMBOL_GPL(bus_set_iommu);
 
@@ -1124,6 +1138,48 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
 
+size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			    struct scatterlist *sg, unsigned int nents, int prot)
+{
+	struct scatterlist *s;
+	size_t mapped = 0;
+	unsigned int i, min_pagesz;
+	int ret;
+
+	if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+		return 0;
+
+	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+	for_each_sg(sg, s, nents, i) {
+		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+
+		/*
+		 * We are mapping on IOMMU page boundaries, so offset within
+		 * the page must be 0. However, the IOMMU may support pages
+		 * smaller than PAGE_SIZE, so s->offset may still represent
+		 * an offset of that boundary within the CPU page.
+		 */
+		if (!IS_ALIGNED(s->offset, min_pagesz))
+			goto out_err;
+
+		ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
+		if (ret)
+			goto out_err;
+
+		mapped += s->length;
+	}
+
+	return mapped;
+
+out_err:
+	/* undo mappings already done */
+	iommu_unmap(domain, iova, mapped);
+
+	return 0;
+
+}
+EXPORT_SYMBOL_GPL(default_iommu_map_sg);
+
 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
 			       phys_addr_t paddr, u64 size, int prot)
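[default_iommu_map_sg() above is the common helper that the drivers in this series plug into their new .map_sg hook; callers reach it through the iommu_map_sg() wrapper added alongside it. A short usage sketch; the buffer setup is elided and the caller's names are illustrative:]

    #include <linux/errno.h>
    #include <linux/iommu.h>
    #include <linux/scatterlist.h>

    /* Map a scatterlist at a driver-chosen IOVA in one call. */
    static int map_buffer(struct iommu_domain *domain, unsigned long iova,
    		      struct sg_table *sgt, size_t size)
    {
    	size_t mapped;

    	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
    			      IOMMU_READ | IOMMU_WRITE);
    	/* returns bytes mapped; 0 means the helper already rolled back */
    	return (mapped == size) ? 0 : -ENOMEM;
    }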
+263,7 @@ static int msm_iommu_remove(struct platform_device *pdev) static int msm_iommu_ctx_probe(struct platform_device *pdev) { - struct msm_iommu_ctx_dev *c = pdev->dev.platform_data; + struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev); struct msm_iommu_drvdata *drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; int i, ret; @@ -323,8 +322,7 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev) SET_NSCFG(drvdata->base, mid, 3); } - if (drvdata->clk) - clk_disable(drvdata->clk); + clk_disable(drvdata->clk); clk_disable(drvdata->pclk); dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num); diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 531658d17333..f3d20a2039d2 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -10,45 +10,35 @@ * published by the Free Software Foundation. */ -#include <linux/module.h> #include <linux/err.h> -#include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/uaccess.h> -#include <linux/platform_device.h> #include <linux/debugfs.h> -#include <linux/omap-iommu.h> #include <linux/platform_data/iommu-omap.h> #include "omap-iopgtable.h" #include "omap-iommu.h" -#define MAXCOLUMN 100 /* for short messages */ - static DEFINE_MUTEX(iommu_debug_lock); static struct dentry *iommu_debug_root; -static ssize_t debug_read_ver(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) +static inline bool is_omap_iommu_detached(struct omap_iommu *obj) { - u32 ver = omap_iommu_arch_version(); - char buf[MAXCOLUMN], *p = buf; - - p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf); - - return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + return !obj->domain; } static ssize_t debug_read_regs(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct device *dev = file->private_data; - struct omap_iommu *obj = dev_to_omap_iommu(dev); + struct omap_iommu *obj = file->private_data; char *p, *buf; ssize_t bytes; + if (is_omap_iommu_detached(obj)) + return -EPERM; + buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -68,11 +58,13 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct device *dev = file->private_data; - struct omap_iommu *obj = dev_to_omap_iommu(dev); + struct omap_iommu *obj = file->private_data; char *p, *buf; ssize_t bytes, rest; + if (is_omap_iommu_detached(obj)) + return -EPERM; + buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -93,133 +85,69 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, return bytes; } -static ssize_t debug_write_pagetable(struct file *file, - const char __user *userbuf, size_t count, loff_t *ppos) +static void dump_ioptable(struct seq_file *s) { - struct iotlb_entry e; - struct cr_regs cr; - int err; - struct device *dev = file->private_data; - struct omap_iommu *obj = dev_to_omap_iommu(dev); - char buf[MAXCOLUMN], *p = buf; - - count = min(count, sizeof(buf)); - - mutex_lock(&iommu_debug_lock); - if (copy_from_user(p, userbuf, count)) { - mutex_unlock(&iommu_debug_lock); - return -EFAULT; - } - - sscanf(p, "%x %x", &cr.cam, &cr.ram); - if (!cr.cam || !cr.ram) { - mutex_unlock(&iommu_debug_lock); - return -EINVAL; - } - - omap_iotlb_cr_to_e(&cr, &e); - err = omap_iopgtable_store_entry(obj, &e); - if (err) - dev_err(obj->dev, "%s: fail to store cr\n", __func__); - - 
mutex_unlock(&iommu_debug_lock); - return count; -} - -#define dump_ioptable_entry_one(lv, da, val) \ - ({ \ - int __err = 0; \ - ssize_t bytes; \ - const int maxcol = 22; \ - const char *str = "%d: %08x %08x\n"; \ - bytes = snprintf(p, maxcol, str, lv, da, val); \ - p += bytes; \ - len -= bytes; \ - if (len < maxcol) \ - __err = -ENOMEM; \ - __err; \ - }) - -static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len) -{ - int i; - u32 *iopgd; - char *p = buf; + int i, j; + u32 da; + u32 *iopgd, *iopte; + struct omap_iommu *obj = s->private; spin_lock(&obj->page_table_lock); iopgd = iopgd_offset(obj, 0); for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { - int j, err; - u32 *iopte; - u32 da; - if (!*iopgd) continue; if (!(*iopgd & IOPGD_TABLE)) { da = i << IOPGD_SHIFT; - - err = dump_ioptable_entry_one(1, da, *iopgd); - if (err) - goto out; + seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd); continue; } iopte = iopte_offset(iopgd, 0); - for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { if (!*iopte) continue; da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); - err = dump_ioptable_entry_one(2, da, *iopgd); - if (err) - goto out; + seq_printf(s, "2: 0x%08x 0x%08x\n", da, *iopte); } } -out: - spin_unlock(&obj->page_table_lock); - return p - buf; + spin_unlock(&obj->page_table_lock); } -static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, - size_t count, loff_t *ppos) +static int debug_read_pagetable(struct seq_file *s, void *data) { - struct device *dev = file->private_data; - struct omap_iommu *obj = dev_to_omap_iommu(dev); - char *p, *buf; - size_t bytes; + struct omap_iommu *obj = s->private; - buf = (char *)__get_free_page(GFP_KERNEL); - if (!buf) - return -ENOMEM; - p = buf; - - p += sprintf(p, "L: %8s %8s\n", "da:", "pa:"); - p += sprintf(p, "-----------------------------------------\n"); + if (is_omap_iommu_detached(obj)) + return -EPERM; mutex_lock(&iommu_debug_lock); - bytes = PAGE_SIZE - (p - buf); - p += dump_ioptable(obj, p, bytes); - - bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + seq_printf(s, "L: %8s %8s\n", "da:", "pte:"); + seq_puts(s, "--------------------------\n"); + dump_ioptable(s); mutex_unlock(&iommu_debug_lock); - free_page((unsigned long)buf); - return bytes; + return 0; } -#define DEBUG_FOPS(name) \ - static const struct file_operations debug_##name##_fops = { \ - .open = simple_open, \ - .read = debug_read_##name, \ - .write = debug_write_##name, \ - .llseek = generic_file_llseek, \ - }; +#define DEBUG_SEQ_FOPS_RO(name) \ + static int debug_open_##name(struct inode *inode, struct file *file) \ + { \ + return single_open(file, debug_read_##name, inode->i_private); \ + } \ + \ + static const struct file_operations debug_##name##_fops = { \ + .open = debug_open_##name, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } #define DEBUG_FOPS_RO(name) \ static const struct file_operations debug_##name##_fops = { \ @@ -228,103 +156,63 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, .llseek = generic_file_llseek, \ }; -DEBUG_FOPS_RO(ver); DEBUG_FOPS_RO(regs); DEBUG_FOPS_RO(tlb); -DEBUG_FOPS(pagetable); +DEBUG_SEQ_FOPS_RO(pagetable); #define __DEBUG_ADD_FILE(attr, mode) \ { \ struct dentry *dent; \ - dent = debugfs_create_file(#attr, mode, parent, \ - dev, &debug_##attr##_fops); \ + dent = debugfs_create_file(#attr, mode, obj->debug_dir, \ + obj, &debug_##attr##_fops); \ if (!dent) \ - return -ENOMEM; \ + goto err; \ } -#define DEBUG_ADD_FILE(name) 
__DEBUG_ADD_FILE(name, 0600) #define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400) -static int iommu_debug_register(struct device *dev, void *data) +void omap_iommu_debugfs_add(struct omap_iommu *obj) { - struct platform_device *pdev = to_platform_device(dev); - struct omap_iommu *obj = platform_get_drvdata(pdev); - struct omap_iommu_arch_data *arch_data; - struct dentry *d, *parent; - - if (!obj || !obj->dev) - return -EINVAL; - - arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); - if (!arch_data) - return -ENOMEM; - - arch_data->iommu_dev = obj; + struct dentry *d; - dev->archdata.iommu = arch_data; + if (!iommu_debug_root) + return; - d = debugfs_create_dir(obj->name, iommu_debug_root); - if (!d) - goto nomem; - parent = d; + obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root); + if (!obj->debug_dir) + return; - d = debugfs_create_u8("nr_tlb_entries", 400, parent, + d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir, (u8 *)&obj->nr_tlb_entries); if (!d) - goto nomem; + return; - DEBUG_ADD_FILE_RO(ver); DEBUG_ADD_FILE_RO(regs); DEBUG_ADD_FILE_RO(tlb); - DEBUG_ADD_FILE(pagetable); + DEBUG_ADD_FILE_RO(pagetable); - return 0; + return; -nomem: - kfree(arch_data); - return -ENOMEM; +err: + debugfs_remove_recursive(obj->debug_dir); } -static int iommu_debug_unregister(struct device *dev, void *data) +void omap_iommu_debugfs_remove(struct omap_iommu *obj) { - if (!dev->archdata.iommu) - return 0; - - kfree(dev->archdata.iommu); + if (!obj->debug_dir) + return; - dev->archdata.iommu = NULL; - - return 0; + debugfs_remove_recursive(obj->debug_dir); } -static int __init iommu_debug_init(void) +void __init omap_iommu_debugfs_init(void) { - struct dentry *d; - int err; - - d = debugfs_create_dir("iommu", NULL); - if (!d) - return -ENOMEM; - iommu_debug_root = d; - - err = omap_foreach_iommu_device(d, iommu_debug_register); - if (err) - goto err_out; - return 0; - -err_out: - debugfs_remove_recursive(iommu_debug_root); - return err; + iommu_debug_root = debugfs_create_dir("omap_iommu", NULL); + if (!iommu_debug_root) + pr_err("can't create debugfs dir\n"); } -module_init(iommu_debug_init) -static void __exit iommu_debugfs_exit(void) +void __exit omap_iommu_debugfs_exit(void) { - debugfs_remove_recursive(iommu_debug_root); - omap_foreach_iommu_device(NULL, iommu_debug_unregister); + debugfs_remove(iommu_debug_root); } -module_exit(iommu_debugfs_exit) - -MODULE_DESCRIPTION("omap iommu: debugfs interface"); -MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 36278870e84a..bbb7dcef02d3 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -76,53 +76,23 @@ struct iotlb_lock { short vict; }; -/* accommodate the difference between omap1 and omap2/3 */ -static const struct iommu_functions *arch_iommu; - static struct platform_driver omap_iommu_driver; static struct kmem_cache *iopte_cachep; /** - * omap_install_iommu_arch - Install archtecure specific iommu functions - * @ops: a pointer to architecture specific iommu functions - * - * There are several kind of iommu algorithm(tlb, pagetable) among - * omap series. This interface installs such an iommu algorighm. 
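
The installer being removed here was the hook omap-iommu2.c used to plug in its function table; with that file's contents folded into omap-iommu.c (see omap2_iommu_enable() and friends above), each indirect call through the ops table collapses into a direct call:

/* Before this patch: indirection through the installed ops table */
err = arch_iommu->enable(obj);

/* After: OMAP2/3 is the only implementation, so it is called directly */
err = omap2_iommu_enable(obj);
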
- **/ -int omap_install_iommu_arch(const struct iommu_functions *ops) -{ - if (arch_iommu) - return -EBUSY; - - arch_iommu = ops; - return 0; -} -EXPORT_SYMBOL_GPL(omap_install_iommu_arch); - -/** - * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions - * @ops: a pointer to architecture specific iommu functions - * - * This interface uninstalls the iommu algorighm installed previously. - **/ -void omap_uninstall_iommu_arch(const struct iommu_functions *ops) -{ - if (arch_iommu != ops) - pr_err("%s: not your arch\n", __func__); - - arch_iommu = NULL; -} -EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch); - -/** * omap_iommu_save_ctx - Save registers for pm off-mode support * @dev: client device **/ void omap_iommu_save_ctx(struct device *dev) { struct omap_iommu *obj = dev_to_omap_iommu(dev); + u32 *p = obj->ctx; + int i; - arch_iommu->save_ctx(obj); + for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { + p[i] = iommu_read_reg(obj, i * sizeof(u32)); + dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); + } } EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); @@ -133,28 +103,74 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); void omap_iommu_restore_ctx(struct device *dev) { struct omap_iommu *obj = dev_to_omap_iommu(dev); + u32 *p = obj->ctx; + int i; - arch_iommu->restore_ctx(obj); + for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { + iommu_write_reg(obj, p[i], i * sizeof(u32)); + dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); + } } EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); -/** - * omap_iommu_arch_version - Return running iommu arch version - **/ -u32 omap_iommu_arch_version(void) +static void __iommu_set_twl(struct omap_iommu *obj, bool on) { - return arch_iommu->version; + u32 l = iommu_read_reg(obj, MMU_CNTL); + + if (on) + iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE); + else + iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE); + + l &= ~MMU_CNTL_MASK; + if (on) + l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN); + else + l |= (MMU_CNTL_MMU_EN); + + iommu_write_reg(obj, l, MMU_CNTL); +} + +static int omap2_iommu_enable(struct omap_iommu *obj) +{ + u32 l, pa; + + if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) + return -EINVAL; + + pa = virt_to_phys(obj->iopgd); + if (!IS_ALIGNED(pa, SZ_16K)) + return -EINVAL; + + l = iommu_read_reg(obj, MMU_REVISION); + dev_info(obj->dev, "%s: version %d.%d\n", obj->name, + (l >> 4) & 0xf, l & 0xf); + + iommu_write_reg(obj, pa, MMU_TTB); + + if (obj->has_bus_err_back) + iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG); + + __iommu_set_twl(obj, true); + + return 0; +} + +static void omap2_iommu_disable(struct omap_iommu *obj) +{ + u32 l = iommu_read_reg(obj, MMU_CNTL); + + l &= ~MMU_CNTL_MASK; + iommu_write_reg(obj, l, MMU_CNTL); + + dev_dbg(obj->dev, "%s is shutting down\n", obj->name); } -EXPORT_SYMBOL_GPL(omap_iommu_arch_version); static int iommu_enable(struct omap_iommu *obj) { int err; struct platform_device *pdev = to_platform_device(obj->dev); - struct iommu_platform_data *pdata = pdev->dev.platform_data; - - if (!arch_iommu) - return -ENODEV; + struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); if (pdata && pdata->deassert_reset) { err = pdata->deassert_reset(pdev, pdata->reset_name); @@ -166,7 +182,7 @@ static int iommu_enable(struct omap_iommu *obj) pm_runtime_get_sync(obj->dev); - err = arch_iommu->enable(obj); + err = omap2_iommu_enable(obj); return err; } @@ -174,9 +190,9 @@ static int iommu_enable(struct omap_iommu *obj) static void iommu_disable(struct omap_iommu 
*obj) { struct platform_device *pdev = to_platform_device(obj->dev); - struct iommu_platform_data *pdata = pdev->dev.platform_data; + struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); - arch_iommu->disable(obj); + omap2_iommu_disable(obj); pm_runtime_put_sync(obj->dev); @@ -187,44 +203,51 @@ static void iommu_disable(struct omap_iommu *obj) /* * TLB operations */ -void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) -{ - BUG_ON(!cr || !e); - - arch_iommu->cr_to_e(cr, e); -} -EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e); - static inline int iotlb_cr_valid(struct cr_regs *cr) { if (!cr) return -EINVAL; - return arch_iommu->cr_valid(cr); -} - -static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, - struct iotlb_entry *e) -{ - if (!e) - return NULL; - - return arch_iommu->alloc_cr(obj, e); + return cr->cam & MMU_CAM_V; } static u32 iotlb_cr_to_virt(struct cr_regs *cr) { - return arch_iommu->cr_to_virt(cr); + u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK; + u32 mask = get_cam_va_mask(cr->cam & page_size); + + return cr->cam & mask; } static u32 get_iopte_attr(struct iotlb_entry *e) { - return arch_iommu->get_pte_attr(e); + u32 attr; + + attr = e->mixed << 5; + attr |= e->endian; + attr |= e->elsz >> 3; + attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) || + (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6); + return attr; } static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da) { - return arch_iommu->fault_isr(obj, da); + u32 status, fault_addr; + + status = iommu_read_reg(obj, MMU_IRQSTATUS); + status &= MMU_IRQ_MASK; + if (!status) { + *da = 0; + return 0; + } + + fault_addr = iommu_read_reg(obj, MMU_FAULT_AD); + *da = fault_addr; + + iommu_write_reg(obj, status, MMU_IRQSTATUS); + + return status; } static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) @@ -250,31 +273,19 @@ static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l) static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) { - arch_iommu->tlb_read_cr(obj, cr); + cr->cam = iommu_read_reg(obj, MMU_READ_CAM); + cr->ram = iommu_read_reg(obj, MMU_READ_RAM); } static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) { - arch_iommu->tlb_load_cr(obj, cr); + iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM); + iommu_write_reg(obj, cr->ram, MMU_RAM); iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); iommu_write_reg(obj, 1, MMU_LD_TLB); } -/** - * iotlb_dump_cr - Dump an iommu tlb entry into buf - * @obj: target iommu - * @cr: contents of cam and ram register - * @buf: output buffer - **/ -static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, - char *buf) -{ - BUG_ON(!cr || !buf); - - return arch_iommu->dump_cr(obj, cr, buf); -} - /* only used in iotlb iteration for-loop */ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) { @@ -289,12 +300,36 @@ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) return cr; } +#ifdef PREFETCH_IOTLB +static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, + struct iotlb_entry *e) +{ + struct cr_regs *cr; + + if (!e) + return NULL; + + if (e->da & ~(get_cam_va_mask(e->pgsz))) { + dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__, + e->da); + return ERR_PTR(-EINVAL); + } + + cr = kmalloc(sizeof(*cr), GFP_KERNEL); + if (!cr) + return ERR_PTR(-ENOMEM); + + cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid; + cr->ram = e->pa | e->endian | e->elsz | e->mixed; + + return cr; +} + /** * load_iotlb_entry - Set an iommu tlb entry * @obj: 
target iommu * @e: an iommu tlb entry info **/ -#ifdef PREFETCH_IOTLB static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) { int err = 0; @@ -423,7 +458,45 @@ static void flush_iotlb_all(struct omap_iommu *obj) pm_runtime_put_sync(obj->dev); } -#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) +#ifdef CONFIG_OMAP_IOMMU_DEBUG + +#define pr_reg(name) \ + do { \ + ssize_t bytes; \ + const char *str = "%20s: %08x\n"; \ + const int maxcol = 32; \ + bytes = snprintf(p, maxcol, str, __stringify(name), \ + iommu_read_reg(obj, MMU_##name)); \ + p += bytes; \ + len -= bytes; \ + if (len < maxcol) \ + goto out; \ + } while (0) + +static ssize_t +omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len) +{ + char *p = buf; + + pr_reg(REVISION); + pr_reg(IRQSTATUS); + pr_reg(IRQENABLE); + pr_reg(WALKING_ST); + pr_reg(CNTL); + pr_reg(FAULT_AD); + pr_reg(TTB); + pr_reg(LOCK); + pr_reg(LD_TLB); + pr_reg(CAM); + pr_reg(RAM); + pr_reg(GFLUSH); + pr_reg(FLUSH_ENTRY); + pr_reg(READ_CAM); + pr_reg(READ_RAM); + pr_reg(EMU_FAULT_AD); +out: + return p - buf; +} ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) { @@ -432,13 +505,12 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) pm_runtime_get_sync(obj->dev); - bytes = arch_iommu->dump_ctx(obj, buf, bytes); + bytes = omap2_iommu_dump_ctx(obj, buf, bytes); pm_runtime_put_sync(obj->dev); return bytes; } -EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx); static int __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) @@ -464,6 +536,24 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) } /** + * iotlb_dump_cr - Dump an iommu tlb entry into buf + * @obj: target iommu + * @cr: contents of cam and ram register + * @buf: output buffer + **/ +static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, + char *buf) +{ + char *p = buf; + + /* FIXME: Need more detail analysis of cam/ram */ + p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram, + (cr->cam & MMU_CAM_P) ? 
1 : 0); + + return p - buf; +} + +/** * omap_dump_tlb_entries - dump cr arrays to given buffer * @obj: target iommu * @buf: output buffer @@ -488,16 +578,8 @@ size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes) return p - buf; } -EXPORT_SYMBOL_GPL(omap_dump_tlb_entries); - -int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) -{ - return driver_for_each_device(&omap_iommu_driver.driver, - NULL, data, fn); -} -EXPORT_SYMBOL_GPL(omap_foreach_iommu_device); -#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ +#endif /* CONFIG_OMAP_IOMMU_DEBUG */ /* * H/W pagetable operations @@ -680,7 +762,8 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e) * @obj: target iommu * @e: an iommu tlb entry info **/ -int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) +static int +omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) { int err; @@ -690,7 +773,6 @@ int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) prefetch_iotlb_entry(obj, e); return err; } -EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry); /** * iopgtable_lookup_entry - Lookup an iommu pte entry @@ -819,8 +901,9 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) u32 *iopgd, *iopte; struct omap_iommu *obj = data; struct iommu_domain *domain = obj->domain; + struct omap_iommu_domain *omap_domain = domain->priv; - if (!obj->refcount) + if (!omap_domain->iommu_dev) return IRQ_NONE; errs = iommu_report_fault(obj, &da); @@ -880,13 +963,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) spin_lock(&obj->iommu_lock); - /* an iommu device can only be attached once */ - if (++obj->refcount > 1) { - dev_err(dev, "%s: already attached!\n", obj->name); - err = -EBUSY; - goto err_enable; - } - obj->iopgd = iopgd; err = iommu_enable(obj); if (err) @@ -899,7 +975,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) return obj; err_enable: - obj->refcount--; spin_unlock(&obj->iommu_lock); return ERR_PTR(err); } @@ -915,9 +990,7 @@ static void omap_iommu_detach(struct omap_iommu *obj) spin_lock(&obj->iommu_lock); - if (--obj->refcount == 0) - iommu_disable(obj); - + iommu_disable(obj); obj->iopgd = NULL; spin_unlock(&obj->iommu_lock); @@ -934,7 +1007,7 @@ static int omap_iommu_probe(struct platform_device *pdev) int irq; struct omap_iommu *obj; struct resource *res; - struct iommu_platform_data *pdata = pdev->dev.platform_data; + struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev); struct device_node *of = pdev->dev.of_node; obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); @@ -981,6 +1054,8 @@ static int omap_iommu_probe(struct platform_device *pdev) pm_runtime_irq_safe(obj->dev); pm_runtime_enable(obj->dev); + omap_iommu_debugfs_add(obj); + dev_info(&pdev->dev, "%s registered\n", obj->name); return 0; } @@ -990,6 +1065,7 @@ static int omap_iommu_remove(struct platform_device *pdev) struct omap_iommu *obj = platform_get_drvdata(pdev); iopgtable_clear_entry_all(obj); + omap_iommu_debugfs_remove(obj); pm_runtime_disable(obj->dev); @@ -1026,7 +1102,6 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) e->da = da; e->pa = pa; e->valid = MMU_CAM_V; - /* FIXME: add OMAP1 support */ e->pgsz = pgsz; e->endian = MMU_RAM_ENDIAN_LITTLE; e->elsz = MMU_RAM_ELSZ_8; @@ -1131,6 +1206,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, omap_domain->iommu_dev = arch_data->iommu_dev = 
NULL; omap_domain->dev = NULL; + oiommu->domain = NULL; } static void omap_iommu_detach_dev(struct iommu_domain *domain, @@ -1288,6 +1364,7 @@ static const struct iommu_ops omap_iommu_ops = { .detach_dev = omap_iommu_detach_dev, .map = omap_iommu_map, .unmap = omap_iommu_unmap, + .map_sg = default_iommu_map_sg, .iova_to_phys = omap_iommu_iova_to_phys, .add_device = omap_iommu_add_device, .remove_device = omap_iommu_remove_device, @@ -1308,6 +1385,8 @@ static int __init omap_iommu_init(void) bus_set_iommu(&platform_bus_type, &omap_iommu_ops); + omap_iommu_debugfs_init(); + return platform_driver_register(&omap_iommu_driver); } /* must be ready before omap3isp is probed */ @@ -1318,6 +1397,8 @@ static void __exit omap_iommu_exit(void) kmem_cache_destroy(iopte_cachep); platform_driver_unregister(&omap_iommu_driver); + + omap_iommu_debugfs_exit(); } module_exit(omap_iommu_exit); diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h index 4f1b68c08c15..d736630df3c8 100644 --- a/drivers/iommu/omap-iommu.h +++ b/drivers/iommu/omap-iommu.h @@ -10,9 +10,8 @@ * published by the Free Software Foundation. */ -#if defined(CONFIG_ARCH_OMAP1) -#error "iommu for this processor not implemented yet" -#endif +#ifndef _OMAP_IOMMU_H +#define _OMAP_IOMMU_H struct iotlb_entry { u32 da; @@ -30,10 +29,9 @@ struct omap_iommu { const char *name; void __iomem *regbase; struct device *dev; - void *isr_priv; struct iommu_domain *domain; + struct dentry *debug_dir; - unsigned int refcount; spinlock_t iommu_lock; /* global for this whole object */ /* @@ -67,34 +65,6 @@ struct cr_regs { }; }; -/* architecture specific functions */ -struct iommu_functions { - unsigned long version; - - int (*enable)(struct omap_iommu *obj); - void (*disable)(struct omap_iommu *obj); - void (*set_twl)(struct omap_iommu *obj, bool on); - u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra); - - void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr); - void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr); - - struct cr_regs *(*alloc_cr)(struct omap_iommu *obj, - struct iotlb_entry *e); - int (*cr_valid)(struct cr_regs *cr); - u32 (*cr_to_virt)(struct cr_regs *cr); - void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e); - ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr, - char *buf); - - u32 (*get_pte_attr)(struct iotlb_entry *e); - - void (*save_ctx)(struct omap_iommu *obj); - void (*restore_ctx)(struct omap_iommu *obj); - ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len); -}; - -#ifdef CONFIG_IOMMU_API /** * dev_to_omap_iommu() - retrieves an omap iommu object from a user device * @dev: iommu client device @@ -105,7 +75,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) return arch_data->iommu_dev; } -#endif /* * MMU Register offsets @@ -133,6 +102,28 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) /* * MMU Register bit definitions */ +/* IRQSTATUS & IRQENABLE */ +#define MMU_IRQ_MULTIHITFAULT (1 << 4) +#define MMU_IRQ_TABLEWALKFAULT (1 << 3) +#define MMU_IRQ_EMUMISS (1 << 2) +#define MMU_IRQ_TRANSLATIONFAULT (1 << 1) +#define MMU_IRQ_TLBMISS (1 << 0) + +#define __MMU_IRQ_FAULT \ + (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT) +#define MMU_IRQ_MASK \ + (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS) +#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT) +#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS) + +/* MMU_CNTL */ +#define MMU_CNTL_SHIFT 1 +#define 
MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT) +#define MMU_CNTL_EML_TLB (1 << 3) +#define MMU_CNTL_TWL_EN (1 << 2) +#define MMU_CNTL_MMU_EN (1 << 1) + +/* CAM */ #define MMU_CAM_VATAG_SHIFT 12 #define MMU_CAM_VATAG_MASK \ ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT) @@ -144,6 +135,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) #define MMU_CAM_PGSZ_4K (2 << 0) #define MMU_CAM_PGSZ_16M (3 << 0) +/* RAM */ #define MMU_RAM_PADDR_SHIFT 12 #define MMU_RAM_PADDR_MASK \ ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT) @@ -165,6 +157,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) #define MMU_GP_REG_BUS_ERR_BACK_EN 0x1 +#define get_cam_va_mask(pgsz) \ + (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \ + ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \ + ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \ + ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0) + /* * utilities for super page(16MB, 1MB, 64KB and 4KB) */ @@ -192,27 +190,25 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) /* * global functions */ -extern u32 omap_iommu_arch_version(void); - -extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e); - -extern int -omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e); - -extern void omap_iommu_save_ctx(struct device *dev); -extern void omap_iommu_restore_ctx(struct device *dev); - -extern int omap_foreach_iommu_device(void *data, - int (*fn)(struct device *, void *)); - -extern int omap_install_iommu_arch(const struct iommu_functions *ops); -extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops); - +#ifdef CONFIG_OMAP_IOMMU_DEBUG extern ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); extern size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len); +void omap_iommu_debugfs_init(void); +void omap_iommu_debugfs_exit(void); + +void omap_iommu_debugfs_add(struct omap_iommu *obj); +void omap_iommu_debugfs_remove(struct omap_iommu *obj); +#else +static inline void omap_iommu_debugfs_init(void) { } +static inline void omap_iommu_debugfs_exit(void) { } + +static inline void omap_iommu_debugfs_add(struct omap_iommu *obj) { } +static inline void omap_iommu_debugfs_remove(struct omap_iommu *obj) { } +#endif + /* * register accessors */ @@ -225,3 +221,5 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs) { __raw_writel(val, obj->regbase + offs); } + +#endif /* _OMAP_IOMMU_H */ diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c deleted file mode 100644 index 5e1ea3b0bf16..000000000000 --- a/drivers/iommu/omap-iommu2.c +++ /dev/null @@ -1,337 +0,0 @@ -/* - * omap iommu: omap2/3 architecture specific functions - * - * Copyright (C) 2008-2009 Nokia Corporation - * - * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>, - * Paul Mundt and Toshihiro Kobayashi - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
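
The CAM/RAM bit definitions and get_cam_va_mask() now live in omap-iommu.h (above) so omap-iommu.c can use them directly, while the whole of omap-iommu2.c, starting with the header below, is deleted. A quick worked use of the mask macro, with an arbitrary example address:

/* Worked example (values illustrative): a 1 MiB entry keeps va[31:20] */
u32 mask = get_cam_va_mask(MMU_CAM_PGSZ_1M);	/* 0xfff00000 */
u32 tag  = 0x12345678 & mask;			/* 0x12300000 */
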
- */ - -#include <linux/err.h> -#include <linux/device.h> -#include <linux/io.h> -#include <linux/jiffies.h> -#include <linux/module.h> -#include <linux/omap-iommu.h> -#include <linux/slab.h> -#include <linux/stringify.h> -#include <linux/platform_data/iommu-omap.h> - -#include "omap-iommu.h" - -/* - * omap2 architecture specific register bit definitions - */ -#define IOMMU_ARCH_VERSION 0x00000011 - -/* IRQSTATUS & IRQENABLE */ -#define MMU_IRQ_MULTIHITFAULT (1 << 4) -#define MMU_IRQ_TABLEWALKFAULT (1 << 3) -#define MMU_IRQ_EMUMISS (1 << 2) -#define MMU_IRQ_TRANSLATIONFAULT (1 << 1) -#define MMU_IRQ_TLBMISS (1 << 0) - -#define __MMU_IRQ_FAULT \ - (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT) -#define MMU_IRQ_MASK \ - (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS) -#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT) -#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS) - -/* MMU_CNTL */ -#define MMU_CNTL_SHIFT 1 -#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT) -#define MMU_CNTL_EML_TLB (1 << 3) -#define MMU_CNTL_TWL_EN (1 << 2) -#define MMU_CNTL_MMU_EN (1 << 1) - -#define get_cam_va_mask(pgsz) \ - (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \ - ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \ - ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \ - ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0) - -/* IOMMU errors */ -#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0) -#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1) -#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2) -#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3) -#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4) - -static void __iommu_set_twl(struct omap_iommu *obj, bool on) -{ - u32 l = iommu_read_reg(obj, MMU_CNTL); - - if (on) - iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE); - else - iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE); - - l &= ~MMU_CNTL_MASK; - if (on) - l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN); - else - l |= (MMU_CNTL_MMU_EN); - - iommu_write_reg(obj, l, MMU_CNTL); -} - - -static int omap2_iommu_enable(struct omap_iommu *obj) -{ - u32 l, pa; - - if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) - return -EINVAL; - - pa = virt_to_phys(obj->iopgd); - if (!IS_ALIGNED(pa, SZ_16K)) - return -EINVAL; - - l = iommu_read_reg(obj, MMU_REVISION); - dev_info(obj->dev, "%s: version %d.%d\n", obj->name, - (l >> 4) & 0xf, l & 0xf); - - iommu_write_reg(obj, pa, MMU_TTB); - - if (obj->has_bus_err_back) - iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG); - - __iommu_set_twl(obj, true); - - return 0; -} - -static void omap2_iommu_disable(struct omap_iommu *obj) -{ - u32 l = iommu_read_reg(obj, MMU_CNTL); - - l &= ~MMU_CNTL_MASK; - iommu_write_reg(obj, l, MMU_CNTL); - - dev_dbg(obj->dev, "%s is shutting down\n", obj->name); -} - -static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on) -{ - __iommu_set_twl(obj, false); -} - -static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra) -{ - u32 stat, da; - u32 errs = 0; - - stat = iommu_read_reg(obj, MMU_IRQSTATUS); - stat &= MMU_IRQ_MASK; - if (!stat) { - *ra = 0; - return 0; - } - - da = iommu_read_reg(obj, MMU_FAULT_AD); - *ra = da; - - if (stat & MMU_IRQ_TLBMISS) - errs |= OMAP_IOMMU_ERR_TLB_MISS; - if (stat & MMU_IRQ_TRANSLATIONFAULT) - errs |= OMAP_IOMMU_ERR_TRANS_FAULT; - if (stat & MMU_IRQ_EMUMISS) - errs |= OMAP_IOMMU_ERR_EMU_MISS; - if (stat & MMU_IRQ_TABLEWALKFAULT) - errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT; - if (stat & MMU_IRQ_MULTIHITFAULT) - errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT; - iommu_write_reg(obj, 
stat, MMU_IRQSTATUS); - - return errs; -} - -static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) -{ - cr->cam = iommu_read_reg(obj, MMU_READ_CAM); - cr->ram = iommu_read_reg(obj, MMU_READ_RAM); -} - -static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) -{ - iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM); - iommu_write_reg(obj, cr->ram, MMU_RAM); -} - -static u32 omap2_cr_to_virt(struct cr_regs *cr) -{ - u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK; - u32 mask = get_cam_va_mask(cr->cam & page_size); - - return cr->cam & mask; -} - -static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj, - struct iotlb_entry *e) -{ - struct cr_regs *cr; - - if (e->da & ~(get_cam_va_mask(e->pgsz))) { - dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__, - e->da); - return ERR_PTR(-EINVAL); - } - - cr = kmalloc(sizeof(*cr), GFP_KERNEL); - if (!cr) - return ERR_PTR(-ENOMEM); - - cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid; - cr->ram = e->pa | e->endian | e->elsz | e->mixed; - - return cr; -} - -static inline int omap2_cr_valid(struct cr_regs *cr) -{ - return cr->cam & MMU_CAM_V; -} - -static u32 omap2_get_pte_attr(struct iotlb_entry *e) -{ - u32 attr; - - attr = e->mixed << 5; - attr |= e->endian; - attr |= e->elsz >> 3; - attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) || - (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6); - return attr; -} - -static ssize_t -omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf) -{ - char *p = buf; - - /* FIXME: Need more detail analysis of cam/ram */ - p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram, - (cr->cam & MMU_CAM_P) ? 1 : 0); - - return p - buf; -} - -#define pr_reg(name) \ - do { \ - ssize_t bytes; \ - const char *str = "%20s: %08x\n"; \ - const int maxcol = 32; \ - bytes = snprintf(p, maxcol, str, __stringify(name), \ - iommu_read_reg(obj, MMU_##name)); \ - p += bytes; \ - len -= bytes; \ - if (len < maxcol) \ - goto out; \ - } while (0) - -static ssize_t -omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len) -{ - char *p = buf; - - pr_reg(REVISION); - pr_reg(IRQSTATUS); - pr_reg(IRQENABLE); - pr_reg(WALKING_ST); - pr_reg(CNTL); - pr_reg(FAULT_AD); - pr_reg(TTB); - pr_reg(LOCK); - pr_reg(LD_TLB); - pr_reg(CAM); - pr_reg(RAM); - pr_reg(GFLUSH); - pr_reg(FLUSH_ENTRY); - pr_reg(READ_CAM); - pr_reg(READ_RAM); - pr_reg(EMU_FAULT_AD); -out: - return p - buf; -} - -static void omap2_iommu_save_ctx(struct omap_iommu *obj) -{ - int i; - u32 *p = obj->ctx; - - for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { - p[i] = iommu_read_reg(obj, i * sizeof(u32)); - dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); - } - - BUG_ON(p[0] != IOMMU_ARCH_VERSION); -} - -static void omap2_iommu_restore_ctx(struct omap_iommu *obj) -{ - int i; - u32 *p = obj->ctx; - - for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) { - iommu_write_reg(obj, p[i], i * sizeof(u32)); - dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]); - } - - BUG_ON(p[0] != IOMMU_ARCH_VERSION); -} - -static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) -{ - e->da = cr->cam & MMU_CAM_VATAG_MASK; - e->pa = cr->ram & MMU_RAM_PADDR_MASK; - e->valid = cr->cam & MMU_CAM_V; - e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK; - e->endian = cr->ram & MMU_RAM_ENDIAN_MASK; - e->elsz = cr->ram & MMU_RAM_ELSZ_MASK; - e->mixed = cr->ram & MMU_RAM_MIXED; -} - -static const struct iommu_functions omap2_iommu_ops = { - .version = IOMMU_ARCH_VERSION, - - .enable = omap2_iommu_enable, - .disable = omap2_iommu_disable, 
- .set_twl = omap2_iommu_set_twl, - .fault_isr = omap2_iommu_fault_isr, - - .tlb_read_cr = omap2_tlb_read_cr, - .tlb_load_cr = omap2_tlb_load_cr, - - .cr_to_e = omap2_cr_to_e, - .cr_to_virt = omap2_cr_to_virt, - .alloc_cr = omap2_alloc_cr, - .cr_valid = omap2_cr_valid, - .dump_cr = omap2_dump_cr, - - .get_pte_attr = omap2_get_pte_attr, - - .save_ctx = omap2_iommu_save_ctx, - .restore_ctx = omap2_iommu_restore_ctx, - .dump_ctx = omap2_iommu_dump_ctx, -}; - -static int __init omap2_iommu_init(void) -{ - return omap_install_iommu_arch(&omap2_iommu_ops); -} -module_init(omap2_iommu_init); - -static void __exit omap2_iommu_exit(void) -{ - omap_uninstall_iommu_arch(&omap2_iommu_ops); -} -module_exit(omap2_iommu_exit); - -MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi"); -MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c new file mode 100644 index 000000000000..b2023af384b9 --- /dev/null +++ b/drivers/iommu/rockchip-iommu.c @@ -0,0 +1,1038 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <asm/cacheflush.h> +#include <asm/pgtable.h> +#include <linux/compiler.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iommu.h> +#include <linux/jiffies.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +/** MMU register offsets */ +#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */ +#define RK_MMU_STATUS 0x04 +#define RK_MMU_COMMAND 0x08 +#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */ +#define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */ +#define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */ +#define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */ +#define RK_MMU_INT_MASK 0x1C /* IRQ enable */ +#define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */ +#define RK_MMU_AUTO_GATING 0x24 + +#define DTE_ADDR_DUMMY 0xCAFEBABE +#define FORCE_RESET_TIMEOUT 100 /* ms */ + +/* RK_MMU_STATUS fields */ +#define RK_MMU_STATUS_PAGING_ENABLED BIT(0) +#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1) +#define RK_MMU_STATUS_STALL_ACTIVE BIT(2) +#define RK_MMU_STATUS_IDLE BIT(3) +#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4) +#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5) +#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31) + +/* RK_MMU_COMMAND command values */ +#define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */ +#define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */ +#define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */ +#define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stall re-enables paging */ +#define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */ +#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */ +#define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */ + +/* RK_MMU_INT_* register fields */ +#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */ +#define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */ +#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR) + +#define NUM_DT_ENTRIES 1024 +#define NUM_PT_ENTRIES 1024 + 
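
Taken together with SPAGE_SIZE defined just below, these table dimensions pin down the addressable range; a quick sanity check (illustrative, not part of the driver):

/*
 * One 4 KiB DT of 1024 DTEs, each naming a 4 KiB PT of 1024 PTEs, each
 * mapping a 4 KiB page: 1024 * 1024 * 4 KiB = 4 GiB, the full 32-bit
 * iova space.
 */
#define RK_IOMMU_IOVA_SPAN \
	((u64)NUM_DT_ENTRIES * NUM_PT_ENTRIES * SPAGE_SIZE)	/* 1ULL << 32 */
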
+#define SPAGE_ORDER 12 +#define SPAGE_SIZE (1 << SPAGE_ORDER) + + /* + * Support mapping any size that fits in one page table: + * 4 KiB to 4 MiB + */ +#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000 + +#define IOMMU_REG_POLL_COUNT_FAST 1000 + +struct rk_iommu_domain { + struct list_head iommus; + u32 *dt; /* page directory table */ + spinlock_t iommus_lock; /* lock for iommus list */ + spinlock_t dt_lock; /* lock for modifying page directory table */ +}; + +struct rk_iommu { + struct device *dev; + void __iomem *base; + int irq; + struct list_head node; /* entry in rk_iommu_domain.iommus */ + struct iommu_domain *domain; /* domain to which iommu is attached */ +}; + +static inline void rk_table_flush(u32 *va, unsigned int count) +{ + phys_addr_t pa_start = virt_to_phys(va); + phys_addr_t pa_end = virt_to_phys(va + count); + size_t size = pa_end - pa_start; + + __cpuc_flush_dcache_area(va, size); + outer_flush_range(pa_start, pa_end); +} + +/** + * Inspired by _wait_for in intel_drv.h + * This is NOT safe for use in interrupt context. + * + * Note that it's important that we check the condition again after having + * timed out, since the timeout could be due to preemption or similar and + * we've never had a chance to check the condition before the timeout. + */ +#define rk_wait_for(COND, MS) ({ \ + unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ + int ret__ = 0; \ + while (!(COND)) { \ + if (time_after(jiffies, timeout__)) { \ + ret__ = (COND) ? 0 : -ETIMEDOUT; \ + break; \ + } \ + usleep_range(50, 100); \ + } \ + ret__; \ +}) + +/* + * The Rockchip rk3288 iommu uses a 2-level page table. + * The first level is the "Directory Table" (DT). + * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing + * to a "Page Table". + * The second level is the 1024 Page Tables (PT). + * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to + * a 4 KB page of physical memory. + * + * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries). + * Each iommu device has a MMU_DTE_ADDR register that contains the physical + * address of the start of the DT page. 
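
RK_IOMMU_PGSIZE_BITMAP, defined earlier in this file, encodes the supported mapping sizes: each set bit k advertises 2^k bytes, so 0x007ff000 (bits 12 through 22) covers every power of two from 4 KiB to 4 MiB. Decoded in code (illustrative):

unsigned long bitmap = 0x007ff000;	/* RK_IOMMU_PGSIZE_BITMAP */
size_t min_sz = 1UL << __ffs(bitmap);	/* bit 12: 4 KiB */
size_t max_sz = 1UL << __fls(bitmap);	/* bit 22: 4 MiB */
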
+ * + * The structure of the page table is as follows: + * + * DT + * MMU_DTE_ADDR -> +-----+ + * | | + * +-----+ PT + * | DTE | -> +-----+ + * +-----+ | | Memory + * | | +-----+ Page + * | | | PTE | -> +-----+ + * +-----+ +-----+ | | + * | | | | + * | | | | + * +-----+ | | + * | | + * | | + * +-----+ + */ + +/* + * Each DTE has a PT address and a valid bit: + * +---------------------+-----------+-+ + * | PT address | Reserved |V| + * +---------------------+-----------+-+ + * 31:12 - PT address (PTs always starts on a 4 KB boundary) + * 11: 1 - Reserved + * 0 - 1 if PT @ PT address is valid + */ +#define RK_DTE_PT_ADDRESS_MASK 0xfffff000 +#define RK_DTE_PT_VALID BIT(0) + +static inline phys_addr_t rk_dte_pt_address(u32 dte) +{ + return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK; +} + +static inline bool rk_dte_is_pt_valid(u32 dte) +{ + return dte & RK_DTE_PT_VALID; +} + +static u32 rk_mk_dte(u32 *pt) +{ + phys_addr_t pt_phys = virt_to_phys(pt); + return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID; +} + +/* + * Each PTE has a Page address, some flags and a valid bit: + * +---------------------+---+-------+-+ + * | Page address |Rsv| Flags |V| + * +---------------------+---+-------+-+ + * 31:12 - Page address (Pages always start on a 4 KB boundary) + * 11: 9 - Reserved + * 8: 1 - Flags + * 8 - Read allocate - allocate cache space on read misses + * 7 - Read cache - enable cache & prefetch of data + * 6 - Write buffer - enable delaying writes on their way to memory + * 5 - Write allocate - allocate cache space on write misses + * 4 - Write cache - different writes can be merged together + * 3 - Override cache attributes + * if 1, bits 4-8 control cache attributes + * if 0, the system bus defaults are used + * 2 - Writable + * 1 - Readable + * 0 - 1 if Page @ Page address is valid + */ +#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000 +#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe +#define RK_PTE_PAGE_WRITABLE BIT(2) +#define RK_PTE_PAGE_READABLE BIT(1) +#define RK_PTE_PAGE_VALID BIT(0) + +static inline phys_addr_t rk_pte_page_address(u32 pte) +{ + return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK; +} + +static inline bool rk_pte_is_page_valid(u32 pte) +{ + return pte & RK_PTE_PAGE_VALID; +} + +/* TODO: set cache flags per prot IOMMU_CACHE */ +static u32 rk_mk_pte(phys_addr_t page, int prot) +{ + u32 flags = 0; + flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0; + flags |= (prot & IOMMU_WRITE) ? 
RK_PTE_PAGE_WRITABLE : 0; + page &= RK_PTE_PAGE_ADDRESS_MASK; + return page | flags | RK_PTE_PAGE_VALID; +} + +static u32 rk_mk_pte_invalid(u32 pte) +{ + return pte & ~RK_PTE_PAGE_VALID; +} + +/* + * rk3288 iova (IOMMU Virtual Address) format + * 31 22.21 12.11 0 + * +-----------+-----------+-------------+ + * | DTE index | PTE index | Page offset | + * +-----------+-----------+-------------+ + * 31:22 - DTE index - index of DTE in DT + * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address + * 11: 0 - Page offset - offset into page @ PTE.page_address + */ +#define RK_IOVA_DTE_MASK 0xffc00000 +#define RK_IOVA_DTE_SHIFT 22 +#define RK_IOVA_PTE_MASK 0x003ff000 +#define RK_IOVA_PTE_SHIFT 12 +#define RK_IOVA_PAGE_MASK 0x00000fff +#define RK_IOVA_PAGE_SHIFT 0 + +static u32 rk_iova_dte_index(dma_addr_t iova) +{ + return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT; +} + +static u32 rk_iova_pte_index(dma_addr_t iova) +{ + return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT; +} + +static u32 rk_iova_page_offset(dma_addr_t iova) +{ + return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT; +} + +static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset) +{ + return readl(iommu->base + offset); +} + +static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value) +{ + writel(value, iommu->base + offset); +} + +static void rk_iommu_command(struct rk_iommu *iommu, u32 command) +{ + writel(command, iommu->base + RK_MMU_COMMAND); +} + +static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova, + size_t size) +{ + dma_addr_t iova_end = iova + size; + /* + * TODO(djkurtz): Figure out when it is more efficient to shootdown the + * entire iotlb rather than iterate over individual iovas. + */ + for (; iova < iova_end; iova += SPAGE_SIZE) + rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova); +} + +static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) +{ + return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE; +} + +static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) +{ + return rk_iommu_read(iommu, RK_MMU_STATUS) & + RK_MMU_STATUS_PAGING_ENABLED; +} + +static int rk_iommu_enable_stall(struct rk_iommu *iommu) +{ + int ret; + + if (rk_iommu_is_stall_active(iommu)) + return 0; + + /* Stall can only be enabled if paging is enabled */ + if (!rk_iommu_is_paging_enabled(iommu)) + return 0; + + rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL); + + ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1); + if (ret) + dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n", + rk_iommu_read(iommu, RK_MMU_STATUS)); + + return ret; +} + +static int rk_iommu_disable_stall(struct rk_iommu *iommu) +{ + int ret; + + if (!rk_iommu_is_stall_active(iommu)) + return 0; + + rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL); + + ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1); + if (ret) + dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n", + rk_iommu_read(iommu, RK_MMU_STATUS)); + + return ret; +} + +static int rk_iommu_enable_paging(struct rk_iommu *iommu) +{ + int ret; + + if (rk_iommu_is_paging_enabled(iommu)) + return 0; + + rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING); + + ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1); + if (ret) + dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n", + rk_iommu_read(iommu, RK_MMU_STATUS)); + + return ret; +} + +static int rk_iommu_disable_paging(struct rk_iommu *iommu) +{ + int ret; + + if (!rk_iommu_is_paging_enabled(iommu)) + return 0; + + 
rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING); + + ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1); + if (ret) + dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n", + rk_iommu_read(iommu, RK_MMU_STATUS)); + + return ret; +} + +static int rk_iommu_force_reset(struct rk_iommu *iommu) +{ + int ret; + u32 dte_addr; + + /* + * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY + * and verifying that upper 5 nybbles are read back. + */ + rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY); + + dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR); + if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) { + dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n"); + return -EFAULT; + } + + rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET); + + ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000, + FORCE_RESET_TIMEOUT); + if (ret) + dev_err(iommu->dev, "FORCE_RESET command timed out\n"); + + return ret; +} + +static void log_iova(struct rk_iommu *iommu, dma_addr_t iova) +{ + u32 dte_index, pte_index, page_offset; + u32 mmu_dte_addr; + phys_addr_t mmu_dte_addr_phys, dte_addr_phys; + u32 *dte_addr; + u32 dte; + phys_addr_t pte_addr_phys = 0; + u32 *pte_addr = NULL; + u32 pte = 0; + phys_addr_t page_addr_phys = 0; + u32 page_flags = 0; + + dte_index = rk_iova_dte_index(iova); + pte_index = rk_iova_pte_index(iova); + page_offset = rk_iova_page_offset(iova); + + mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR); + mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr; + + dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index); + dte_addr = phys_to_virt(dte_addr_phys); + dte = *dte_addr; + + if (!rk_dte_is_pt_valid(dte)) + goto print_it; + + pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4); + pte_addr = phys_to_virt(pte_addr_phys); + pte = *pte_addr; + + if (!rk_pte_is_page_valid(pte)) + goto print_it; + + page_addr_phys = rk_pte_page_address(pte) + page_offset; + page_flags = pte & RK_PTE_PAGE_FLAGS_MASK; + +print_it: + dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n", + &iova, dte_index, pte_index, page_offset); + dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n", + &mmu_dte_addr_phys, &dte_addr_phys, dte, + rk_dte_is_pt_valid(dte), &pte_addr_phys, pte, + rk_pte_is_page_valid(pte), &page_addr_phys, page_flags); +} + +static irqreturn_t rk_iommu_irq(int irq, void *dev_id) +{ + struct rk_iommu *iommu = dev_id; + u32 status; + u32 int_status; + dma_addr_t iova; + + int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS); + if (int_status == 0) + return IRQ_NONE; + + iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR); + + if (int_status & RK_MMU_IRQ_PAGE_FAULT) { + int flags; + + status = rk_iommu_read(iommu, RK_MMU_STATUS); + flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ? + IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; + + dev_err(iommu->dev, "Page fault at %pad of type %s\n", + &iova, + (flags == IOMMU_FAULT_WRITE) ? "write" : "read"); + + log_iova(iommu, iova); + + /* + * Report page fault to any installed handlers. + * Ignore the return code, though, since we always zap cache + * and clear the page fault anyway. 
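
report_iommu_fault() below hands the fault to whatever handler the domain owner registered through the generic API; a hypothetical consumer (handler name and policy are assumptions) might look like:

/* Hypothetical consumer of the fault report issued below */
static int example_fault_handler(struct iommu_domain *domain,
				 struct device *dev, unsigned long iova,
				 int flags, void *token)
{
	dev_err(dev, "iommu fault at %#lx (%s)\n", iova,
		(flags & IOMMU_FAULT_WRITE) ? "write" : "read");
	return -ENOSYS;	/* not handled; the driver still zaps and acks */
}

/* ... installed once, after setting up the domain: */
iommu_set_fault_handler(domain, example_fault_handler, NULL);
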
+ */ + if (iommu->domain) + report_iommu_fault(iommu->domain, iommu->dev, iova, + flags); + else + dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); + + rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE); + rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE); + } + + if (int_status & RK_MMU_IRQ_BUS_ERROR) + dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova); + + if (int_status & ~RK_MMU_IRQ_MASK) + dev_err(iommu->dev, "unexpected int_status: %#08x\n", + int_status); + + rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status); + + return IRQ_HANDLED; +} + +static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + phys_addr_t pt_phys, phys = 0; + u32 dte, pte; + u32 *page_table; + + spin_lock_irqsave(&rk_domain->dt_lock, flags); + + dte = rk_domain->dt[rk_iova_dte_index(iova)]; + if (!rk_dte_is_pt_valid(dte)) + goto out; + + pt_phys = rk_dte_pt_address(dte); + page_table = (u32 *)phys_to_virt(pt_phys); + pte = page_table[rk_iova_pte_index(iova)]; + if (!rk_pte_is_page_valid(pte)) + goto out; + + phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova); +out: + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + + return phys; +} + +static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, + dma_addr_t iova, size_t size) +{ + struct list_head *pos; + unsigned long flags; + + /* shootdown these iova from all iommus using this domain */ + spin_lock_irqsave(&rk_domain->iommus_lock, flags); + list_for_each(pos, &rk_domain->iommus) { + struct rk_iommu *iommu; + iommu = list_entry(pos, struct rk_iommu, node); + rk_iommu_zap_lines(iommu, iova, size); + } + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); +} + +static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, + dma_addr_t iova) +{ + u32 *page_table, *dte_addr; + u32 dte; + phys_addr_t pt_phys; + + assert_spin_locked(&rk_domain->dt_lock); + + dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)]; + dte = *dte_addr; + if (rk_dte_is_pt_valid(dte)) + goto done; + + page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32); + if (!page_table) + return ERR_PTR(-ENOMEM); + + dte = rk_mk_dte(page_table); + *dte_addr = dte; + + rk_table_flush(page_table, NUM_PT_ENTRIES); + rk_table_flush(dte_addr, 1); + + /* + * Zap the first iova of newly allocated page table so iommu evicts + * old cached value of new dte from the iotlb. 
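
To make the table walk in rk_iommu_iova_to_phys() above concrete, here is one address (arbitrarily chosen) split with the helpers defined earlier:

/*
 * Worked example of the iova split:
 *   iova = 0x12345678
 *   rk_iova_dte_index(iova)   = (iova & 0xffc00000) >> 22 = 0x048
 *   rk_iova_pte_index(iova)   = (iova & 0x003ff000) >> 12 = 0x345
 *   rk_iova_page_offset(iova) =  iova & 0x00000fff        = 0x678
 * so phys = rk_pte_page_address(pt[0x345]) + 0x678, with pt found
 * through dt[0x048], exactly as rk_iommu_iova_to_phys() does.
 */
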
+ */ + rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE); + +done: + pt_phys = rk_dte_pt_address(dte); + return (u32 *)phys_to_virt(pt_phys); +} + +static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, + u32 *pte_addr, dma_addr_t iova, size_t size) +{ + unsigned int pte_count; + unsigned int pte_total = size / SPAGE_SIZE; + + assert_spin_locked(&rk_domain->dt_lock); + + for (pte_count = 0; pte_count < pte_total; pte_count++) { + u32 pte = pte_addr[pte_count]; + if (!rk_pte_is_page_valid(pte)) + break; + + pte_addr[pte_count] = rk_mk_pte_invalid(pte); + } + + rk_table_flush(pte_addr, pte_count); + + return pte_count * SPAGE_SIZE; +} + +static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, + dma_addr_t iova, phys_addr_t paddr, size_t size, + int prot) +{ + unsigned int pte_count; + unsigned int pte_total = size / SPAGE_SIZE; + phys_addr_t page_phys; + + assert_spin_locked(&rk_domain->dt_lock); + + for (pte_count = 0; pte_count < pte_total; pte_count++) { + u32 pte = pte_addr[pte_count]; + + if (rk_pte_is_page_valid(pte)) + goto unwind; + + pte_addr[pte_count] = rk_mk_pte(paddr, prot); + + paddr += SPAGE_SIZE; + } + + rk_table_flush(pte_addr, pte_count); + + return 0; +unwind: + /* Unmap the range of iovas that we just mapped */ + rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE); + + iova += pte_count * SPAGE_SIZE; + page_phys = rk_pte_page_address(pte_addr[pte_count]); + pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n", + &iova, &page_phys, &paddr, prot); + + return -EADDRINUSE; +} + +static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, + phys_addr_t paddr, size_t size, int prot) +{ + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + dma_addr_t iova = (dma_addr_t)_iova; + u32 *page_table, *pte_addr; + int ret; + + spin_lock_irqsave(&rk_domain->dt_lock, flags); + + /* + * pgsize_bitmap specifies iova sizes that fit in one page table + * (1024 4-KiB pages = 4 MiB). + * So, size will always be 4096 <= size <= 4194304. + * Since iommu_map() guarantees that both iova and size will be + * aligned, we will always only be mapping from a single dte here. + */ + page_table = rk_dte_get_page_table(rk_domain, iova); + if (IS_ERR(page_table)) { + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + return PTR_ERR(page_table); + } + + pte_addr = &page_table[rk_iova_pte_index(iova)]; + ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot); + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + + return ret; +} + +static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, + size_t size) +{ + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + dma_addr_t iova = (dma_addr_t)_iova; + phys_addr_t pt_phys; + u32 dte; + u32 *pte_addr; + size_t unmap_size; + + spin_lock_irqsave(&rk_domain->dt_lock, flags); + + /* + * pgsize_bitmap specifies iova sizes that fit in one page table + * (1024 4-KiB pages = 4 MiB). + * So, size will always be 4096 <= size <= 4194304. + * Since iommu_unmap() guarantees that both iova and size will be + * aligned, we will always only be unmapping from a single dte here. 
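
Because of that single-dte guarantee, callers go through the generic entry points and let the core split requests into bitmap-sized chunks; for instance (addresses and sizes arbitrary, alignment assumed):

/* Illustrative: with suitable alignment the core issues this as a
 * 1 MiB chunk followed by a 4 KiB chunk, each within one dte */
int ret;
size_t unmapped;

ret = iommu_map(domain, 0x10000000, phys, SZ_1M + SZ_4K,
		IOMMU_READ | IOMMU_WRITE);
if (!ret)
	unmapped = iommu_unmap(domain, 0x10000000, SZ_1M + SZ_4K);
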
+ */ + dte = rk_domain->dt[rk_iova_dte_index(iova)]; + /* Just return 0 if iova is unmapped */ + if (!rk_dte_is_pt_valid(dte)) { + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + return 0; + } + + pt_phys = rk_dte_pt_address(dte); + pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova); + unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size); + + spin_unlock_irqrestore(&rk_domain->dt_lock, flags); + + /* Shootdown iotlb entries for iova range that was just unmapped */ + rk_iommu_zap_iova(rk_domain, iova, unmap_size); + + return unmap_size; +} + +static struct rk_iommu *rk_iommu_from_dev(struct device *dev) +{ + struct iommu_group *group; + struct device *iommu_dev; + struct rk_iommu *rk_iommu; + + group = iommu_group_get(dev); + if (!group) + return NULL; + iommu_dev = iommu_group_get_iommudata(group); + rk_iommu = dev_get_drvdata(iommu_dev); + iommu_group_put(group); + + return rk_iommu; +} + +static int rk_iommu_attach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct rk_iommu *iommu; + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + int ret; + phys_addr_t dte_addr; + + /* + * Allow 'virtual devices' (e.g., drm) to attach to domain. + * Such a device does not belong to an iommu group. + */ + iommu = rk_iommu_from_dev(dev); + if (!iommu) + return 0; + + ret = rk_iommu_enable_stall(iommu); + if (ret) + return ret; + + ret = rk_iommu_force_reset(iommu); + if (ret) + return ret; + + iommu->domain = domain; + + ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq, + IRQF_SHARED, dev_name(dev), iommu); + if (ret) + return ret; + + dte_addr = virt_to_phys(rk_domain->dt); + rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr); + rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE); + rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); + + ret = rk_iommu_enable_paging(iommu); + if (ret) + return ret; + + spin_lock_irqsave(&rk_domain->iommus_lock, flags); + list_add_tail(&iommu->node, &rk_domain->iommus); + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); + + dev_info(dev, "Attached to iommu domain\n"); + + rk_iommu_disable_stall(iommu); + + return 0; +} + +static void rk_iommu_detach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct rk_iommu *iommu; + struct rk_iommu_domain *rk_domain = domain->priv; + unsigned long flags; + + /* Allow 'virtual devices' (eg drm) to detach from domain */ + iommu = rk_iommu_from_dev(dev); + if (!iommu) + return; + + spin_lock_irqsave(&rk_domain->iommus_lock, flags); + list_del_init(&iommu->node); + spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); + + /* Ignore error while disabling, just keep going */ + rk_iommu_enable_stall(iommu); + rk_iommu_disable_paging(iommu); + rk_iommu_write(iommu, RK_MMU_INT_MASK, 0); + rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0); + rk_iommu_disable_stall(iommu); + + devm_free_irq(dev, iommu->irq, iommu); + + iommu->domain = NULL; + + dev_info(dev, "Detached from iommu domain\n"); +} + +static int rk_iommu_domain_init(struct iommu_domain *domain) +{ + struct rk_iommu_domain *rk_domain; + + rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); + if (!rk_domain) + return -ENOMEM; + + /* + * rk32xx iommus use a 2 level pagetable. + * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries. + * Allocate one 4 KiB page for each table. 
+ */ + rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); + if (!rk_domain->dt) + goto err_dt; + + rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES); + + spin_lock_init(&rk_domain->iommus_lock); + spin_lock_init(&rk_domain->dt_lock); + INIT_LIST_HEAD(&rk_domain->iommus); + + domain->priv = rk_domain; + + return 0; +err_dt: + kfree(rk_domain); + return -ENOMEM; +} + +static void rk_iommu_domain_destroy(struct iommu_domain *domain) +{ + struct rk_iommu_domain *rk_domain = domain->priv; + int i; + + WARN_ON(!list_empty(&rk_domain->iommus)); + + for (i = 0; i < NUM_DT_ENTRIES; i++) { + u32 dte = rk_domain->dt[i]; + if (rk_dte_is_pt_valid(dte)) { + phys_addr_t pt_phys = rk_dte_pt_address(dte); + u32 *page_table = phys_to_virt(pt_phys); + free_page((unsigned long)page_table); + } + } + + free_page((unsigned long)rk_domain->dt); + kfree(domain->priv); + domain->priv = NULL; +} + +static bool rk_iommu_is_dev_iommu_master(struct device *dev) +{ + struct device_node *np = dev->of_node; + int ret; + + /* + * An iommu master has an iommus property containing a list of phandles + * to iommu nodes, each with an #iommu-cells property with value 0. + */ + ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells"); + return (ret > 0); +} + +static int rk_iommu_group_set_iommudata(struct iommu_group *group, + struct device *dev) +{ + struct device_node *np = dev->of_node; + struct platform_device *pd; + int ret; + struct of_phandle_args args; + + /* + * An iommu master has an iommus property containing a list of phandles + * to iommu nodes, each with an #iommu-cells property with value 0. + */ + ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0, + &args); + if (ret) { + dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n", + np->full_name, ret); + return ret; + } + if (args.args_count != 0) { + dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n", + args.np->full_name, args.args_count); + return -EINVAL; + } + + pd = of_find_device_by_node(args.np); + of_node_put(args.np); + if (!pd) { + dev_err(dev, "iommu %s not found\n", args.np->full_name); + return -EPROBE_DEFER; + } + + /* TODO(djkurtz): handle multiple slave iommus for a single master */ + iommu_group_set_iommudata(group, &pd->dev, NULL); + + return 0; +} + +static int rk_iommu_add_device(struct device *dev) +{ + struct iommu_group *group; + int ret; + + if (!rk_iommu_is_dev_iommu_master(dev)) + return -ENODEV; + + group = iommu_group_get(dev); + if (!group) { + group = iommu_group_alloc(); + if (IS_ERR(group)) { + dev_err(dev, "Failed to allocate IOMMU group\n"); + return PTR_ERR(group); + } + } + + ret = iommu_group_add_device(group, dev); + if (ret) + goto err_put_group; + + ret = rk_iommu_group_set_iommudata(group, dev); + if (ret) + goto err_remove_device; + + iommu_group_put(group); + + return 0; + +err_remove_device: + iommu_group_remove_device(dev); +err_put_group: + iommu_group_put(group); + return ret; +} + +static void rk_iommu_remove_device(struct device *dev) +{ + if (!rk_iommu_is_dev_iommu_master(dev)) + return; + + iommu_group_remove_device(dev); +} + +static const struct iommu_ops rk_iommu_ops = { + .domain_init = rk_iommu_domain_init, + .domain_destroy = rk_iommu_domain_destroy, + .attach_dev = rk_iommu_attach_device, + .detach_dev = rk_iommu_detach_device, + .map = rk_iommu_map, + .unmap = rk_iommu_unmap, + .add_device = rk_iommu_add_device, + .remove_device = rk_iommu_remove_device, + .iova_to_phys = rk_iommu_iova_to_phys, + .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP, +}; + 
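Editor's aside, for reference only (not part of the patch): the comments above say the rk32xx uses a two-level table with 1024 4-byte entries per level and 4 KiB pages, which implies the split used by rk_iova_dte_index()/rk_iova_pte_index()/rk_iova_page_offset() (defined in a part of the patch not shown here) is iova[31:22] for the dte, iova[21:12] for the pte, and iova[11:0] for the in-page offset. A minimal stand-alone sketch of that decomposition, with made-up helper names:

/*
 * Illustrative only -- not driver code.  Decomposes an iova the way the
 * rk32xx two-level tables above imply: 1024 dt entries x 1024 pt entries
 * x 4 KiB pages.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t dte_index(uint32_t iova)   { return (iova >> 22) & 0x3ff; }
static uint32_t pte_index(uint32_t iova)   { return (iova >> 12) & 0x3ff; }
static uint32_t page_offset(uint32_t iova) { return iova & 0xfff; }

int main(void)
{
	uint32_t iova = 0x12345678;

	/* Prints: dte=0x48 pte=0x345 off=0x678 */
	printf("dte=%#x pte=%#x off=%#x\n",
	       dte_index(iova), pte_index(iova), page_offset(iova));
	return 0;
}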
+static int rk_iommu_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rk_iommu *iommu; + struct resource *res; + + iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); + if (!iommu) + return -ENOMEM; + + platform_set_drvdata(pdev, iommu); + iommu->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iommu->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(iommu->base)) + return PTR_ERR(iommu->base); + + iommu->irq = platform_get_irq(pdev, 0); + if (iommu->irq < 0) { + dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq); + return -ENXIO; + } + + return 0; +} + +static int rk_iommu_remove(struct platform_device *pdev) +{ + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id rk_iommu_dt_ids[] = { + { .compatible = "rockchip,iommu" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids); +#endif + +static struct platform_driver rk_iommu_driver = { + .probe = rk_iommu_probe, + .remove = rk_iommu_remove, + .driver = { + .name = "rk_iommu", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(rk_iommu_dt_ids), + }, +}; + +static int __init rk_iommu_init(void) +{ + int ret; + + ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); + if (ret) + return ret; + + return platform_driver_register(&rk_iommu_driver); +} +static void __exit rk_iommu_exit(void) +{ + platform_driver_unregister(&rk_iommu_driver); +} + +subsys_initcall(rk_iommu_init); +module_exit(rk_iommu_exit); + +MODULE_DESCRIPTION("IOMMU API for Rockchip"); +MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>"); +MODULE_ALIAS("platform:rockchip-iommu"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c index 1333e6fb3405..f1b00774e4de 100644 --- a/drivers/iommu/shmobile-iommu.c +++ b/drivers/iommu/shmobile-iommu.c @@ -361,6 +361,7 @@ static const struct iommu_ops shmobile_iommu_ops = { .detach_dev = shmobile_iommu_detach_device, .map = shmobile_iommu_map, .unmap = shmobile_iommu_unmap, + .map_sg = default_iommu_map_sg, .iova_to_phys = shmobile_iommu_iova_to_phys, .add_device = shmobile_iommu_add_device, .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K, diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 3afdf43f732a..6e134c7c227f 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -1,1295 +1,732 @@ /* - * IOMMU API for SMMU in Tegra30 + * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved. * - * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ -#define pr_fmt(fmt) "%s(): " fmt, __func__ - #include <linux/err.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> -#include <linux/pagemap.h> -#include <linux/device.h> -#include <linux/sched.h> #include <linux/iommu.h> -#include <linux/io.h> +#include <linux/kernel.h> #include <linux/of.h> -#include <linux/of_iommu.h> -#include <linux/debugfs.h> -#include <linux/seq_file.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> #include <soc/tegra/ahb.h> +#include <soc/tegra/mc.h> -#include <asm/page.h> -#include <asm/cacheflush.h> - -enum smmu_hwgrp { - HWGRP_AFI, - HWGRP_AVPC, - HWGRP_DC, - HWGRP_DCB, - HWGRP_EPP, - HWGRP_G2, - HWGRP_HC, - HWGRP_HDA, - HWGRP_ISP, - HWGRP_MPE, - HWGRP_NV, - HWGRP_NV2, - HWGRP_PPCS, - HWGRP_SATA, - HWGRP_VDE, - HWGRP_VI, - - HWGRP_COUNT, - - HWGRP_END = ~0, -}; +struct tegra_smmu { + void __iomem *regs; + struct device *dev; -#define HWG_AFI (1 << HWGRP_AFI) -#define HWG_AVPC (1 << HWGRP_AVPC) -#define HWG_DC (1 << HWGRP_DC) -#define HWG_DCB (1 << HWGRP_DCB) -#define HWG_EPP (1 << HWGRP_EPP) -#define HWG_G2 (1 << HWGRP_G2) -#define HWG_HC (1 << HWGRP_HC) -#define HWG_HDA (1 << HWGRP_HDA) -#define HWG_ISP (1 << HWGRP_ISP) -#define HWG_MPE (1 << HWGRP_MPE) -#define HWG_NV (1 << HWGRP_NV) -#define HWG_NV2 (1 << HWGRP_NV2) -#define HWG_PPCS (1 << HWGRP_PPCS) -#define HWG_SATA (1 << HWGRP_SATA) -#define HWG_VDE (1 << HWGRP_VDE) -#define HWG_VI (1 << HWGRP_VI) - -/* bitmap of the page sizes currently supported */ -#define SMMU_IOMMU_PGSIZES (SZ_4K) - -#define SMMU_CONFIG 0x10 -#define SMMU_CONFIG_DISABLE 0 -#define SMMU_CONFIG_ENABLE 1 - -/* REVISIT: To support multiple MCs */ -enum { - _MC = 0, -}; + struct tegra_mc *mc; + const struct tegra_smmu_soc *soc; -enum { - _TLB = 0, - _PTC, -}; + unsigned long *asids; + struct mutex lock; -#define SMMU_CACHE_CONFIG_BASE 0x14 -#define __SMMU_CACHE_CONFIG(mc, cache) (SMMU_CACHE_CONFIG_BASE + 4 * cache) -#define SMMU_CACHE_CONFIG(cache) __SMMU_CACHE_CONFIG(_MC, cache) - -#define SMMU_CACHE_CONFIG_STATS_SHIFT 31 -#define SMMU_CACHE_CONFIG_STATS_ENABLE (1 << SMMU_CACHE_CONFIG_STATS_SHIFT) -#define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT 30 -#define SMMU_CACHE_CONFIG_STATS_TEST (1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT) - -#define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE (1 << 29) -#define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE 0x10 -#define SMMU_TLB_CONFIG_RESET_VAL 0x20000010 - -#define SMMU_PTC_CONFIG_CACHE__ENABLE (1 << 29) -#define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN 0x3f -#define SMMU_PTC_CONFIG_RESET_VAL 0x2000003f - -#define SMMU_PTB_ASID 0x1c -#define SMMU_PTB_ASID_CURRENT_SHIFT 0 - -#define SMMU_PTB_DATA 0x20 -#define SMMU_PTB_DATA_RESET_VAL 0 -#define SMMU_PTB_DATA_ASID_NONSECURE_SHIFT 29 -#define SMMU_PTB_DATA_ASID_WRITABLE_SHIFT 30 -#define SMMU_PTB_DATA_ASID_READABLE_SHIFT 31 - -#define SMMU_TLB_FLUSH 0x30 -#define SMMU_TLB_FLUSH_VA_MATCH_ALL 0 -#define SMMU_TLB_FLUSH_VA_MATCH_SECTION 2 -#define SMMU_TLB_FLUSH_VA_MATCH_GROUP 3 -#define SMMU_TLB_FLUSH_ASID_SHIFT 29 -#define SMMU_TLB_FLUSH_ASID_MATCH_DISABLE 0 -#define SMMU_TLB_FLUSH_ASID_MATCH_ENABLE 1 -#define SMMU_TLB_FLUSH_ASID_MATCH_SHIFT 31 - -#define SMMU_PTC_FLUSH 0x34 -#define SMMU_PTC_FLUSH_TYPE_ALL 0 -#define SMMU_PTC_FLUSH_TYPE_ADR 1 -#define SMMU_PTC_FLUSH_ADR_SHIFT 4 - -#define SMMU_ASID_SECURITY 0x38 - -#define SMMU_STATS_CACHE_COUNT_BASE 0x1f0 - -#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss) \ 
- (SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss) - -#define SMMU_TRANSLATION_ENABLE_0 0x228 -#define SMMU_TRANSLATION_ENABLE_1 0x22c -#define SMMU_TRANSLATION_ENABLE_2 0x230 - -#define SMMU_AFI_ASID 0x238 /* PCIE */ -#define SMMU_AVPC_ASID 0x23c /* AVP */ -#define SMMU_DC_ASID 0x240 /* Display controller */ -#define SMMU_DCB_ASID 0x244 /* Display controller B */ -#define SMMU_EPP_ASID 0x248 /* Encoder pre-processor */ -#define SMMU_G2_ASID 0x24c /* 2D engine */ -#define SMMU_HC_ASID 0x250 /* Host1x */ -#define SMMU_HDA_ASID 0x254 /* High-def audio */ -#define SMMU_ISP_ASID 0x258 /* Image signal processor */ -#define SMMU_MPE_ASID 0x264 /* MPEG encoder */ -#define SMMU_NV_ASID 0x268 /* (3D) */ -#define SMMU_NV2_ASID 0x26c /* (3D) */ -#define SMMU_PPCS_ASID 0x270 /* AHB */ -#define SMMU_SATA_ASID 0x278 /* SATA */ -#define SMMU_VDE_ASID 0x27c /* Video decoder */ -#define SMMU_VI_ASID 0x280 /* Video input */ - -#define SMMU_PDE_NEXT_SHIFT 28 - -#define SMMU_TLB_FLUSH_VA_SECTION__MASK 0xffc00000 -#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */ -#define SMMU_TLB_FLUSH_VA_GROUP__MASK 0xffffc000 -#define SMMU_TLB_FLUSH_VA_GROUP__SHIFT 12 /* right shift */ -#define SMMU_TLB_FLUSH_VA(iova, which) \ - ((((iova) & SMMU_TLB_FLUSH_VA_##which##__MASK) >> \ - SMMU_TLB_FLUSH_VA_##which##__SHIFT) | \ - SMMU_TLB_FLUSH_VA_MATCH_##which) -#define SMMU_PTB_ASID_CUR(n) \ - ((n) << SMMU_PTB_ASID_CURRENT_SHIFT) -#define SMMU_TLB_FLUSH_ASID_MATCH_disable \ - (SMMU_TLB_FLUSH_ASID_MATCH_DISABLE << \ - SMMU_TLB_FLUSH_ASID_MATCH_SHIFT) -#define SMMU_TLB_FLUSH_ASID_MATCH__ENABLE \ - (SMMU_TLB_FLUSH_ASID_MATCH_ENABLE << \ - SMMU_TLB_FLUSH_ASID_MATCH_SHIFT) - -#define SMMU_PAGE_SHIFT 12 -#define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT) -#define SMMU_PAGE_MASK ((1 << SMMU_PAGE_SHIFT) - 1) - -#define SMMU_PDIR_COUNT 1024 -#define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT) -#define SMMU_PTBL_COUNT 1024 -#define SMMU_PTBL_SIZE (sizeof(unsigned long) * SMMU_PTBL_COUNT) -#define SMMU_PDIR_SHIFT 12 -#define SMMU_PDE_SHIFT 12 -#define SMMU_PTE_SHIFT 12 -#define SMMU_PFN_MASK 0x000fffff - -#define SMMU_ADDR_TO_PFN(addr) ((addr) >> 12) -#define SMMU_ADDR_TO_PDN(addr) ((addr) >> 22) -#define SMMU_PDN_TO_ADDR(pdn) ((pdn) << 22) - -#define _READABLE (1 << SMMU_PTB_DATA_ASID_READABLE_SHIFT) -#define _WRITABLE (1 << SMMU_PTB_DATA_ASID_WRITABLE_SHIFT) -#define _NONSECURE (1 << SMMU_PTB_DATA_ASID_NONSECURE_SHIFT) -#define _PDE_NEXT (1 << SMMU_PDE_NEXT_SHIFT) -#define _MASK_ATTR (_READABLE | _WRITABLE | _NONSECURE) - -#define _PDIR_ATTR (_READABLE | _WRITABLE | _NONSECURE) - -#define _PDE_ATTR (_READABLE | _WRITABLE | _NONSECURE) -#define _PDE_ATTR_N (_PDE_ATTR | _PDE_NEXT) -#define _PDE_VACANT(pdn) (((pdn) << 10) | _PDE_ATTR) - -#define _PTE_ATTR (_READABLE | _WRITABLE | _NONSECURE) -#define _PTE_VACANT(addr) (((addr) >> SMMU_PAGE_SHIFT) | _PTE_ATTR) - -#define SMMU_MK_PDIR(page, attr) \ - ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr)) -#define SMMU_MK_PDE(page, attr) \ - (unsigned long)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr)) -#define SMMU_EX_PTBL_PAGE(pde) \ - pfn_to_page((unsigned long)(pde) & SMMU_PFN_MASK) -#define SMMU_PFN_TO_PTE(pfn, attr) (unsigned long)((pfn) | (attr)) - -#define SMMU_ASID_ENABLE(asid) ((asid) | (1 << 31)) -#define SMMU_ASID_DISABLE 0 -#define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0)) - -#define NUM_SMMU_REG_BANKS 3 - -#define smmu_client_enable_hwgrp(c, m) smmu_client_set_hwgrp(c, m, 1) -#define smmu_client_disable_hwgrp(c) smmu_client_set_hwgrp(c, 0, 0) 
-#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1) -#define __smmu_client_disable_hwgrp(c) __smmu_client_set_hwgrp(c, 0, 0) - -#define HWGRP_INIT(client) [HWGRP_##client] = SMMU_##client##_ASID - -static const u32 smmu_hwgrp_asid_reg[] = { - HWGRP_INIT(AFI), - HWGRP_INIT(AVPC), - HWGRP_INIT(DC), - HWGRP_INIT(DCB), - HWGRP_INIT(EPP), - HWGRP_INIT(G2), - HWGRP_INIT(HC), - HWGRP_INIT(HDA), - HWGRP_INIT(ISP), - HWGRP_INIT(MPE), - HWGRP_INIT(NV), - HWGRP_INIT(NV2), - HWGRP_INIT(PPCS), - HWGRP_INIT(SATA), - HWGRP_INIT(VDE), - HWGRP_INIT(VI), + struct list_head list; }; -#define HWGRP_ASID_REG(x) (smmu_hwgrp_asid_reg[x]) -/* - * Per client for address space - */ -struct smmu_client { - struct device *dev; - struct list_head list; - struct smmu_as *as; - u32 hwgrp; +struct tegra_smmu_as { + struct iommu_domain *domain; + struct tegra_smmu *smmu; + unsigned int use_count; + struct page *count; + struct page *pd; + unsigned id; + u32 attr; }; -/* - * Per address space - */ -struct smmu_as { - struct smmu_device *smmu; /* back pointer to container */ - unsigned int asid; - spinlock_t lock; /* for pagetable */ - struct page *pdir_page; - unsigned long pdir_attr; - unsigned long pde_attr; - unsigned long pte_attr; - unsigned int *pte_count; - - struct list_head client; - spinlock_t client_lock; /* for client list */ -}; +static inline void smmu_writel(struct tegra_smmu *smmu, u32 value, + unsigned long offset) +{ + writel(value, smmu->regs + offset); +} -struct smmu_debugfs_info { - struct smmu_device *smmu; - int mc; - int cache; -}; +static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) +{ + return readl(smmu->regs + offset); +} -/* - * Per SMMU device - IOMMU device - */ -struct smmu_device { - void __iomem *regbase; /* register offset base */ - void __iomem **regs; /* register block start address array */ - void __iomem **rege; /* register block end address array */ - int nregs; /* number of register blocks */ - - unsigned long iovmm_base; /* remappable base address */ - unsigned long page_count; /* total remappable size */ - spinlock_t lock; - char *name; - struct device *dev; - struct page *avp_vector_page; /* dummy page shared by all AS's */ +#define SMMU_CONFIG 0x010 +#define SMMU_CONFIG_ENABLE (1 << 0) - /* - * Register image savers for suspend/resume - */ - unsigned long translation_enable_0; - unsigned long translation_enable_1; - unsigned long translation_enable_2; - unsigned long asid_security; +#define SMMU_TLB_CONFIG 0x14 +#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29) +#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28) +#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f) - struct dentry *debugfs_root; - struct smmu_debugfs_info *debugfs_info; +#define SMMU_PTC_CONFIG 0x18 +#define SMMU_PTC_CONFIG_ENABLE (1 << 29) +#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24) +#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f) - struct device_node *ahb; +#define SMMU_PTB_ASID 0x01c +#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f) - int num_as; - struct smmu_as as[0]; /* Run-time allocated array */ -}; +#define SMMU_PTB_DATA 0x020 +#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr)) -static struct smmu_device *smmu_handle; /* unique for a system */ +#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr)) -/* - * SMMU register accessors - */ -static bool inline smmu_valid_reg(struct smmu_device *smmu, - void __iomem *addr) -{ - int i; +#define SMMU_TLB_FLUSH 0x030 +#define 
SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0) +#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0) +#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0) +#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24) +#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \ + SMMU_TLB_FLUSH_VA_MATCH_SECTION) +#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \ + SMMU_TLB_FLUSH_VA_MATCH_GROUP) +#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31) - for (i = 0; i < smmu->nregs; i++) { - if (addr < smmu->regs[i]) - break; - if (addr <= smmu->rege[i]) - return true; - } +#define SMMU_PTC_FLUSH 0x034 +#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0) +#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0) - return false; -} +#define SMMU_PTC_FLUSH_HI 0x9b8 +#define SMMU_PTC_FLUSH_HI_MASK 0x3 -static inline u32 smmu_read(struct smmu_device *smmu, size_t offs) -{ - void __iomem *addr = smmu->regbase + offs; +/* per-SWGROUP SMMU_*_ASID register */ +#define SMMU_ASID_ENABLE (1 << 31) +#define SMMU_ASID_MASK 0x7f +#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK) - BUG_ON(!smmu_valid_reg(smmu, addr)); +/* page table definitions */ +#define SMMU_NUM_PDE 1024 +#define SMMU_NUM_PTE 1024 - return readl(addr); -} +#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4) +#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4) -static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs) -{ - void __iomem *addr = smmu->regbase + offs; +#define SMMU_PDE_SHIFT 22 +#define SMMU_PTE_SHIFT 12 - BUG_ON(!smmu_valid_reg(smmu, addr)); +#define SMMU_PFN_MASK 0x000fffff - writel(val, addr); -} +#define SMMU_PD_READABLE (1 << 31) +#define SMMU_PD_WRITABLE (1 << 30) +#define SMMU_PD_NONSECURE (1 << 29) -#define VA_PAGE_TO_PA(va, page) \ - (page_to_phys(page) + ((unsigned long)(va) & ~PAGE_MASK)) +#define SMMU_PDE_READABLE (1 << 31) +#define SMMU_PDE_WRITABLE (1 << 30) +#define SMMU_PDE_NONSECURE (1 << 29) +#define SMMU_PDE_NEXT (1 << 28) -#define FLUSH_CPU_DCACHE(va, page, size) \ - do { \ - unsigned long _pa_ = VA_PAGE_TO_PA(va, page); \ - __cpuc_flush_dcache_area((void *)(va), (size_t)(size)); \ - outer_flush_range(_pa_, _pa_+(size_t)(size)); \ - } while (0) +#define SMMU_PTE_READABLE (1 << 31) +#define SMMU_PTE_WRITABLE (1 << 30) +#define SMMU_PTE_NONSECURE (1 << 29) -/* - * Any interaction between any block on PPSB and a block on APB or AHB - * must have these read-back barriers to ensure the APB/AHB bus - * transaction is complete before initiating activity on the PPSB - * block. - */ -#define FLUSH_SMMU_REGS(smmu) smmu_read(smmu, SMMU_CONFIG) +#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \ + SMMU_PDE_NONSECURE) +#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \ + SMMU_PTE_NONSECURE) -#define smmu_client_hwgrp(c) (u32)((c)->dev->platform_data) - -static int __smmu_client_set_hwgrp(struct smmu_client *c, - unsigned long map, int on) +static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page, + unsigned long offset) { - int i; - struct smmu_as *as = c->as; - u32 val, offs, mask = SMMU_ASID_ENABLE(as->asid); - struct smmu_device *smmu = as->smmu; - - WARN_ON(!on && map); - if (on && !map) - return -EINVAL; - if (!on) - map = smmu_client_hwgrp(c); - - for_each_set_bit(i, &map, HWGRP_COUNT) { - offs = HWGRP_ASID_REG(i); - val = smmu_read(smmu, offs); - if (on) { - if (WARN_ON(val & mask)) - goto err_hw_busy; - val |= mask; - } else { - WARN_ON((val & mask) == mask); - val &= ~mask; + phys_addr_t phys = page ? 
page_to_phys(page) : 0; + u32 value; + + if (page) { + offset &= ~(smmu->mc->soc->atom_size - 1); + + if (smmu->mc->soc->num_address_bits > 32) { +#ifdef CONFIG_PHYS_ADDR_T_64BIT + value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK; +#else + value = 0; +#endif + smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI); } - smmu_write(smmu, val, offs); - } - FLUSH_SMMU_REGS(smmu); - c->hwgrp = map; - return 0; -err_hw_busy: - for_each_set_bit(i, &map, HWGRP_COUNT) { - offs = HWGRP_ASID_REG(i); - val = smmu_read(smmu, offs); - val &= ~mask; - smmu_write(smmu, val, offs); + value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR; + } else { + value = SMMU_PTC_FLUSH_TYPE_ALL; } - return -EBUSY; + + smmu_writel(smmu, value, SMMU_PTC_FLUSH); } -static int smmu_client_set_hwgrp(struct smmu_client *c, u32 map, int on) +static inline void smmu_flush_tlb(struct tegra_smmu *smmu) { - u32 val; - unsigned long flags; - struct smmu_as *as = c->as; - struct smmu_device *smmu = as->smmu; - - spin_lock_irqsave(&smmu->lock, flags); - val = __smmu_client_set_hwgrp(c, map, on); - spin_unlock_irqrestore(&smmu->lock, flags); - return val; + smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH); } -/* - * Flush all TLB entries and all PTC entries - * Caller must lock smmu - */ -static void smmu_flush_regs(struct smmu_device *smmu, int enable) +static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu, + unsigned long asid) { - u32 val; - - smmu_write(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH); - FLUSH_SMMU_REGS(smmu); - val = SMMU_TLB_FLUSH_VA_MATCH_ALL | - SMMU_TLB_FLUSH_ASID_MATCH_disable; - smmu_write(smmu, val, SMMU_TLB_FLUSH); + u32 value; - if (enable) - smmu_write(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG); - FLUSH_SMMU_REGS(smmu); + value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | + SMMU_TLB_FLUSH_VA_MATCH_ALL; + smmu_writel(smmu, value, SMMU_TLB_FLUSH); } -static int smmu_setup_regs(struct smmu_device *smmu) +static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu, + unsigned long asid, + unsigned long iova) { - int i; - u32 val; + u32 value; - for (i = 0; i < smmu->num_as; i++) { - struct smmu_as *as = &smmu->as[i]; - struct smmu_client *c; - - smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID); - val = as->pdir_page ? - SMMU_MK_PDIR(as->pdir_page, as->pdir_attr) : - SMMU_PTB_DATA_RESET_VAL; - smmu_write(smmu, val, SMMU_PTB_DATA); - - list_for_each_entry(c, &as->client, list) - __smmu_client_set_hwgrp(c, c->hwgrp, 1); - } - - smmu_write(smmu, smmu->translation_enable_0, SMMU_TRANSLATION_ENABLE_0); - smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1); - smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2); - smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY); - smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB)); - smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC)); - - smmu_flush_regs(smmu, 1); - - return tegra_ahb_enable_smmu(smmu->ahb); + value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | + SMMU_TLB_FLUSH_VA_SECTION(iova); + smmu_writel(smmu, value, SMMU_TLB_FLUSH); } -static void flush_ptc_and_tlb(struct smmu_device *smmu, - struct smmu_as *as, dma_addr_t iova, - unsigned long *pte, struct page *page, int is_pde) +static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu, + unsigned long asid, + unsigned long iova) { - u32 val; - unsigned long tlb_flush_va = is_pde - ? 
SMMU_TLB_FLUSH_VA(iova, SECTION) - : SMMU_TLB_FLUSH_VA(iova, GROUP); - - val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pte, page); - smmu_write(smmu, val, SMMU_PTC_FLUSH); - FLUSH_SMMU_REGS(smmu); - val = tlb_flush_va | - SMMU_TLB_FLUSH_ASID_MATCH__ENABLE | - (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT); - smmu_write(smmu, val, SMMU_TLB_FLUSH); - FLUSH_SMMU_REGS(smmu); -} + u32 value; -static void free_ptbl(struct smmu_as *as, dma_addr_t iova) -{ - unsigned long pdn = SMMU_ADDR_TO_PDN(iova); - unsigned long *pdir = (unsigned long *)page_address(as->pdir_page); - - if (pdir[pdn] != _PDE_VACANT(pdn)) { - dev_dbg(as->smmu->dev, "pdn: %lx\n", pdn); - - ClearPageReserved(SMMU_EX_PTBL_PAGE(pdir[pdn])); - __free_page(SMMU_EX_PTBL_PAGE(pdir[pdn])); - pdir[pdn] = _PDE_VACANT(pdn); - FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]); - flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn], - as->pdir_page, 1); - } + value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) | + SMMU_TLB_FLUSH_VA_GROUP(iova); + smmu_writel(smmu, value, SMMU_TLB_FLUSH); } -static void free_pdir(struct smmu_as *as) +static inline void smmu_flush(struct tegra_smmu *smmu) { - unsigned addr; - int count; - struct device *dev = as->smmu->dev; - - if (!as->pdir_page) - return; - - addr = as->smmu->iovmm_base; - count = as->smmu->page_count; - while (count-- > 0) { - free_ptbl(as, addr); - addr += SMMU_PAGE_SIZE * SMMU_PTBL_COUNT; - } - ClearPageReserved(as->pdir_page); - __free_page(as->pdir_page); - as->pdir_page = NULL; - devm_kfree(dev, as->pte_count); - as->pte_count = NULL; + smmu_readl(smmu, SMMU_CONFIG); } -/* - * Maps PTBL for given iova and returns the PTE address - * Caller must unmap the mapped PTBL returned in *ptbl_page_p - */ -static unsigned long *locate_pte(struct smmu_as *as, - dma_addr_t iova, bool allocate, - struct page **ptbl_page_p, - unsigned int **count) +static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp) { - unsigned long ptn = SMMU_ADDR_TO_PFN(iova); - unsigned long pdn = SMMU_ADDR_TO_PDN(iova); - unsigned long *pdir = page_address(as->pdir_page); - unsigned long *ptbl; - - if (pdir[pdn] != _PDE_VACANT(pdn)) { - /* Mapped entry table already exists */ - *ptbl_page_p = SMMU_EX_PTBL_PAGE(pdir[pdn]); - ptbl = page_address(*ptbl_page_p); - } else if (!allocate) { - return NULL; - } else { - int pn; - unsigned long addr = SMMU_PDN_TO_ADDR(pdn); + unsigned long id; - /* Vacant - allocate a new page table */ - dev_dbg(as->smmu->dev, "New PTBL pdn: %lx\n", pdn); + mutex_lock(&smmu->lock); - *ptbl_page_p = alloc_page(GFP_ATOMIC); - if (!*ptbl_page_p) { - dev_err(as->smmu->dev, - "failed to allocate smmu_device page table\n"); - return NULL; - } - SetPageReserved(*ptbl_page_p); - ptbl = (unsigned long *)page_address(*ptbl_page_p); - for (pn = 0; pn < SMMU_PTBL_COUNT; - pn++, addr += SMMU_PAGE_SIZE) { - ptbl[pn] = _PTE_VACANT(addr); - } - FLUSH_CPU_DCACHE(ptbl, *ptbl_page_p, SMMU_PTBL_SIZE); - pdir[pdn] = SMMU_MK_PDE(*ptbl_page_p, - as->pde_attr | _PDE_NEXT); - FLUSH_CPU_DCACHE(&pdir[pdn], as->pdir_page, sizeof pdir[pdn]); - flush_ptc_and_tlb(as->smmu, as, iova, &pdir[pdn], - as->pdir_page, 1); + id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids); + if (id >= smmu->soc->num_asids) { + mutex_unlock(&smmu->lock); + return -ENOSPC; } - *count = &as->pte_count[pdn]; - return &ptbl[ptn % SMMU_PTBL_COUNT]; + set_bit(id, smmu->asids); + *idp = id; + + mutex_unlock(&smmu->lock); + return 0; } -#ifdef CONFIG_SMMU_SIG_DEBUG -static void put_signature(struct smmu_as *as, - 
dma_addr_t iova, unsigned long pfn) +static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id) { - struct page *page; - unsigned long *vaddr; - - page = pfn_to_page(pfn); - vaddr = page_address(page); - if (!vaddr) - return; - - vaddr[0] = iova; - vaddr[1] = pfn << PAGE_SHIFT; - FLUSH_CPU_DCACHE(vaddr, page, sizeof(vaddr[0]) * 2); + mutex_lock(&smmu->lock); + clear_bit(id, smmu->asids); + mutex_unlock(&smmu->lock); } -#else -static inline void put_signature(struct smmu_as *as, - unsigned long addr, unsigned long pfn) + +static bool tegra_smmu_capable(enum iommu_cap cap) { + return false; } -#endif -/* - * Caller must not hold as->lock - */ -static int alloc_pdir(struct smmu_as *as) +static int tegra_smmu_domain_init(struct iommu_domain *domain) { - unsigned long *pdir, flags; - int pdn, err = 0; - u32 val; - struct smmu_device *smmu = as->smmu; - struct page *page; - unsigned int *cnt; + struct tegra_smmu_as *as; + unsigned int i; + uint32_t *pd; - /* - * do the allocation, then grab as->lock - */ - cnt = devm_kzalloc(smmu->dev, - sizeof(cnt[0]) * SMMU_PDIR_COUNT, - GFP_KERNEL); - page = alloc_page(GFP_KERNEL | __GFP_DMA); + as = kzalloc(sizeof(*as), GFP_KERNEL); + if (!as) + return -ENOMEM; - spin_lock_irqsave(&as->lock, flags); + as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; + as->domain = domain; - if (as->pdir_page) { - /* We raced, free the redundant */ - err = -EAGAIN; - goto err_out; + as->pd = alloc_page(GFP_KERNEL | __GFP_DMA); + if (!as->pd) { + kfree(as); + return -ENOMEM; } - if (!page || !cnt) { - dev_err(smmu->dev, "failed to allocate at %s\n", __func__); - err = -ENOMEM; - goto err_out; + as->count = alloc_page(GFP_KERNEL); + if (!as->count) { + __free_page(as->pd); + kfree(as); + return -ENOMEM; } - as->pdir_page = page; - as->pte_count = cnt; + /* clear PDEs */ + pd = page_address(as->pd); + SetPageReserved(as->pd); - SetPageReserved(as->pdir_page); - pdir = page_address(as->pdir_page); + for (i = 0; i < SMMU_NUM_PDE; i++) + pd[i] = 0; - for (pdn = 0; pdn < SMMU_PDIR_COUNT; pdn++) - pdir[pdn] = _PDE_VACANT(pdn); - FLUSH_CPU_DCACHE(pdir, as->pdir_page, SMMU_PDIR_SIZE); - val = SMMU_PTC_FLUSH_TYPE_ADR | VA_PAGE_TO_PA(pdir, as->pdir_page); - smmu_write(smmu, val, SMMU_PTC_FLUSH); - FLUSH_SMMU_REGS(as->smmu); - val = SMMU_TLB_FLUSH_VA_MATCH_ALL | - SMMU_TLB_FLUSH_ASID_MATCH__ENABLE | - (as->asid << SMMU_TLB_FLUSH_ASID_SHIFT); - smmu_write(smmu, val, SMMU_TLB_FLUSH); - FLUSH_SMMU_REGS(as->smmu); + /* clear PDE usage counters */ + pd = page_address(as->count); + SetPageReserved(as->count); - spin_unlock_irqrestore(&as->lock, flags); - - return 0; + for (i = 0; i < SMMU_NUM_PDE; i++) + pd[i] = 0; -err_out: - spin_unlock_irqrestore(&as->lock, flags); + domain->priv = as; - devm_kfree(smmu->dev, cnt); - if (page) - __free_page(page); - return err; + return 0; } -static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova) +static void tegra_smmu_domain_destroy(struct iommu_domain *domain) { - unsigned long *pte; - struct page *page; - unsigned int *count; + struct tegra_smmu_as *as = domain->priv; - pte = locate_pte(as, iova, false, &page, &count); - if (WARN_ON(!pte)) - return; + /* TODO: free page directory and page tables */ + ClearPageReserved(as->pd); - if (WARN_ON(*pte == _PTE_VACANT(iova))) - return; - - *pte = _PTE_VACANT(iova); - FLUSH_CPU_DCACHE(pte, page, sizeof(*pte)); - flush_ptc_and_tlb(as->smmu, as, iova, pte, page, 0); - if (!--(*count)) - free_ptbl(as, iova); + kfree(as); } -static void __smmu_iommu_map_pfn(struct 
smmu_as *as, dma_addr_t iova, - unsigned long pfn) +static const struct tegra_smmu_swgroup * +tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup) { - struct smmu_device *smmu = as->smmu; - unsigned long *pte; - unsigned int *count; - struct page *page; + const struct tegra_smmu_swgroup *group = NULL; + unsigned int i; - pte = locate_pte(as, iova, true, &page, &count); - if (WARN_ON(!pte)) - return; + for (i = 0; i < smmu->soc->num_swgroups; i++) { + if (smmu->soc->swgroups[i].swgroup == swgroup) { + group = &smmu->soc->swgroups[i]; + break; + } + } - if (*pte == _PTE_VACANT(iova)) - (*count)++; - *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr); - if (unlikely((*pte == _PTE_VACANT(iova)))) - (*count)--; - FLUSH_CPU_DCACHE(pte, page, sizeof(*pte)); - flush_ptc_and_tlb(smmu, as, iova, pte, page, 0); - put_signature(as, iova, pfn); + return group; } -static int smmu_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t pa, size_t bytes, int prot) +static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup, + unsigned int asid) { - struct smmu_as *as = domain->priv; - unsigned long pfn = __phys_to_pfn(pa); - unsigned long flags; + const struct tegra_smmu_swgroup *group; + unsigned int i; + u32 value; - dev_dbg(as->smmu->dev, "[%d] %08lx:%pa\n", as->asid, iova, &pa); + for (i = 0; i < smmu->soc->num_clients; i++) { + const struct tegra_mc_client *client = &smmu->soc->clients[i]; - if (!pfn_valid(pfn)) - return -ENOMEM; - - spin_lock_irqsave(&as->lock, flags); - __smmu_iommu_map_pfn(as, iova, pfn); - spin_unlock_irqrestore(&as->lock, flags); - return 0; -} - -static size_t smmu_iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t bytes) -{ - struct smmu_as *as = domain->priv; - unsigned long flags; + if (client->swgroup != swgroup) + continue; - dev_dbg(as->smmu->dev, "[%d] %08lx\n", as->asid, iova); + value = smmu_readl(smmu, client->smmu.reg); + value |= BIT(client->smmu.bit); + smmu_writel(smmu, value, client->smmu.reg); + } - spin_lock_irqsave(&as->lock, flags); - __smmu_iommu_unmap(as, iova); - spin_unlock_irqrestore(&as->lock, flags); - return SMMU_PAGE_SIZE; + group = tegra_smmu_find_swgroup(smmu, swgroup); + if (group) { + value = smmu_readl(smmu, group->reg); + value &= ~SMMU_ASID_MASK; + value |= SMMU_ASID_VALUE(asid); + value |= SMMU_ASID_ENABLE; + smmu_writel(smmu, value, group->reg); + } } -static phys_addr_t smmu_iommu_iova_to_phys(struct iommu_domain *domain, - dma_addr_t iova) +static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup, + unsigned int asid) { - struct smmu_as *as = domain->priv; - unsigned long *pte; - unsigned int *count; - struct page *page; - unsigned long pfn; - unsigned long flags; + const struct tegra_smmu_swgroup *group; + unsigned int i; + u32 value; - spin_lock_irqsave(&as->lock, flags); + group = tegra_smmu_find_swgroup(smmu, swgroup); + if (group) { + value = smmu_readl(smmu, group->reg); + value &= ~SMMU_ASID_MASK; + value |= SMMU_ASID_VALUE(asid); + value &= ~SMMU_ASID_ENABLE; + smmu_writel(smmu, value, group->reg); + } - pte = locate_pte(as, iova, true, &page, &count); - pfn = *pte & SMMU_PFN_MASK; - WARN_ON(!pfn_valid(pfn)); - dev_dbg(as->smmu->dev, - "iova:%08llx pfn:%08lx asid:%d\n", (unsigned long long)iova, - pfn, as->asid); + for (i = 0; i < smmu->soc->num_clients; i++) { + const struct tegra_mc_client *client = &smmu->soc->clients[i]; - spin_unlock_irqrestore(&as->lock, flags); - return PFN_PHYS(pfn); -} + if (client->swgroup != swgroup) + continue; -static 
bool smmu_iommu_capable(enum iommu_cap cap) -{ - return false; + value = smmu_readl(smmu, client->smmu.reg); + value &= ~BIT(client->smmu.bit); + smmu_writel(smmu, value, client->smmu.reg); + } } -static int smmu_iommu_attach_dev(struct iommu_domain *domain, - struct device *dev) +static int tegra_smmu_as_prepare(struct tegra_smmu *smmu, + struct tegra_smmu_as *as) { - struct smmu_as *as = domain->priv; - struct smmu_device *smmu = as->smmu; - struct smmu_client *client, *c; - u32 map; + u32 value; int err; - client = devm_kzalloc(smmu->dev, sizeof(*c), GFP_KERNEL); - if (!client) - return -ENOMEM; - client->dev = dev; - client->as = as; - map = (unsigned long)dev->platform_data; - if (!map) - return -EINVAL; - - err = smmu_client_enable_hwgrp(client, map); - if (err) - goto err_hwgrp; - - spin_lock(&as->client_lock); - list_for_each_entry(c, &as->client, list) { - if (c->dev == dev) { - dev_err(smmu->dev, - "%s is already attached\n", dev_name(c->dev)); - err = -EINVAL; - goto err_client; - } + if (as->use_count > 0) { + as->use_count++; + return 0; } - list_add(&client->list, &as->client); - spin_unlock(&as->client_lock); - /* - * Reserve "page zero" for AVP vectors using a common dummy - * page. - */ - if (map & HWG_AVPC) { - struct page *page; + err = tegra_smmu_alloc_asid(smmu, &as->id); + if (err < 0) + return err; - page = as->smmu->avp_vector_page; - __smmu_iommu_map_pfn(as, 0, page_to_pfn(page)); + smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD); + smmu_flush_ptc(smmu, as->pd, 0); + smmu_flush_tlb_asid(smmu, as->id); - pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n"); - } + smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID); + value = SMMU_PTB_DATA_VALUE(as->pd, as->attr); + smmu_writel(smmu, value, SMMU_PTB_DATA); + smmu_flush(smmu); - dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev)); - return 0; + as->smmu = smmu; + as->use_count++; -err_client: - smmu_client_disable_hwgrp(client); - spin_unlock(&as->client_lock); -err_hwgrp: - devm_kfree(smmu->dev, client); - return err; + return 0; } -static void smmu_iommu_detach_dev(struct iommu_domain *domain, - struct device *dev) +static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu, + struct tegra_smmu_as *as) { - struct smmu_as *as = domain->priv; - struct smmu_device *smmu = as->smmu; - struct smmu_client *c; - - spin_lock(&as->client_lock); - - list_for_each_entry(c, &as->client, list) { - if (c->dev == dev) { - smmu_client_disable_hwgrp(c); - list_del(&c->list); - devm_kfree(smmu->dev, c); - c->as = NULL; - dev_dbg(smmu->dev, - "%s is detached\n", dev_name(c->dev)); - goto out; - } - } - dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev)); -out: - spin_unlock(&as->client_lock); + if (--as->use_count > 0) + return; + + tegra_smmu_free_asid(smmu, as->id); + as->smmu = NULL; } -static int smmu_iommu_domain_init(struct iommu_domain *domain) +static int tegra_smmu_attach_dev(struct iommu_domain *domain, + struct device *dev) { - int i, err = -EAGAIN; - unsigned long flags; - struct smmu_as *as; - struct smmu_device *smmu = smmu_handle; + struct tegra_smmu *smmu = dev->archdata.iommu; + struct tegra_smmu_as *as = domain->priv; + struct device_node *np = dev->of_node; + struct of_phandle_args args; + unsigned int index = 0; + int err = 0; - /* Look for a free AS with lock held */ - for (i = 0; i < smmu->num_as; i++) { - as = &smmu->as[i]; + while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, + &args)) { + unsigned int swgroup = args.args[0]; - if (as->pdir_page) + if (args.np != 
smmu->dev->of_node) { + of_node_put(args.np); continue; + } - err = alloc_pdir(as); - if (!err) - goto found; + of_node_put(args.np); - if (err != -EAGAIN) - break; + err = tegra_smmu_as_prepare(smmu, as); + if (err < 0) + return err; + + tegra_smmu_enable(smmu, swgroup, as->id); + index++; } - if (i == smmu->num_as) - dev_err(smmu->dev, "no free AS\n"); - return err; -found: - spin_lock_irqsave(&smmu->lock, flags); + if (index == 0) + return -ENODEV; - /* Update PDIR register */ - smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID); - smmu_write(smmu, - SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA); - FLUSH_SMMU_REGS(smmu); + return 0; +} - spin_unlock_irqrestore(&smmu->lock, flags); +static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct tegra_smmu_as *as = domain->priv; + struct device_node *np = dev->of_node; + struct tegra_smmu *smmu = as->smmu; + struct of_phandle_args args; + unsigned int index = 0; - domain->priv = as; + while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, + &args)) { + unsigned int swgroup = args.args[0]; - domain->geometry.aperture_start = smmu->iovmm_base; - domain->geometry.aperture_end = smmu->iovmm_base + - smmu->page_count * SMMU_PAGE_SIZE - 1; - domain->geometry.force_aperture = true; + if (args.np != smmu->dev->of_node) { + of_node_put(args.np); + continue; + } - dev_dbg(smmu->dev, "smmu_as@%p\n", as); + of_node_put(args.np); - return 0; + tegra_smmu_disable(smmu, swgroup, as->id); + tegra_smmu_as_unprepare(smmu, as); + index++; + } } -static void smmu_iommu_domain_destroy(struct iommu_domain *domain) +static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, + struct page **pagep) { - struct smmu_as *as = domain->priv; - struct smmu_device *smmu = as->smmu; - unsigned long flags; + u32 *pd = page_address(as->pd), *pt, *count; + u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff; + u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff; + struct tegra_smmu *smmu = as->smmu; + struct page *page; + unsigned int i; + + if (pd[pde] == 0) { + page = alloc_page(GFP_KERNEL | __GFP_DMA); + if (!page) + return NULL; - spin_lock_irqsave(&as->lock, flags); + pt = page_address(page); + SetPageReserved(page); - if (as->pdir_page) { - spin_lock(&smmu->lock); - smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID); - smmu_write(smmu, SMMU_PTB_DATA_RESET_VAL, SMMU_PTB_DATA); - FLUSH_SMMU_REGS(smmu); - spin_unlock(&smmu->lock); + for (i = 0; i < SMMU_NUM_PTE; i++) + pt[i] = 0; - free_pdir(as); - } + smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT); - if (!list_empty(&as->client)) { - struct smmu_client *c; + pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT); - list_for_each_entry(c, &as->client, list) - smmu_iommu_detach_dev(domain, c->dev); + smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4); + smmu_flush_ptc(smmu, as->pd, pde << 2); + smmu_flush_tlb_section(smmu, as->id, iova); + smmu_flush(smmu); + } else { + page = pfn_to_page(pd[pde] & SMMU_PFN_MASK); + pt = page_address(page); } - spin_unlock_irqrestore(&as->lock, flags); + *pagep = page; - domain->priv = NULL; - dev_dbg(smmu->dev, "smmu_as@%p\n", as); -} + /* Keep track of entries in this page table. 
*/ + count = page_address(as->count); + if (pt[pte] == 0) + count[pde]++; -static const struct iommu_ops smmu_iommu_ops = { - .capable = smmu_iommu_capable, - .domain_init = smmu_iommu_domain_init, - .domain_destroy = smmu_iommu_domain_destroy, - .attach_dev = smmu_iommu_attach_dev, - .detach_dev = smmu_iommu_detach_dev, - .map = smmu_iommu_map, - .unmap = smmu_iommu_unmap, - .iova_to_phys = smmu_iommu_iova_to_phys, - .pgsize_bitmap = SMMU_IOMMU_PGSIZES, -}; - -/* Should be in the order of enum */ -static const char * const smmu_debugfs_mc[] = { "mc", }; -static const char * const smmu_debugfs_cache[] = { "tlb", "ptc", }; + return &pt[pte]; +} -static ssize_t smmu_debugfs_stats_write(struct file *file, - const char __user *buffer, - size_t count, loff_t *pos) +static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova) { - struct smmu_debugfs_info *info; - struct smmu_device *smmu; - int i; - enum { - _OFF = 0, - _ON, - _RESET, - }; - const char * const command[] = { - [_OFF] = "off", - [_ON] = "on", - [_RESET] = "reset", - }; - char str[] = "reset"; - u32 val; - size_t offs; + u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff; + u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff; + u32 *count = page_address(as->count); + u32 *pd = page_address(as->pd), *pt; + struct page *page; - count = min_t(size_t, count, sizeof(str)); - if (copy_from_user(str, buffer, count)) - return -EINVAL; + page = pfn_to_page(pd[pde] & SMMU_PFN_MASK); + pt = page_address(page); - for (i = 0; i < ARRAY_SIZE(command); i++) - if (strncmp(str, command[i], - strlen(command[i])) == 0) - break; + /* + * When no entries in this page table are used anymore, return the + * memory page to the system. + */ + if (pt[pte] != 0) { + if (--count[pde] == 0) { + ClearPageReserved(page); + __free_page(page); + pd[pde] = 0; + } - if (i == ARRAY_SIZE(command)) - return -EINVAL; - - info = file_inode(file)->i_private; - smmu = info->smmu; - - offs = SMMU_CACHE_CONFIG(info->cache); - val = smmu_read(smmu, offs); - switch (i) { - case _OFF: - val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE; - val &= ~SMMU_CACHE_CONFIG_STATS_TEST; - smmu_write(smmu, val, offs); - break; - case _ON: - val |= SMMU_CACHE_CONFIG_STATS_ENABLE; - val &= ~SMMU_CACHE_CONFIG_STATS_TEST; - smmu_write(smmu, val, offs); - break; - case _RESET: - val |= SMMU_CACHE_CONFIG_STATS_TEST; - smmu_write(smmu, val, offs); - val &= ~SMMU_CACHE_CONFIG_STATS_TEST; - smmu_write(smmu, val, offs); - break; - default: - BUG(); - break; + pt[pte] = 0; } - - dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__, - val, smmu_read(smmu, offs), offs); - - return count; } -static int smmu_debugfs_stats_show(struct seq_file *s, void *v) +static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) { - struct smmu_debugfs_info *info = s->private; - struct smmu_device *smmu = info->smmu; - int i; - const char * const stats[] = { "hit", "miss", }; + struct tegra_smmu_as *as = domain->priv; + struct tegra_smmu *smmu = as->smmu; + unsigned long offset; + struct page *page; + u32 *pte; + pte = as_get_pte(as, iova, &page); + if (!pte) + return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(stats); i++) { - u32 val; - size_t offs; + *pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR; + offset = offset_in_page(pte); - offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i); - val = smmu_read(smmu, offs); - seq_printf(s, "%s:%08x ", stats[i], val); + smmu->soc->ops->flush_dcache(page, offset, 4); + smmu_flush_ptc(smmu, page, offset); + smmu_flush_tlb_group(smmu, as->id, 
iova); + smmu_flush(smmu); - dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__, - stats[i], val, offs); - } - seq_printf(s, "\n"); return 0; } -static int smmu_debugfs_stats_open(struct inode *inode, struct file *file) +static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, + size_t size) { - return single_open(file, smmu_debugfs_stats_show, inode->i_private); -} + struct tegra_smmu_as *as = domain->priv; + struct tegra_smmu *smmu = as->smmu; + unsigned long offset; + struct page *page; + u32 *pte; -static const struct file_operations smmu_debugfs_stats_fops = { - .open = smmu_debugfs_stats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = smmu_debugfs_stats_write, -}; + pte = as_get_pte(as, iova, &page); + if (!pte) + return 0; -static void smmu_debugfs_delete(struct smmu_device *smmu) -{ - debugfs_remove_recursive(smmu->debugfs_root); - kfree(smmu->debugfs_info); + offset = offset_in_page(pte); + as_put_pte(as, iova); + + smmu->soc->ops->flush_dcache(page, offset, 4); + smmu_flush_ptc(smmu, page, offset); + smmu_flush_tlb_group(smmu, as->id, iova); + smmu_flush(smmu); + + return size; } -static void smmu_debugfs_create(struct smmu_device *smmu) +static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) { - int i; - size_t bytes; - struct dentry *root; - - bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) * - sizeof(*smmu->debugfs_info); - smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL); - if (!smmu->debugfs_info) - return; - - root = debugfs_create_dir(dev_name(smmu->dev), NULL); - if (!root) - goto err_out; - smmu->debugfs_root = root; - - for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) { - int j; - struct dentry *mc; - - mc = debugfs_create_dir(smmu_debugfs_mc[i], root); - if (!mc) - goto err_out; - - for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) { - struct dentry *cache; - struct smmu_debugfs_info *info; - - info = smmu->debugfs_info; - info += i * ARRAY_SIZE(smmu_debugfs_mc) + j; - info->smmu = smmu; - info->mc = i; - info->cache = j; - - cache = debugfs_create_file(smmu_debugfs_cache[j], - S_IWUGO | S_IRUGO, mc, - (void *)info, - &smmu_debugfs_stats_fops); - if (!cache) - goto err_out; - } - } + struct tegra_smmu_as *as = domain->priv; + struct page *page; + unsigned long pfn; + u32 *pte; - return; + pte = as_get_pte(as, iova, &page); + pfn = *pte & SMMU_PFN_MASK; -err_out: - smmu_debugfs_delete(smmu); + return PFN_PHYS(pfn); } -static int tegra_smmu_suspend(struct device *dev) +static struct tegra_smmu *tegra_smmu_find(struct device_node *np) { - struct smmu_device *smmu = dev_get_drvdata(dev); + struct platform_device *pdev; + struct tegra_mc *mc; - smmu->translation_enable_0 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_0); - smmu->translation_enable_1 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_1); - smmu->translation_enable_2 = smmu_read(smmu, SMMU_TRANSLATION_ENABLE_2); - smmu->asid_security = smmu_read(smmu, SMMU_ASID_SECURITY); - return 0; + pdev = of_find_device_by_node(np); + if (!pdev) + return NULL; + + mc = platform_get_drvdata(pdev); + if (!mc) + return NULL; + + return mc->smmu; } -static int tegra_smmu_resume(struct device *dev) +static int tegra_smmu_add_device(struct device *dev) { - struct smmu_device *smmu = dev_get_drvdata(dev); - unsigned long flags; - int err; + struct device_node *np = dev->of_node; + struct of_phandle_args args; + unsigned int index = 0; - spin_lock_irqsave(&smmu->lock, flags); - err = smmu_setup_regs(smmu); - 
spin_unlock_irqrestore(&smmu->lock, flags); - return err; + while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, + &args) == 0) { + struct tegra_smmu *smmu; + + smmu = tegra_smmu_find(args.np); + if (smmu) { + /* + * Only a single IOMMU master interface is currently + * supported by the Linux kernel, so abort after the + * first match. + */ + dev->archdata.iommu = smmu; + break; + } + + index++; + } + + return 0; } -static int tegra_smmu_probe(struct platform_device *pdev) +static void tegra_smmu_remove_device(struct device *dev) { - struct smmu_device *smmu; - struct device *dev = &pdev->dev; - int i, asids, err = 0; - dma_addr_t uninitialized_var(base); - size_t bytes, uninitialized_var(size); + dev->archdata.iommu = NULL; +} - if (smmu_handle) - return -EIO; +static const struct iommu_ops tegra_smmu_ops = { + .capable = tegra_smmu_capable, + .domain_init = tegra_smmu_domain_init, + .domain_destroy = tegra_smmu_domain_destroy, + .attach_dev = tegra_smmu_attach_dev, + .detach_dev = tegra_smmu_detach_dev, + .add_device = tegra_smmu_add_device, + .remove_device = tegra_smmu_remove_device, + .map = tegra_smmu_map, + .unmap = tegra_smmu_unmap, + .map_sg = default_iommu_map_sg, + .iova_to_phys = tegra_smmu_iova_to_phys, - BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT); + .pgsize_bitmap = SZ_4K, +}; - if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids)) - return -ENODEV; +static void tegra_smmu_ahb_enable(void) +{ + static const struct of_device_id ahb_match[] = { + { .compatible = "nvidia,tegra30-ahb", }, + { } + }; + struct device_node *ahb; - bytes = sizeof(*smmu) + asids * sizeof(*smmu->as); - smmu = devm_kzalloc(dev, bytes, GFP_KERNEL); - if (!smmu) { - dev_err(dev, "failed to allocate smmu_device\n"); - return -ENOMEM; + ahb = of_find_matching_node(NULL, ahb_match); + if (ahb) { + tegra_ahb_enable_smmu(ahb); + of_node_put(ahb); } +} - smmu->nregs = pdev->num_resources; - smmu->regs = devm_kzalloc(dev, 2 * smmu->nregs * sizeof(*smmu->regs), - GFP_KERNEL); - smmu->rege = smmu->regs + smmu->nregs; - if (!smmu->regs) - return -ENOMEM; - for (i = 0; i < smmu->nregs; i++) { - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, i); - smmu->regs[i] = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(smmu->regs[i])) - return PTR_ERR(smmu->regs[i]); - smmu->rege[i] = smmu->regs[i] + resource_size(res) - 1; - } - /* Same as "mc" 1st regiter block start address */ - smmu->regbase = (void __iomem *)((u32)smmu->regs[0] & PAGE_MASK); +struct tegra_smmu *tegra_smmu_probe(struct device *dev, + const struct tegra_smmu_soc *soc, + struct tegra_mc *mc) +{ + struct tegra_smmu *smmu; + size_t size; + u32 value; + int err; - err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size); - if (err) - return -ENODEV; + /* This can happen on Tegra20 which doesn't have an SMMU */ + if (!soc) + return NULL; - if (size & SMMU_PAGE_MASK) - return -EINVAL; + smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); + if (!smmu) + return ERR_PTR(-ENOMEM); - size >>= SMMU_PAGE_SHIFT; - if (!size) - return -EINVAL; + /* + * This is a bit of a hack. Ideally we'd want to simply return this + * value. However the IOMMU registration process will attempt to add + * all devices to the IOMMU when bus_set_iommu() is called. In order + * not to rely on global variables to track the IOMMU instance, we + * set it here so that it can be looked up from the .add_device() + * callback via the IOMMU device's .drvdata field. 
+ */
+ mc->smmu = smmu;
- smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0);
- if (!smmu->ahb)
- return -ENODEV;
+ size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);
- smmu->dev = dev;
- smmu->num_as = asids;
- smmu->iovmm_base = base;
- smmu->page_count = size;
-
- smmu->translation_enable_0 = ~0;
- smmu->translation_enable_1 = ~0;
- smmu->translation_enable_2 = ~0;
- smmu->asid_security = 0;
-
- for (i = 0; i < smmu->num_as; i++) {
- struct smmu_as *as = &smmu->as[i];
-
- as->smmu = smmu;
- as->asid = i;
- as->pdir_attr = _PDIR_ATTR;
- as->pde_attr = _PDE_ATTR;
- as->pte_attr = _PTE_ATTR;
-
- spin_lock_init(&as->lock);
- spin_lock_init(&as->client_lock);
- INIT_LIST_HEAD(&as->client);
- }
- spin_lock_init(&smmu->lock);
- err = smmu_setup_regs(smmu);
- if (err)
- return err;
- platform_set_drvdata(pdev, smmu);
+ smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!smmu->asids)
+ return ERR_PTR(-ENOMEM);
- smmu->avp_vector_page = alloc_page(GFP_KERNEL);
- if (!smmu->avp_vector_page)
- return -ENOMEM;
+ mutex_init(&smmu->lock);
- smmu_debugfs_create(smmu);
- smmu_handle = smmu;
- bus_set_iommu(&platform_bus_type, &smmu_iommu_ops);
- return 0;
-}
+ smmu->regs = mc->regs;
+ smmu->soc = soc;
+ smmu->dev = dev;
+ smmu->mc = mc;
-static int tegra_smmu_remove(struct platform_device *pdev)
-{
- struct smmu_device *smmu = platform_get_drvdata(pdev);
- int i;
+ value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
- smmu_debugfs_delete(smmu);
+ if (soc->supports_request_limit)
+ value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);
- smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
- for (i = 0; i < smmu->num_as; i++)
- free_pdir(&smmu->as[i]);
- __free_page(smmu->avp_vector_page);
- smmu_handle = NULL;
- return 0;
-}
+ smmu_writel(smmu, value, SMMU_PTC_CONFIG);
-static const struct dev_pm_ops tegra_smmu_pm_ops = {
- .suspend = tegra_smmu_suspend,
- .resume = tegra_smmu_resume,
-};
+ value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
+ SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
-static const struct of_device_id tegra_smmu_of_match[] = {
- { .compatible = "nvidia,tegra30-smmu", },
- { },
-};
-MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
-
-static struct platform_driver tegra_smmu_driver = {
- .probe = tegra_smmu_probe,
- .remove = tegra_smmu_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "tegra-smmu",
- .pm = &tegra_smmu_pm_ops,
- .of_match_table = tegra_smmu_of_match,
- },
-};
+ if (soc->supports_round_robin_arbitration)
+ value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
-static int tegra_smmu_init(void)
-{
- return platform_driver_register(&tegra_smmu_driver);
-}
+ smmu_writel(smmu, value, SMMU_TLB_CONFIG);
-static void __exit tegra_smmu_exit(void)
-{
- platform_driver_unregister(&tegra_smmu_driver);
-}
+ smmu_flush_ptc(smmu, NULL, 0);
+ smmu_flush_tlb(smmu);
+ smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
+ smmu_flush(smmu);
+
+ tegra_smmu_ahb_enable();
-subsys_initcall(tegra_smmu_init);
-module_exit(tegra_smmu_exit);
+ err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
+ if (err < 0)
+ return ERR_PTR(err);
-MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
-MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
-MODULE_ALIAS("platform:tegra-smmu");
-MODULE_LICENSE("GPL v2");
+ return smmu;
+}
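Editor's aside, for reference only (not part of the patch): the rewritten tegra-smmu code composes SMMU_TLB_FLUSH values from the SMMU_TLB_FLUSH_* macros added above (a per-ASID match bit, a 7-bit ASID field at bit 24, and a group match that covers a 16 KiB region). A minimal stand-alone sketch using copies of those macros; the sample asid/iova values are arbitrary:

/*
 * Illustrative only -- not driver code.  Builds the SMMU_TLB_FLUSH
 * register value for a per-ASID, per-group flush, exactly as
 * smmu_flush_tlb_group() above does.
 */
#include <stdint.h>
#include <stdio.h>

#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3u << 0)
#define SMMU_TLB_FLUSH_ASID(x)        (((x) & 0x7fu) << 24)
#define SMMU_TLB_FLUSH_ASID_MATCH     (1u << 31)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000u) >> 12) | \
                                       SMMU_TLB_FLUSH_VA_MATCH_GROUP)

int main(void)
{
	uint32_t asid = 5, iova = 0x40001000;
	uint32_t value = SMMU_TLB_FLUSH_ASID_MATCH |
			 SMMU_TLB_FLUSH_ASID(asid) |
			 SMMU_TLB_FLUSH_VA_GROUP(iova);

	/* Prints: SMMU_TLB_FLUSH <- 0x85040003 */
	printf("SMMU_TLB_FLUSH <- %#010x\n", value);
	return 0;
}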