Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c            |  26
-rw-r--r--  drivers/iommu/amd_iommu_init.c       |  45
-rw-r--r--  drivers/iommu/arm-smmu-v3.c          |  69
-rw-r--r--  drivers/iommu/arm-smmu.c             |  19
-rw-r--r--  drivers/iommu/dma-iommu.c            | 471
-rw-r--r--  drivers/iommu/exynos-iommu.c         |   5
-rw-r--r--  drivers/iommu/intel-iommu-debugfs.c  | 137
-rw-r--r--  drivers/iommu/intel-iommu.c          | 940
-rw-r--r--  drivers/iommu/intel-pasid.c          |  19
-rw-r--r--  drivers/iommu/intel-pasid.h          |  26
-rw-r--r--  drivers/iommu/intel-svm.c            |  15
-rw-r--r--  drivers/iommu/intel_irq_remapping.c  |   4
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c   |  30
-rw-r--r--  drivers/iommu/io-pgtable-arm.c       |  53
-rw-r--r--  drivers/iommu/io-pgtable.c           |  13
-rw-r--r--  drivers/iommu/iommu-sysfs.c          |   5
-rw-r--r--  drivers/iommu/iommu.c                | 300
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c           | 186
-rw-r--r--  drivers/iommu/omap-iommu-debug.c     |  40
-rw-r--r--  drivers/iommu/omap-iommu.c           |   8
-rw-r--r--  drivers/iommu/omap-iommu.h           |   5
-rw-r--r--  drivers/iommu/omap-iopgtable.h       |   5
-rw-r--r--  drivers/iommu/qcom_iommu.c           |  13
-rw-r--r--  drivers/iommu/rockchip-iommu.c       |   5
-rw-r--r--  drivers/iommu/tegra-smmu.c           |   5
25 files changed, 1507 insertions(+), 937 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index dce1d8d2e8a4..73740b969e62 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -619,9 +619,9 @@ retry:
pasid = ((event[0] >> 16) & 0xFFFF)
| ((event[1] << 6) & 0xF0000);
tag = event[1] & 0x03FF;
- dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+ dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
- pasid, address, flags);
+ pasid, address, flags, tag);
break;
default:
dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
@@ -1295,6 +1295,16 @@ static void domain_flush_complete(struct protection_domain *domain)
}
}
+/* Flush the not present cache if it exists */
+static void domain_flush_np_cache(struct protection_domain *domain,
+ dma_addr_t iova, size_t size)
+{
+ if (unlikely(amd_iommu_np_cache)) {
+ domain_flush_pages(domain, iova, size);
+ domain_flush_complete(domain);
+ }
+}
+
/*
* This function flushes the DTEs for all devices in domain
@@ -2377,10 +2387,7 @@ static dma_addr_t __map_single(struct device *dev,
}
address += offset;
- if (unlikely(amd_iommu_np_cache)) {
- domain_flush_pages(&dma_dom->domain, address, size);
- domain_flush_complete(&dma_dom->domain);
- }
+ domain_flush_np_cache(&dma_dom->domain, address, size);
out:
return address;
@@ -2559,6 +2566,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
s->dma_length = s->length;
}
+ if (s)
+ domain_flush_np_cache(domain, s->dma_address, s->dma_length);
+
return nelems;
out_unmap:
@@ -2597,7 +2607,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
struct protection_domain *domain;
struct dma_ops_domain *dma_dom;
unsigned long startaddr;
- int npages = 2;
+ int npages;
domain = get_domain(dev);
if (IS_ERR(domain))
@@ -3039,6 +3049,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
mutex_unlock(&domain->api_lock);
+ domain_flush_np_cache(domain, iova, page_size);
+
return ret;
}
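
For reference, the PASID and tag unpacking used by the INVALID_PPR_REQUEST message above can be exercised on its own. The shifts and masks below mirror the hunk; the sample event words are invented, not taken from real hardware:

#include <stdio.h>

int main(void)
{
	/* hypothetical raw event-log words (event[0], event[1]) */
	unsigned int event0 = 0x002a0000, event1 = 0x000013ff;
	/* same decode as the hunk: low 16 PASID bits from event[0], top 4 from event[1] */
	unsigned int pasid = ((event0 >> 16) & 0xFFFF) | ((event1 << 6) & 0xF0000);
	unsigned int tag = event1 & 0x03FF;

	printf("pasid=0x%05x tag=0x%03x\n", pasid, tag);	/* pasid=0x4002a tag=0x3ff */
	return 0;
}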
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 07d84dbab564..eb104c719629 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -406,6 +406,9 @@ static void iommu_enable(struct amd_iommu *iommu)
static void iommu_disable(struct amd_iommu *iommu)
{
+ if (!iommu->mmio_base)
+ return;
+
/* Disable command buffer */
iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
@@ -2325,15 +2328,6 @@ static void __init free_iommu_resources(void)
amd_iommu_dev_table = NULL;
free_iommu_all();
-
-#ifdef CONFIG_GART_IOMMU
- /*
- * We failed to initialize the AMD IOMMU - try fallback to GART
- * if possible.
- */
- gart_iommu_init();
-
-#endif
}
/* SB IOAPIC is always on this device in AMD systems */
@@ -2625,8 +2619,6 @@ static int __init state_next(void)
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
pr_info("AMD IOMMU disabled on kernel command-line\n");
- free_dma_resources();
- free_iommu_resources();
init_state = IOMMU_CMDLINE_DISABLED;
ret = -EINVAL;
}
@@ -2667,6 +2659,19 @@ static int __init state_next(void)
BUG();
}
+ if (ret) {
+ free_dma_resources();
+ if (!irq_remapping_enabled) {
+ disable_iommus();
+ free_iommu_resources();
+ } else {
+ struct amd_iommu *iommu;
+
+ uninit_device_table_dma();
+ for_each_iommu(iommu)
+ iommu_flush_all_caches(iommu);
+ }
+ }
return ret;
}
@@ -2740,17 +2745,15 @@ static int __init amd_iommu_init(void)
int ret;
ret = iommu_go_to_state(IOMMU_INITIALIZED);
- if (ret) {
- free_dma_resources();
- if (!irq_remapping_enabled) {
- disable_iommus();
- free_iommu_resources();
- } else {
- uninit_device_table_dma();
- for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
- }
+#ifdef CONFIG_GART_IOMMU
+ if (ret && list_empty(&amd_iommu_list)) {
+ /*
+ * We failed to initialize the AMD IOMMU - try fallback
+ * to GART if possible.
+ */
+ gart_iommu_init();
}
+#endif
for_each_iommu(iommu)
amd_iommu_debugfs_setup(iommu);
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 4d5a694f02c2..2d96cf0023dd 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -192,6 +192,13 @@
#define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
#define Q_BASE_LOG2SIZE GENMASK(4, 0)
+/* Ensure DMA allocations are naturally aligned */
+#ifdef CONFIG_CMA_ALIGNMENT
+#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
+#else
+#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER - 1)
+#endif
+
/*
* Stream table.
*
@@ -289,8 +296,9 @@
FIELD_GET(ARM64_TCR_##fld, tcr))
/* Command queue */
-#define CMDQ_ENT_DWORDS 2
-#define CMDQ_MAX_SZ_SHIFT 8
+#define CMDQ_ENT_SZ_SHIFT 4
+#define CMDQ_ENT_DWORDS ((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
+#define CMDQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)
#define CMDQ_CONS_ERR GENMASK(30, 24)
#define CMDQ_ERR_CERROR_NONE_IDX 0
@@ -336,14 +344,16 @@
#define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(51, 2)
/* Event queue */
-#define EVTQ_ENT_DWORDS 4
-#define EVTQ_MAX_SZ_SHIFT 7
+#define EVTQ_ENT_SZ_SHIFT 5
+#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
+#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
#define EVTQ_0_ID GENMASK_ULL(7, 0)
/* PRI queue */
-#define PRIQ_ENT_DWORDS 2
-#define PRIQ_MAX_SZ_SHIFT 8
+#define PRIQ_ENT_SZ_SHIFT 4
+#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
+#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
#define PRIQ_0_SID GENMASK_ULL(31, 0)
#define PRIQ_0_SSID GENMASK_ULL(51, 32)
@@ -798,7 +808,7 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
- memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
+ memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT);
cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);
switch (ent->opcode) {
@@ -1785,13 +1795,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
+ .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
.tlb = &arm_smmu_gather_ops,
.iommu_dev = smmu->dev,
};
- if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
-
if (smmu_domain->non_strict)
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
@@ -1884,9 +1892,13 @@ static int arm_smmu_enable_ats(struct arm_smmu_master *master)
static void arm_smmu_disable_ats(struct arm_smmu_master *master)
{
+ struct arm_smmu_cmdq_ent cmd;
+
if (!master->ats_enabled || !dev_is_pci(master->dev))
return;
+ arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
+ arm_smmu_atc_inv_master(master, &cmd);
pci_disable_ats(to_pci_dev(master->dev));
master->ats_enabled = false;
}
@@ -1906,7 +1918,6 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
master->domain = NULL;
arm_smmu_install_ste_for_dev(master);
- /* Disabling ATS invalidates all ATC entries */
arm_smmu_disable_ats(master);
}
@@ -2270,17 +2281,32 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
struct arm_smmu_queue *q,
unsigned long prod_off,
unsigned long cons_off,
- size_t dwords)
+ size_t dwords, const char *name)
{
- size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;
+ size_t qsz;
+
+ do {
+ qsz = ((1 << q->max_n_shift) * dwords) << 3;
+ q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
+ GFP_KERNEL);
+ if (q->base || qsz < PAGE_SIZE)
+ break;
+
+ q->max_n_shift--;
+ } while (1);
- q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
if (!q->base) {
- dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
- qsz);
+ dev_err(smmu->dev,
+ "failed to allocate queue (0x%zx bytes) for %s\n",
+ qsz, name);
return -ENOMEM;
}
+ if (!WARN_ON(q->base_dma & (qsz - 1))) {
+ dev_info(smmu->dev, "allocated %u entries for %s\n",
+ 1 << q->max_n_shift, name);
+ }
+
q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu);
q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu);
q->ent_dwords = dwords;
@@ -2300,13 +2326,15 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
/* cmdq */
spin_lock_init(&smmu->cmdq.lock);
ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
- ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
+ ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS,
+ "cmdq");
if (ret)
return ret;
/* evtq */
ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
- ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
+ ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS,
+ "evtq");
if (ret)
return ret;
@@ -2315,7 +2343,8 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
return 0;
return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
- ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
+ ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS,
+ "priq");
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
@@ -2879,7 +2908,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
return -ENXIO;
}
- /* Queue sizes, capped at 4k */
+ /* Queue sizes, capped to ensure natural alignment */
smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
FIELD_GET(IDR1_CMDQS, reg));
if (!smmu->cmdq.q.max_n_shift) {
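
To see what the new natural-alignment cap works out to, the queue sizing can be reproduced outside the driver. PAGE_SHIFT and CONFIG_CMA_ALIGNMENT below are example values (12 and 8), not taken from any particular configuration:

#include <stdio.h>

#define PAGE_SHIFT		12		/* example value */
#define CONFIG_CMA_ALIGNMENT	8		/* example value */
#define Q_MAX_SZ_SHIFT		(PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)

#define CMDQ_ENT_SZ_SHIFT	4		/* 16-byte commands */
#define CMDQ_ENT_DWORDS		((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
#define CMDQ_MAX_SZ_SHIFT	(Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)

int main(void)
{
	unsigned long entries = 1UL << CMDQ_MAX_SZ_SHIFT;
	/* same formula as arm_smmu_init_one_queue(): ((1 << shift) * dwords) << 3 */
	unsigned long bytes = (entries * CMDQ_ENT_DWORDS) << 3;

	printf("cmdq: up to %lu entries, %lu bytes\n", entries, bytes);
	return 0;
}

With these example values the command queue tops out at 65536 entries (1 MiB), staying within what the allocator can return naturally aligned; if an allocation still fails, the new loop in arm_smmu_init_one_queue() halves max_n_shift and retries until it succeeds or drops below a page.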
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 5aeb1dbfaa08..653b6b3dcafb 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -47,6 +47,15 @@
#include "arm-smmu-regs.h"
+/*
+ * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
+ * global register space are still, in fact, using a hypervisor to mediate it
+ * by trapping and emulating register accesses. Sadly, some deployed versions
+ * of said trapping code have bugs wherein they go horribly wrong for stores
+ * using r31 (i.e. XZR/WZR) as the source register.
+ */
+#define QCOM_DUMMY_VAL -1
+
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
@@ -411,7 +420,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
{
unsigned int spin_cnt, delay;
- writel_relaxed(0, sync);
+ writel_relaxed(QCOM_DUMMY_VAL, sync);
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
@@ -883,13 +892,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
+ .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
.tlb = smmu_domain->tlb_ops,
.iommu_dev = smmu->dev,
};
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
-
if (smmu_domain->non_strict)
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
@@ -1751,8 +1758,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
}
/* Invalidate the TLB, just in case */
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+ writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+ writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 129c4badf9ae..f802255219d3 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* A fairly generic DMA-API to IOMMU-API glue layer.
*
@@ -5,23 +6,13 @@
*
* based in part on arch/arm/mm/dma-mapping.c:
* Copyright (C) 2000-2004 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi_iort.h>
#include <linux/device.h>
+#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
+#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
@@ -78,11 +69,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
return cookie;
}
-int iommu_dma_init(void)
-{
- return iova_cache_get();
-}
-
/**
* iommu_get_dma_cookie - Acquire DMA-API resources for a domain
* @domain: IOMMU domain to prepare for DMA-API usage
@@ -240,8 +226,8 @@ resv_iova:
start = window->res->end - window->offset + 1;
/* If window is last entry */
if (window->node.next == &bridge->dma_ranges &&
- end != ~(dma_addr_t)0) {
- end = ~(dma_addr_t)0;
+ end != ~(phys_addr_t)0) {
+ end = ~(phys_addr_t)0;
goto resv_iova;
}
}
@@ -313,7 +299,7 @@ static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
* to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
* any change which could make prior IOVAs invalid will fail.
*/
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
+static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -364,7 +350,6 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
return iova_reserve_iommu_regions(dev, domain);
}
-EXPORT_SYMBOL(iommu_dma_init_domain);
/**
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
@@ -375,7 +360,7 @@ EXPORT_SYMBOL(iommu_dma_init_domain);
*
* Return: corresponding IOMMU API page protection flags
*/
-int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
+static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs)
{
int prot = coherent ? IOMMU_CACHE : 0;
@@ -452,9 +437,10 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
size >> iova_shift(iovad));
}
-static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
+static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
size_t size)
{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, dma_addr);
@@ -468,6 +454,30 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
iommu_dma_free_iova(cookie, dma_addr, size);
}
+static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
+ size_t size, int prot)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ size_t iova_off = 0;
+ dma_addr_t iova;
+
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
+ iova_off = iova_offset(&cookie->iovad, phys);
+ size = iova_align(&cookie->iovad, size + iova_off);
+ }
+
+ iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+ if (!iova)
+ return DMA_MAPPING_ERROR;
+
+ if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+ iommu_dma_free_iova(cookie, iova, size);
+ return DMA_MAPPING_ERROR;
+ }
+ return iova + iova_off;
+}
+
static void __iommu_dma_free_pages(struct page **pages, int count)
{
while (count--)
@@ -533,55 +543,45 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
return pages;
}
-/**
- * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
- * @dev: Device which owns this buffer
- * @pages: Array of buffer pages as returned by iommu_dma_alloc()
- * @size: Size of buffer in bytes
- * @handle: DMA address of buffer
- *
- * Frees both the pages associated with the buffer, and the array
- * describing them
- */
-void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
- dma_addr_t *handle)
+static struct page **__iommu_dma_get_pages(void *cpu_addr)
{
- __iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
- __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
- *handle = DMA_MAPPING_ERROR;
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
+ if (!area || !area->pages)
+ return NULL;
+ return area->pages;
}
/**
- * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
+ * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
* @dev: Device to allocate memory for. Must be a real device
* attached to an iommu_dma_domain
* @size: Size of buffer in bytes
+ * @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
* @attrs: DMA attributes for this allocation
- * @prot: IOMMU mapping flags
- * @handle: Out argument for allocated DMA handle
- * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
- * given VA/PA are visible to the given non-coherent device.
*
* If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
* but an IOMMU which supports smaller pages might not map the whole thing.
*
- * Return: Array of struct page pointers describing the buffer,
- * or NULL on failure.
+ * Return: Mapped virtual address, or NULL on failure.
*/
-struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
- unsigned long attrs, int prot, dma_addr_t *handle,
- void (*flush_page)(struct device *, const void *, phys_addr_t))
+static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
+ bool coherent = dev_is_dma_coherent(dev);
+ int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+ pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+ unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
struct page **pages;
struct sg_table sgt;
dma_addr_t iova;
- unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
+ void *vaddr;
- *handle = DMA_MAPPING_ERROR;
+ *dma_handle = DMA_MAPPING_ERROR;
min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) {
@@ -607,26 +607,29 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
goto out_free_iova;
- if (!(prot & IOMMU_CACHE)) {
- struct sg_mapping_iter miter;
- /*
- * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
- * sufficient here, so skip it by using the "wrong" direction.
- */
- sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
- while (sg_miter_next(&miter))
- flush_page(dev, miter.addr, page_to_phys(miter.page));
- sg_miter_stop(&miter);
+ if (!(ioprot & IOMMU_CACHE)) {
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+ arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
+ if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
< size)
goto out_free_sg;
- *handle = iova;
+ vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+ __builtin_return_address(0));
+ if (!vaddr)
+ goto out_unmap;
+
+ *dma_handle = iova;
sg_free_table(&sgt);
- return pages;
+ return vaddr;
+out_unmap:
+ __iommu_dma_unmap(dev, iova, size);
out_free_sg:
sg_free_table(&sgt);
out_free_iova:
@@ -637,54 +640,94 @@ out_free_pages:
}
/**
- * iommu_dma_mmap - Map a buffer into provided user VMA
- * @pages: Array representing buffer from iommu_dma_alloc()
+ * __iommu_dma_mmap - Map a buffer into provided user VMA
+ * @pages: Array representing buffer from __iommu_dma_alloc()
* @size: Size of buffer in bytes
* @vma: VMA describing requested userspace mapping
*
* Maps the pages of the buffer in @pages into @vma. The caller is responsible
* for verifying the correct size and protection of @vma beforehand.
*/
-
-int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
+static int __iommu_dma_mmap(struct page **pages, size_t size,
+ struct vm_area_struct *vma)
{
return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}
-static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot, struct iommu_domain *domain)
+static void iommu_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
- size_t iova_off = 0;
- dma_addr_t iova;
+ phys_addr_t phys;
- if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
- iova_off = iova_offset(&cookie->iovad, phys);
- size = iova_align(&cookie->iovad, size + iova_off);
- }
+ if (dev_is_dma_coherent(dev))
+ return;
- iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
- if (!iova)
- return DMA_MAPPING_ERROR;
+ phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
+ arch_sync_dma_for_cpu(dev, phys, size, dir);
+}
- if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
- iommu_dma_free_iova(cookie, iova, size);
- return DMA_MAPPING_ERROR;
- }
- return iova + iova_off;
+static void iommu_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+{
+ phys_addr_t phys;
+
+ if (dev_is_dma_coherent(dev))
+ return;
+
+ phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
+ arch_sync_dma_for_device(dev, phys, size, dir);
+}
+
+static void iommu_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ if (dev_is_dma_coherent(dev))
+ return;
+
+ for_each_sg(sgl, sg, nelems, i)
+ arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void iommu_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ if (dev_is_dma_coherent(dev))
+ return;
+
+ for_each_sg(sgl, sg, nelems, i)
+ arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, int prot)
+static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
{
- return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
- iommu_get_dma_domain(dev));
+ phys_addr_t phys = page_to_phys(page) + offset;
+ bool coherent = dev_is_dma_coherent(dev);
+ int prot = dma_info_to_prot(dir, coherent, attrs);
+ dma_addr_t dma_handle;
+
+ dma_handle =__iommu_dma_map(dev, phys, size, prot);
+ if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ dma_handle != DMA_MAPPING_ERROR)
+ arch_sync_dma_for_device(dev, phys, size, dir);
+ return dma_handle;
}
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
+static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
+ __iommu_dma_unmap(dev, dma_handle, size);
}
/*
@@ -769,18 +812,22 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
* impedance-matching, to be able to hand off a suitably-aligned list,
* but still preserve the original offsets and sizes for the caller.
*/
-int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, int prot)
+static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
+ int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
dma_addr_t iova;
size_t iova_len = 0;
unsigned long mask = dma_get_seg_boundary(dev);
int i;
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
+
/*
* Work out how much IOVA space we need, and align the segments to
* IOVA granules for the IOMMU driver to handle. With some clever
@@ -840,12 +887,16 @@ out_restore_sg:
return 0;
}
-void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, unsigned long attrs)
+static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
dma_addr_t start, end;
struct scatterlist *tmp;
int i;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
/*
* The scatterlist segments are mapped into a single
* contiguous IOVA allocation, so this is incredibly easy.
@@ -857,21 +908,231 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
sg = tmp;
}
end = sg_dma_address(sg) + sg_dma_len(sg);
- __iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
+ __iommu_dma_unmap(dev, start, end - start);
}
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
- iommu_get_dma_domain(dev));
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+ __iommu_dma_unmap(dev, handle, size);
+}
+
+static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
+{
+ size_t alloc_size = PAGE_ALIGN(size);
+ int count = alloc_size >> PAGE_SHIFT;
+ struct page *page = NULL, **pages = NULL;
+
+ /* Non-coherent atomic allocation? Easy */
+ if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+ dma_free_from_pool(cpu_addr, alloc_size))
+ return;
+
+ if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+ /*
+ * If it the address is remapped, then it's either non-coherent
+ * or highmem CMA, or an iommu_dma_alloc_remap() construction.
+ */
+ pages = __iommu_dma_get_pages(cpu_addr);
+ if (!pages)
+ page = vmalloc_to_page(cpu_addr);
+ dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
+ } else {
+ /* Lowmem means a coherent atomic or CMA allocation */
+ page = virt_to_page(cpu_addr);
+ }
+
+ if (pages)
+ __iommu_dma_free_pages(pages, count);
+ if (page && !dma_release_from_contiguous(dev, page, count))
+ __free_pages(page, get_order(alloc_size));
+}
+
+static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, unsigned long attrs)
+{
+ __iommu_dma_unmap(dev, handle, size);
+ __iommu_dma_free(dev, size, cpu_addr);
+}
+
+static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
+ struct page **pagep, gfp_t gfp, unsigned long attrs)
+{
+ bool coherent = dev_is_dma_coherent(dev);
+ size_t alloc_size = PAGE_ALIGN(size);
+ struct page *page = NULL;
+ void *cpu_addr;
+
+ if (gfpflags_allow_blocking(gfp))
+ page = dma_alloc_from_contiguous(dev, alloc_size >> PAGE_SHIFT,
+ get_order(alloc_size),
+ gfp & __GFP_NOWARN);
+ if (!page)
+ page = alloc_pages(gfp, get_order(alloc_size));
+ if (!page)
+ return NULL;
+
+ if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
+ pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+
+ cpu_addr = dma_common_contiguous_remap(page, alloc_size,
+ VM_USERMAP, prot, __builtin_return_address(0));
+ if (!cpu_addr)
+ goto out_free_pages;
+
+ if (!coherent)
+ arch_dma_prep_coherent(page, size);
+ } else {
+ cpu_addr = page_address(page);
+ }
+
+ *pagep = page;
+ memset(cpu_addr, 0, alloc_size);
+ return cpu_addr;
+out_free_pages:
+ if (!dma_release_from_contiguous(dev, page, alloc_size >> PAGE_SHIFT))
+ __free_pages(page, get_order(alloc_size));
+ return NULL;
+}
+
+static void *iommu_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+{
+ bool coherent = dev_is_dma_coherent(dev);
+ int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+ struct page *page = NULL;
+ void *cpu_addr;
+
+ gfp |= __GFP_ZERO;
+
+ if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
+ !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
+ return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
+
+ if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+ !gfpflags_allow_blocking(gfp) && !coherent)
+ cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+ else
+ cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
+ if (!cpu_addr)
+ return NULL;
+
+ *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
+ if (*handle == DMA_MAPPING_ERROR) {
+ __iommu_dma_free(dev, size, cpu_addr);
+ return NULL;
+ }
+
+ return cpu_addr;
+}
+
+static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn, off = vma->vm_pgoff;
+ int ret;
+
+ vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+
+ if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
+ return -ENXIO;
+
+ if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+ struct page **pages = __iommu_dma_get_pages(cpu_addr);
+
+ if (pages)
+ return __iommu_dma_mmap(pages, size, vma);
+ pfn = vmalloc_to_pfn(cpu_addr);
+ } else {
+ pfn = page_to_pfn(virt_to_page(cpu_addr));
+ }
+
+ return remap_pfn_range(vma, vma->vm_start, pfn + off,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+ struct page *page;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+ struct page **pages = __iommu_dma_get_pages(cpu_addr);
+
+ if (pages) {
+ return sg_alloc_table_from_pages(sgt, pages,
+ PAGE_ALIGN(size) >> PAGE_SHIFT,
+ 0, size, GFP_KERNEL);
+ }
+
+ page = vmalloc_to_page(cpu_addr);
+ } else {
+ page = virt_to_page(cpu_addr);
+ }
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (!ret)
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return ret;
+}
+
+static const struct dma_map_ops iommu_dma_ops = {
+ .alloc = iommu_dma_alloc,
+ .free = iommu_dma_free,
+ .mmap = iommu_dma_mmap,
+ .get_sgtable = iommu_dma_get_sgtable,
+ .map_page = iommu_dma_map_page,
+ .unmap_page = iommu_dma_unmap_page,
+ .map_sg = iommu_dma_map_sg,
+ .unmap_sg = iommu_dma_unmap_sg,
+ .sync_single_for_cpu = iommu_dma_sync_single_for_cpu,
+ .sync_single_for_device = iommu_dma_sync_single_for_device,
+ .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = iommu_dma_sync_sg_for_device,
+ .map_resource = iommu_dma_map_resource,
+ .unmap_resource = iommu_dma_unmap_resource,
+};
+
+/*
+ * The IOMMU core code allocates the default DMA domain, which the underlying
+ * IOMMU driver needs to support via the dma-iommu layer.
+ */
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+ if (!domain)
+ goto out_err;
+
+ /*
+ * The IOMMU core code allocates the default DMA domain, which the
+ * underlying IOMMU driver needs to support via the dma-iommu layer.
+ */
+ if (domain->type == IOMMU_DOMAIN_DMA) {
+ if (iommu_dma_init_domain(domain, dma_base, size, dev))
+ goto out_err;
+ dev->dma_ops = &iommu_dma_ops;
+ }
+
+ return;
+out_err:
+ pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+ dev_name(dev));
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
@@ -892,7 +1153,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
+ iova = __iommu_dma_map(dev, msi_addr, size, prot);
if (iova == DMA_MAPPING_ERROR)
goto out_free_page;
@@ -954,3 +1215,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
msg->address_lo += lower_32_bits(msi_page->iova);
}
+
+static int iommu_dma_init(void)
+{
+ return iova_cache_get();
+}
+arch_initcall(iommu_dma_init);
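
A rough sketch of how an architecture is expected to hand over to this consolidated layer once the ops above exist; the arch_setup_dma_ops() signature and the dma_coherent field are assumptions based on kernels of this era, not part of this patch:

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;	/* consumed by dev_is_dma_coherent() */

	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, size);
	/* without an IOMMU the device keeps the default direct-mapping ops */
}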
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 05c6bc099d62..b0c1e5f9daae 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 7fabf9b1c2dc..73a552914455 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -14,6 +14,17 @@
#include <asm/irq_remapping.h>
+#include "intel-pasid.h"
+
+struct tbl_walk {
+ u16 bus;
+ u16 devfn;
+ u32 pasid;
+ struct root_entry *rt_entry;
+ struct context_entry *ctx_entry;
+ struct pasid_entry *pasid_tbl_entry;
+};
+
struct iommu_regset {
int offset;
const char *regs;
@@ -131,16 +142,86 @@ out:
}
DEFINE_SHOW_ATTRIBUTE(iommu_regset);
-static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
- int bus)
+static inline void print_tbl_walk(struct seq_file *m)
{
- struct context_entry *context;
- int devfn;
+ struct tbl_walk *tbl_wlk = m->private;
+
+ seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t",
+ tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn),
+ PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi,
+ tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi,
+ tbl_wlk->ctx_entry->lo);
+
+ /*
+ * A legacy mode DMAR doesn't support PASID, hence default it to -1
+ * indicating that it's invalid. Also, default all PASID related fields
+ * to 0.
+ */
+ if (!tbl_wlk->pasid_tbl_entry)
+ seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1,
+ (u64)0, (u64)0, (u64)0);
+ else
+ seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
+ tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[0],
+ tbl_wlk->pasid_tbl_entry->val[1],
+ tbl_wlk->pasid_tbl_entry->val[2]);
+}
- seq_printf(m, " Context Table Entries for Bus: %d\n", bus);
- seq_puts(m, " Entry\tB:D.F\tHigh\tLow\n");
+static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
+ u16 dir_idx)
+{
+ struct tbl_walk *tbl_wlk = m->private;
+ u8 tbl_idx;
+
+ for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
+ if (pasid_pte_is_present(tbl_entry)) {
+ tbl_wlk->pasid_tbl_entry = tbl_entry;
+ tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;
+ print_tbl_walk(m);
+ }
+
+ tbl_entry++;
+ }
+}
+
+static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr,
+ u16 pasid_dir_size)
+{
+ struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr);
+ struct pasid_entry *pasid_tbl;
+ u16 dir_idx;
+
+ for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) {
+ pasid_tbl = get_pasid_table_from_pde(dir_entry);
+ if (pasid_tbl)
+ pasid_tbl_walk(m, pasid_tbl, dir_idx);
+
+ dir_entry++;
+ }
+}
+
+static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
+{
+ struct context_entry *context;
+ u16 devfn, pasid_dir_size;
+ u64 pasid_dir_ptr;
for (devfn = 0; devfn < 256; devfn++) {
+ struct tbl_walk tbl_wlk = {0};
+
+ /*
+ * Scalable mode root entry points to upper scalable mode
+ * context table and lower scalable mode context table. Each
+ * scalable mode context table has 128 context entries where as
+ * legacy mode context table has 256 context entries. So in
+ * scalable mode, the context entries for former 128 devices are
+ * in the lower scalable mode context table, while the latter
+ * 128 devices are in the upper scalable mode context table.
+ * In scalable mode, when devfn > 127, iommu_context_addr()
+ * automatically refers to upper scalable mode context table and
+ * hence the caller doesn't have to worry about differences
+ * between scalable mode and non scalable mode.
+ */
context = iommu_context_addr(iommu, bus, devfn, 0);
if (!context)
return;
@@ -148,33 +229,41 @@ static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
if (!context_present(context))
continue;
- seq_printf(m, " %-5d\t%02x:%02x.%x\t%-6llx\t%llx\n", devfn,
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- context[0].hi, context[0].lo);
+ tbl_wlk.bus = bus;
+ tbl_wlk.devfn = devfn;
+ tbl_wlk.rt_entry = &iommu->root_entry[bus];
+ tbl_wlk.ctx_entry = context;
+ m->private = &tbl_wlk;
+
+ if (pasid_supported(iommu) && is_pasid_enabled(context)) {
+ pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
+ pasid_dir_size = get_pasid_dir_size(context);
+ pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
+ continue;
+ }
+
+ print_tbl_walk(m);
}
}
-static void root_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu)
+static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
unsigned long flags;
- int bus;
+ u16 bus;
spin_lock_irqsave(&iommu->lock, flags);
- seq_printf(m, "IOMMU %s: Root Table Address:%llx\n", iommu->name,
+ seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
(u64)virt_to_phys(iommu->root_entry));
- seq_puts(m, "Root Table Entries:\n");
+ seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");
- for (bus = 0; bus < 256; bus++) {
- if (!(iommu->root_entry[bus].lo & 1))
- continue;
+ /*
+ * No need to check if the root entry is present or not because
+ * iommu_context_addr() performs the same check before returning
+ * context entry.
+ */
+ for (bus = 0; bus < 256; bus++)
+ ctx_tbl_walk(m, iommu, bus);
- seq_printf(m, " Bus: %d H: %llx L: %llx\n", bus,
- iommu->root_entry[bus].hi,
- iommu->root_entry[bus].lo);
-
- ctx_tbl_entry_show(m, iommu, bus);
- seq_putc(m, '\n');
- }
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -185,7 +274,7 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
- root_tbl_entry_show(m, iommu);
+ root_tbl_walk(m, iommu);
seq_putc(m, '\n');
}
rcu_read_unlock();
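
As a quick sanity check of the walk above, the PASID printed for each leaf entry is simply the directory index scaled by the leaf-table size plus the table index. The shift of 6 (64-entry leaf tables) is assumed from intel-pasid.h rather than shown in this hunk:

#include <stdio.h>

#define PASID_PDE_SHIFT	6	/* assumed: 64 entries per PASID leaf table */

int main(void)
{
	unsigned int dir_idx = 3, tbl_idx = 5;	/* arbitrary example indices */
	unsigned int pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;

	printf("pasid = %u\n", pasid);	/* 3 * 64 + 5 = 197 */
	return 0;
}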
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 162b3236e72c..ac4172c02244 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -294,14 +294,16 @@ static inline void context_clear_entry(struct context_entry *context)
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;
+/* si_domain contains mulitple devices */
+#define DOMAIN_FLAG_STATIC_IDENTITY BIT(0)
+
/*
- * Domain represents a virtual machine, more than one devices
- * across iommus may be owned in one domain, e.g. kvm guest.
+ * This is a DMA domain allocated through the iommu domain allocation
+ * interface. But one or more devices belonging to this domain have
+ * been chosen to use a private domain. We should avoid to use the
+ * map/unmap/iova_to_phys APIs on it.
*/
-#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
-
-/* si_domain contains mulitple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
+#define DOMAIN_FLAG_LOSE_CHILDREN BIT(1)
#define for_each_domain_iommu(idx, domain) \
for (idx = 0; idx < g_num_of_iommus; idx++) \
@@ -314,7 +316,6 @@ struct dmar_rmrr_unit {
u64 end_address; /* reserved end address */
struct dmar_dev_scope *devices; /* target devices */
int devices_cnt; /* target device count */
- struct iommu_resv_region *resv; /* reserved region handle */
};
struct dmar_atsr_unit {
@@ -342,6 +343,9 @@ static void domain_context_clear(struct intel_iommu *iommu,
struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu);
+static bool device_is_rmrr_locked(struct device *dev);
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev);
#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
@@ -349,6 +353,7 @@ int dmar_disabled = 0;
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+int intel_iommu_sm;
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
@@ -356,21 +361,17 @@ static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
-static int intel_iommu_sm;
static int iommu_identity_mapping;
#define IDENTMAP_ALL 1
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
-#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
-#define pasid_supported(iommu) (sm_supported(iommu) && \
- ecap_pasid((iommu)->ecap))
-
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
+#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
@@ -535,22 +536,11 @@ static inline void free_devinfo_mem(void *vaddr)
kmem_cache_free(iommu_devinfo_cache, vaddr);
}
-static inline int domain_type_is_vm(struct dmar_domain *domain)
-{
- return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
-}
-
static inline int domain_type_is_si(struct dmar_domain *domain)
{
return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
}
-static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
-{
- return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
- DOMAIN_FLAG_STATIC_IDENTITY);
-}
-
static inline int domain_pfn_supported(struct dmar_domain *domain,
unsigned long pfn)
{
@@ -598,7 +588,9 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
int iommu_id;
/* si_domain and vm domain should not get here. */
- BUG_ON(domain_type_is_vm_or_si(domain));
+ if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
+ return NULL;
+
for_each_domain_iommu(iommu_id, domain)
break;
@@ -729,12 +721,39 @@ static int iommu_dummy(struct device *dev)
return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
+/**
+ * is_downstream_to_pci_bridge - test if a device belongs to the PCI
+ * sub-hierarchy of a candidate PCI-PCI bridge
+ * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
+ * @bridge: the candidate PCI-PCI bridge
+ *
+ * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
+ */
+static bool
+is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
+{
+ struct pci_dev *pdev, *pbridge;
+
+ if (!dev_is_pci(dev) || !dev_is_pci(bridge))
+ return false;
+
+ pdev = to_pci_dev(dev);
+ pbridge = to_pci_dev(bridge);
+
+ if (pbridge->subordinate &&
+ pbridge->subordinate->number <= pdev->bus->number &&
+ pbridge->subordinate->busn_res.end >= pdev->bus->number)
+ return true;
+
+ return false;
+}
+
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
struct intel_iommu *iommu;
struct device *tmp;
- struct pci_dev *ptmp, *pdev = NULL;
+ struct pci_dev *pdev = NULL;
u16 segment = 0;
int i;
@@ -780,13 +799,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
goto out;
}
- if (!pdev || !dev_is_pci(tmp))
- continue;
-
- ptmp = to_pci_dev(tmp);
- if (ptmp->subordinate &&
- ptmp->subordinate->number <= pdev->bus->number &&
- ptmp->subordinate->busn_res.end >= pdev->bus->number)
+ if (is_downstream_to_pci_bridge(dev, tmp))
goto got_pdev;
}
@@ -908,7 +921,6 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
return pte;
}
-
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
unsigned long pfn,
@@ -1577,7 +1589,6 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-
static int iommu_init_domains(struct intel_iommu *iommu)
{
u32 ndomains, nlongs;
@@ -1615,8 +1626,6 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return -ENOMEM;
}
-
-
/*
* If Caching mode is set, then invalid translations are tagged
* with domain-id 0, hence we need to pre-allocate it. We also
@@ -1646,32 +1655,15 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
if (!iommu->domains || !iommu->domain_ids)
return;
-again:
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
- struct dmar_domain *domain;
-
if (info->iommu != iommu)
continue;
if (!info->dev || !info->domain)
continue;
- domain = info->domain;
-
__dmar_remove_one_dev_info(info);
-
- if (!domain_type_is_vm_or_si(domain)) {
- /*
- * The domain_exit() function can't be called under
- * device_domain_lock, as it takes this lock itself.
- * So release the lock here and re-run the loop
- * afterwards.
- */
- spin_unlock_irqrestore(&device_domain_lock, flags);
- domain_exit(domain);
- goto again;
- }
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -1841,71 +1833,12 @@ static inline int guestwidth_to_adjustwidth(int gaw)
return agaw;
}
-static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
- int guest_width)
-{
- int adjust_width, agaw;
- unsigned long sagaw;
- int err;
-
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-
- err = init_iova_flush_queue(&domain->iovad,
- iommu_flush_iova, iova_entry_free);
- if (err)
- return err;
-
- domain_reserve_special_ranges(domain);
-
- /* calculate AGAW */
- if (guest_width > cap_mgaw(iommu->cap))
- guest_width = cap_mgaw(iommu->cap);
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- agaw = width_to_agaw(adjust_width);
- sagaw = cap_sagaw(iommu->cap);
- if (!test_bit(agaw, &sagaw)) {
- /* hardware doesn't support it, choose a bigger one */
- pr_debug("Hardware doesn't support agaw %d\n", agaw);
- agaw = find_next_bit(&sagaw, 5, agaw);
- if (agaw >= 5)
- return -ENODEV;
- }
- domain->agaw = agaw;
-
- if (ecap_coherent(iommu->ecap))
- domain->iommu_coherency = 1;
- else
- domain->iommu_coherency = 0;
-
- if (ecap_sc_support(iommu->ecap))
- domain->iommu_snooping = 1;
- else
- domain->iommu_snooping = 0;
-
- if (intel_iommu_superpage)
- domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
- else
- domain->iommu_superpage = 0;
-
- domain->nid = iommu->node;
-
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
- return 0;
-}
-
static void domain_exit(struct dmar_domain *domain)
{
struct page *freelist;
/* Remove associated devices and clear attached or cached domains */
- rcu_read_lock();
domain_remove_dev_info(domain);
- rcu_read_unlock();
/* destroy iovas */
put_iova_domain(&domain->iovad);
@@ -2336,7 +2269,7 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long phys_pfn,
unsigned long nr_pages, int prot)
{
- int ret;
+ int iommu_id, ret;
struct intel_iommu *iommu;
/* Do the real mapping first */
@@ -2344,18 +2277,8 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
if (ret)
return ret;
- /* Notify about the new mapping */
- if (domain_type_is_vm(domain)) {
- /* VM typed domains can have more than one IOMMUs */
- int iommu_id;
-
- for_each_domain_iommu(iommu_id, domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
- }
- } else {
- /* General domains only have one IOMMU */
- iommu = domain_get_iommu(domain);
+ for_each_domain_iommu(iommu_id, domain) {
+ iommu = g_iommus[iommu_id];
__mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
}
@@ -2435,8 +2358,18 @@ static struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
+ if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
+ struct iommu_domain *domain;
+
+ dev->archdata.iommu = NULL;
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain)
+ intel_iommu_attach_device(domain, dev);
+ }
+
/* No lock here, assumes no domain exit in normal case */
info = dev->archdata.iommu;
+
if (likely(info))
return info->domain;
return NULL;
@@ -2580,6 +2513,31 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
return 0;
}
+static int domain_init(struct dmar_domain *domain, int guest_width)
+{
+ int adjust_width;
+
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+ domain_reserve_special_ranges(domain);
+
+ /* calculate AGAW */
+ domain->gaw = guest_width;
+ adjust_width = guestwidth_to_adjustwidth(guest_width);
+ domain->agaw = width_to_agaw(adjust_width);
+
+ domain->iommu_coherency = 0;
+ domain->iommu_snooping = 0;
+ domain->iommu_superpage = 0;
+ domain->max_addr = 0;
+
+ /* always allocate the top pgd */
+ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+ if (!domain->pgd)
+ return -ENOMEM;
+ domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+ return 0;
+}
+
static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
{
struct device_domain_info *info;
@@ -2617,13 +2575,20 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
domain = alloc_domain(0);
if (!domain)
return NULL;
- if (domain_init(domain, iommu, gaw)) {
+
+ if (domain_init(domain, gaw)) {
domain_exit(domain);
return NULL;
}
-out:
+ if (init_iova_flush_queue(&domain->iovad,
+ iommu_flush_iova,
+ iova_entry_free)) {
+ pr_warn("iova flush queue initialization failed\n");
+ intel_iommu_strict = 1;
+ }
+out:
return domain;
}
@@ -2663,29 +2628,6 @@ static struct dmar_domain *set_domain_for_dev(struct device *dev,
return domain;
}
-static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
-{
- struct dmar_domain *domain, *tmp;
-
- domain = find_domain(dev);
- if (domain)
- goto out;
-
- domain = find_or_alloc_domain(dev, gaw);
- if (!domain)
- goto out;
-
- tmp = set_domain_for_dev(dev, domain);
- if (!tmp || domain != tmp) {
- domain_exit(domain);
- domain = tmp;
- }
-
-out:
-
- return domain;
-}
-
static int iommu_domain_identity_map(struct dmar_domain *domain,
unsigned long long start,
unsigned long long end)
@@ -2750,75 +2692,21 @@ static int domain_prepare_identity_map(struct device *dev,
return iommu_domain_identity_map(domain, start, end);
}
-static int iommu_prepare_identity_map(struct device *dev,
- unsigned long long start,
- unsigned long long end)
-{
- struct dmar_domain *domain;
- int ret;
-
- domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain)
- return -ENOMEM;
-
- ret = domain_prepare_identity_map(dev, domain, start, end);
- if (ret)
- domain_exit(domain);
-
- return ret;
-}
-
-static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
- struct device *dev)
-{
- if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
- return 0;
- return iommu_prepare_identity_map(dev, rmrr->base_address,
- rmrr->end_address);
-}
-
-#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
-static inline void iommu_prepare_isa(void)
-{
- struct pci_dev *pdev;
- int ret;
-
- pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
- if (!pdev)
- return;
-
- pr_info("Prepare 0-16MiB unity mapping for LPC\n");
- ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
-
- if (ret)
- pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
-
- pci_dev_put(pdev);
-}
-#else
-static inline void iommu_prepare_isa(void)
-{
- return;
-}
-#endif /* !CONFIG_INTEL_IOMMU_FLPY_WA */
-
-static int md_domain_init(struct dmar_domain *domain, int guest_width);
-
static int __init si_domain_init(int hw)
{
- int nid, ret;
+ struct dmar_rmrr_unit *rmrr;
+ struct device *dev;
+ int i, nid, ret;
si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
if (!si_domain)
return -EFAULT;
- if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
domain_exit(si_domain);
return -EFAULT;
}
- pr_debug("Identity mapping domain allocated\n");
-
if (hw)
return 0;
@@ -2834,6 +2722,31 @@ static int __init si_domain_init(int hw)
}
}
+ /*
+ * Normally we use DMA domains for devices which have RMRRs. But we
+ * loose this requirement for graphic and usb devices. Identity map
+ * the RMRRs for graphic and USB devices so that they could use the
+ * si_domain.
+ */
+ for_each_rmrr_units(rmrr) {
+ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+ i, dev) {
+ unsigned long long start = rmrr->base_address;
+ unsigned long long end = rmrr->end_address;
+
+ if (device_is_rmrr_locked(dev))
+ continue;
+
+ if (WARN_ON(end < start ||
+ end >> agaw_to_width(si_domain->agaw)))
+ continue;
+
+ ret = iommu_domain_identity_map(si_domain, start, end);
+ if (ret)
+ return ret;
+ }
+ }
+
return 0;
}
@@ -2841,9 +2754,6 @@ static int identity_mapping(struct device *dev)
{
struct device_domain_info *info;
- if (likely(!iommu_identity_mapping))
- return 0;
-
info = dev->archdata.iommu;
if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
return (info->domain == si_domain);
@@ -2882,7 +2792,8 @@ static bool device_has_rmrr(struct device *dev)
*/
for_each_active_dev_scope(rmrr->devices,
rmrr->devices_cnt, i, tmp)
- if (tmp == dev) {
+ if (tmp == dev ||
+ is_downstream_to_pci_bridge(dev, tmp)) {
rcu_read_unlock();
return true;
}
@@ -2891,6 +2802,35 @@ static bool device_has_rmrr(struct device *dev)
return false;
}
+/**
+ * device_rmrr_is_relaxable - Test whether the RMRR of this device
+ * is relaxable (ie. is allowed to be not enforced under some conditions)
+ * @dev: device handle
+ *
+ * We assume that PCI USB devices with RMRRs have them largely
+ * for historical reasons and that the RMRR space is not actively used post
+ * boot. This exclusion may change if vendors begin to abuse it.
+ *
+ * The same exception is made for graphics devices, with the requirement that
+ * any use of the RMRR regions will be torn down before assigning the device
+ * to a guest.
+ *
+ * Return: true if the RMRR is relaxable, false otherwise
+ */
+static bool device_rmrr_is_relaxable(struct device *dev)
+{
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(dev))
+ return false;
+
+ pdev = to_pci_dev(dev);
+ if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
+ return true;
+ else
+ return false;
+}
+
/*
* There are a couple cases where we need to restrict the functionality of
* devices associated with RMRRs. The first is when evaluating a device for
@@ -2905,52 +2845,51 @@ static bool device_has_rmrr(struct device *dev)
* We therefore prevent devices associated with an RMRR from participating in
* the IOMMU API, which eliminates them from device assignment.
*
- * In both cases we assume that PCI USB devices with RMRRs have them largely
- * for historical reasons and that the RMRR space is not actively used post
- * boot. This exclusion may change if vendors begin to abuse it.
- *
- * The same exception is made for graphics devices, with the requirement that
- * any use of the RMRR regions will be torn down before assigning the device
- * to a guest.
+ * In both cases, devices which have relaxable RMRRs are exempt from this
+ * restriction; see the device_rmrr_is_relaxable() comment.
*/
static bool device_is_rmrr_locked(struct device *dev)
{
if (!device_has_rmrr(dev))
return false;
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
- return false;
- }
+ if (device_rmrr_is_relaxable(dev))
+ return false;
return true;
}
-static int iommu_should_identity_map(struct device *dev, int startup)
+/*
+ * Return the required default domain type for a specific device.
+ *
+ * @dev: the device in question
+ *
+ * Returns:
+ * - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
+ * - IOMMU_DOMAIN_IDENTITY: device requires an identity mapping domain
+ * - 0: both identity and dynamic domains work for this device
+ */
+static int device_def_domain_type(struct device *dev)
{
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
if (device_is_rmrr_locked(dev))
- return 0;
+ return IOMMU_DOMAIN_DMA;
/*
* Prevent any device marked as untrusted from getting
* placed into the statically identity mapping domain.
*/
if (pdev->untrusted)
- return 0;
+ return IOMMU_DOMAIN_DMA;
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
- return 1;
+ return IOMMU_DOMAIN_IDENTITY;
if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
- return 1;
-
- if (!(iommu_identity_mapping & IDENTMAP_ALL))
- return 0;
+ return IOMMU_DOMAIN_IDENTITY;
/*
* We want to start off with all devices in the 1:1 domain, and
@@ -2971,94 +2910,18 @@ static int iommu_should_identity_map(struct device *dev, int startup)
*/
if (!pci_is_pcie(pdev)) {
if (!pci_is_root_bus(pdev->bus))
- return 0;
+ return IOMMU_DOMAIN_DMA;
if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
- return 0;
+ return IOMMU_DOMAIN_DMA;
} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
+ return IOMMU_DOMAIN_DMA;
} else {
if (device_has_rmrr(dev))
- return 0;
+ return IOMMU_DOMAIN_DMA;
}
- /*
- * At boot time, we don't yet know if devices will be 64-bit capable.
- * Assume that they will — if they turn out not to be, then we can
- * take them out of the 1:1 domain later.
- */
- if (!startup) {
- /*
- * If the device's dma_mask is less than the system's memory
- * size then this is not a candidate for identity mapping.
- */
- u64 dma_mask = *dev->dma_mask;
-
- if (dev->coherent_dma_mask &&
- dev->coherent_dma_mask < dma_mask)
- dma_mask = dev->coherent_dma_mask;
-
- return dma_mask >= dma_get_required_mask(dev);
- }
-
- return 1;
-}
-
-static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
-{
- int ret;
-
- if (!iommu_should_identity_map(dev, 1))
- return 0;
-
- ret = domain_add_dev_info(si_domain, dev);
- if (!ret)
- dev_info(dev, "%s identity mapping\n",
- hw ? "Hardware" : "Software");
- else if (ret == -ENODEV)
- /* device not associated with an iommu */
- ret = 0;
-
- return ret;
-}
-
-
-static int __init iommu_prepare_static_identity_mapping(int hw)
-{
- struct pci_dev *pdev = NULL;
- struct dmar_drhd_unit *drhd;
- /* To avoid a -Wunused-but-set-variable warning. */
- struct intel_iommu *iommu __maybe_unused;
- struct device *dev;
- int i;
- int ret = 0;
-
- for_each_pci_dev(pdev) {
- ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
- if (ret)
- return ret;
- }
-
- for_each_active_iommu(iommu, drhd)
- for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
- struct acpi_device_physical_node *pn;
- struct acpi_device *adev;
-
- if (dev->bus != &acpi_bus_type)
- continue;
-
- adev= to_acpi_device(dev);
- mutex_lock(&adev->physical_node_lock);
- list_for_each_entry(pn, &adev->physical_node_list, node) {
- ret = dev_prepare_static_identity_mapping(pn->dev, hw);
- if (ret)
- break;
- }
- mutex_unlock(&adev->physical_node_lock);
- if (ret)
- return ret;
- }
-
- return 0;
+ return (iommu_identity_mapping & IDENTMAP_ALL) ?
+ IOMMU_DOMAIN_IDENTITY : 0;
}
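
A zero return deliberately leaves the final choice to the caller. A minimal caller sketch, not part of the patch, showing how such a result might be resolved; resolve_def_domain_type() is a hypothetical helper:

/*
 * Hypothetical caller: resolve the default domain type for @dev,
 * treating 0 (no preference) as "use a dynamic DMA domain".
 */
static int resolve_def_domain_type(struct device *dev)
{
	int type = device_def_domain_type(dev);

	return type ? type : IOMMU_DOMAIN_DMA;
}
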
static void intel_iommu_init_qi(struct intel_iommu *iommu)
@@ -3283,11 +3146,8 @@ out_unmap:
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
- struct dmar_rmrr_unit *rmrr;
- bool copied_tables = false;
- struct device *dev;
struct intel_iommu *iommu;
- int i, ret;
+ int ret;
/*
* for each drhd
@@ -3320,7 +3180,12 @@ static int __init init_dmars(void)
goto error;
}
- for_each_active_iommu(iommu, drhd) {
+ for_each_iommu(iommu, drhd) {
+ if (drhd->ignored) {
+ iommu_disable_translation(iommu);
+ continue;
+ }
+
/*
* Find the max pasid size of all IOMMU's in the system.
* We need to ensure the system pasid table is no bigger
@@ -3380,7 +3245,6 @@ static int __init init_dmars(void)
} else {
pr_info("Copied translation tables from previous kernel for %s\n",
iommu->name);
- copied_tables = true;
}
}
@@ -3416,62 +3280,9 @@ static int __init init_dmars(void)
check_tylersburg_isoch();
- if (iommu_identity_mapping) {
- ret = si_domain_init(hw_pass_through);
- if (ret)
- goto free_iommu;
- }
-
-
- /*
- * If we copied translations from a previous kernel in the kdump
- * case, we can not assign the devices to domains now, as that
- * would eliminate the old mappings. So skip this part and defer
- * the assignment to device driver initialization time.
- */
- if (copied_tables)
- goto domains_done;
-
- /*
- * If pass through is not set or not enabled, setup context entries for
- * identity mappings for rmrr, gfx, and isa and may fall back to static
- * identity mapping if iommu_identity_mapping is set.
- */
- if (iommu_identity_mapping) {
- ret = iommu_prepare_static_identity_mapping(hw_pass_through);
- if (ret) {
- pr_crit("Failed to setup IOMMU pass-through\n");
- goto free_iommu;
- }
- }
- /*
- * For each rmrr
- * for each dev attached to rmrr
- * do
- * locate drhd for dev, alloc domain for dev
- * allocate free domain
- * allocate page table entries for rmrr
- * if context not allocated for bus
- * allocate and init context
- * set present in root table for this bus
- * init context with domain, translation etc
- * endfor
- * endfor
- */
- pr_info("Setting RMRR:\n");
- for_each_rmrr_units(rmrr) {
- /* some BIOS lists non-exist devices in DMAR table. */
- for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
- i, dev) {
- ret = iommu_prepare_rmrr_dev(rmrr, dev);
- if (ret)
- pr_err("Mapping reserved region failed\n");
- }
- }
-
- iommu_prepare_isa();
-
-domains_done:
+ ret = si_domain_init(hw_pass_through);
+ if (ret)
+ goto free_iommu;
/*
* for each drhd
@@ -3509,11 +3320,6 @@ domains_done:
ret = dmar_set_interrupt(iommu);
if (ret)
goto free_iommu;
-
- if (!translation_pre_enabled(iommu))
- iommu_enable_translation(iommu);
-
- iommu_disable_protect_mem_regions(iommu);
}
return 0;
@@ -3563,16 +3369,17 @@ static unsigned long intel_alloc_iova(struct device *dev,
return iova_pfn;
}
-struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
+static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
{
struct dmar_domain *domain, *tmp;
struct dmar_rmrr_unit *rmrr;
struct device *i_dev;
int i, ret;
+	/* The device should not already be attached to any domain. */
domain = find_domain(dev);
if (domain)
- goto out;
+ return NULL;
domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain)
@@ -3602,10 +3409,10 @@ struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
}
out:
-
if (!domain)
dev_err(dev, "Allocating domain failed\n");
-
+ else
+ domain->domain.type = IOMMU_DOMAIN_DMA;
return domain;
}
@@ -3613,17 +3420,19 @@ out:
/* Check if the dev needs to go through non-identity map and unmap process.*/
static bool iommu_need_mapping(struct device *dev)
{
- int found;
+ int ret;
if (iommu_dummy(dev))
return false;
- if (!iommu_identity_mapping)
- return true;
+ ret = identity_mapping(dev);
+ if (ret) {
+ u64 dma_mask = *dev->dma_mask;
- found = identity_mapping(dev);
- if (found) {
- if (iommu_should_identity_map(dev, 0))
+ if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
+ dma_mask = dev->coherent_dma_mask;
+
+ if (dma_mask >= dma_get_required_mask(dev))
return false;
/*
@@ -3631,17 +3440,20 @@ static bool iommu_need_mapping(struct device *dev)
* non-identity mapping.
*/
dmar_remove_one_dev_info(dev);
- dev_info(dev, "32bit DMA uses non-identity mapping\n");
- } else {
- /*
- * In case of a detached 64 bit DMA device from vm, the device
- * is put into si_domain for identity mapping.
- */
- if (iommu_should_identity_map(dev, 0) &&
- !domain_add_dev_info(si_domain, dev)) {
- dev_info(dev, "64bit DMA uses identity mapping\n");
- return false;
+ ret = iommu_request_dma_domain_for_dev(dev);
+ if (ret) {
+ struct iommu_domain *domain;
+ struct dmar_domain *dmar_domain;
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain) {
+ dmar_domain = to_dmar_domain(domain);
+ dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
+ }
+ get_private_domain_for_dev(dev);
}
+
+ dev_info(dev, "32bit DMA uses non-identity mapping\n");
}
return true;
@@ -3660,7 +3472,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- domain = get_valid_domain_for_dev(dev);
+ domain = find_domain(dev);
if (!domain)
return DMA_MAPPING_ERROR;
@@ -3875,7 +3687,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
if (!iommu_need_mapping(dev))
return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
- domain = get_valid_domain_for_dev(dev);
+ domain = find_domain(dev);
if (!domain)
return 0;
@@ -4194,13 +4006,10 @@ static void __init init_iommu_pm_ops(void)
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */
-
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
- int prot = DMA_PTE_READ|DMA_PTE_WRITE;
struct dmar_rmrr_unit *rmrru;
- size_t length;
rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru)
@@ -4211,23 +4020,15 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
- length = rmrr->end_address - rmrr->base_address + 1;
- rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
- IOMMU_RESV_DIRECT);
- if (!rmrru->resv)
- goto free_rmrru;
-
rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
&rmrru->devices_cnt);
if (rmrru->devices_cnt && rmrru->devices == NULL)
- goto free_all;
+ goto free_rmrru;
list_add(&rmrru->list, &dmar_rmrr_units);
return 0;
-free_all:
- kfree(rmrru->resv);
free_rmrru:
kfree(rmrru);
out:
@@ -4445,7 +4246,6 @@ static void intel_iommu_free_dmars(void)
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
- kfree(rmrru->resv);
kfree(rmrru);
}
@@ -4550,42 +4350,6 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
return 0;
}
-/*
- * Here we only respond to action of unbound device from driver.
- *
- * Added device is not attached to its DMAR domain here yet. That will happen
- * when mapping the device to iova.
- */
-static int device_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
- struct dmar_domain *domain;
-
- if (iommu_dummy(dev))
- return 0;
-
- if (action == BUS_NOTIFY_REMOVED_DEVICE) {
- domain = find_domain(dev);
- if (!domain)
- return 0;
-
- dmar_remove_one_dev_info(dev);
- if (!domain_type_is_vm_or_si(domain) &&
- list_empty(&domain->devices))
- domain_exit(domain);
- } else if (action == BUS_NOTIFY_ADD_DEVICE) {
- if (iommu_should_identity_map(dev, 1))
- domain_add_dev_info(si_domain, dev);
- }
-
- return 0;
-}
-
-static struct notifier_block device_nb = {
- .notifier_call = device_notifier,
-};
-
static int intel_iommu_memory_notifier(struct notifier_block *nb,
unsigned long val, void *v)
{
@@ -4812,6 +4576,49 @@ static int __init platform_optin_force_iommu(void)
return 1;
}
+static int __init probe_acpi_namespace_devices(void)
+{
+ struct dmar_drhd_unit *drhd;
+ /* To avoid a -Wunused-but-set-variable warning. */
+ struct intel_iommu *iommu __maybe_unused;
+ struct device *dev;
+ int i, ret = 0;
+
+ for_each_active_iommu(iommu, drhd) {
+ for_each_active_dev_scope(drhd->devices,
+ drhd->devices_cnt, i, dev) {
+ struct acpi_device_physical_node *pn;
+ struct iommu_group *group;
+ struct acpi_device *adev;
+
+ if (dev->bus != &acpi_bus_type)
+ continue;
+
+ adev = to_acpi_device(dev);
+ mutex_lock(&adev->physical_node_lock);
+ list_for_each_entry(pn,
+ &adev->physical_node_list, node) {
+ group = iommu_group_get(pn->dev);
+ if (group) {
+ iommu_group_put(group);
+ continue;
+ }
+
+ pn->dev->bus->iommu_ops = &intel_iommu_ops;
+ ret = iommu_probe_device(pn->dev);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&adev->physical_node_lock);
+
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
int __init intel_iommu_init(void)
{
int ret = -ENODEV;
@@ -4901,7 +4708,6 @@ int __init intel_iommu_init(void)
goto out_free_reserved_range;
}
up_write(&dmar_global_lock);
- pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
swiotlb = 0;
@@ -4919,11 +4725,25 @@ int __init intel_iommu_init(void)
}
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
- bus_register_notifier(&pci_bus_type, &device_nb);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
intel_iommu_cpu_dead);
+
+ down_read(&dmar_global_lock);
+ if (probe_acpi_namespace_devices())
+ pr_warn("ACPI name space devices didn't probe correctly\n");
+ up_read(&dmar_global_lock);
+
+ /* Finally, we enable the DMA remapping hardware. */
+ for_each_iommu(iommu, drhd) {
+ if (!drhd->ignored && !translation_pre_enabled(iommu))
+ iommu_enable_translation(iommu);
+
+ iommu_disable_protect_mem_regions(iommu);
+ }
+ pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
+
intel_iommu_enabled = 1;
intel_iommu_debugfs_init();
@@ -4962,6 +4782,7 @@ static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
static void __dmar_remove_one_dev_info(struct device_domain_info *info)
{
+ struct dmar_domain *domain;
struct intel_iommu *iommu;
unsigned long flags;
@@ -4971,6 +4792,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
return;
iommu = info->iommu;
+ domain = info->domain;
if (info->dev) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
@@ -4985,9 +4807,14 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
unlink_domain_info(info);
spin_lock_irqsave(&iommu->lock, flags);
- domain_detach_iommu(info->domain, iommu);
+ domain_detach_iommu(domain, iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
+ /* free the private domain */
+ if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
+ domain_exit(info->domain);
+
free_devinfo_mem(info);
}
@@ -5002,62 +4829,55 @@ static void dmar_remove_one_dev_info(struct device *dev)
spin_unlock_irqrestore(&device_domain_lock, flags);
}
-static int md_domain_init(struct dmar_domain *domain, int guest_width)
-{
- int adjust_width;
-
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
- domain_reserve_special_ranges(domain);
-
- /* calculate AGAW */
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- domain->agaw = width_to_agaw(adjust_width);
-
- domain->iommu_coherency = 0;
- domain->iommu_snooping = 0;
- domain->iommu_superpage = 0;
- domain->max_addr = 0;
-
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
- return 0;
-}
-
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
+ switch (type) {
+ case IOMMU_DOMAIN_DMA:
+ /* fallthrough */
+ case IOMMU_DOMAIN_UNMANAGED:
+ dmar_domain = alloc_domain(0);
+ if (!dmar_domain) {
+ pr_err("Can't allocate dmar_domain\n");
+ return NULL;
+ }
+ if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ pr_err("Domain initialization failed\n");
+ domain_exit(dmar_domain);
+ return NULL;
+ }
- dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
- if (!dmar_domain) {
- pr_err("Can't allocate dmar_domain\n");
- return NULL;
- }
- if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
- pr_err("Domain initialization failed\n");
- domain_exit(dmar_domain);
+ if (type == IOMMU_DOMAIN_DMA &&
+ init_iova_flush_queue(&dmar_domain->iovad,
+ iommu_flush_iova, iova_entry_free)) {
+ pr_warn("iova flush queue initialization failed\n");
+ intel_iommu_strict = 1;
+ }
+
+ domain_update_iommu_cap(dmar_domain);
+
+ domain = &dmar_domain->domain;
+ domain->geometry.aperture_start = 0;
+ domain->geometry.aperture_end =
+ __DOMAIN_MAX_ADDR(dmar_domain->gaw);
+ domain->geometry.force_aperture = true;
+
+ return domain;
+ case IOMMU_DOMAIN_IDENTITY:
+ return &si_domain->domain;
+ default:
return NULL;
}
- domain_update_iommu_cap(dmar_domain);
-
- domain = &dmar_domain->domain;
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
- domain->geometry.force_aperture = true;
- return domain;
+ return NULL;
}
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
- domain_exit(to_dmar_domain(domain));
+ if (domain != &si_domain->domain)
+ domain_exit(to_dmar_domain(domain));
}
/*
@@ -5233,7 +5053,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
{
int ret;
- if (device_is_rmrr_locked(dev)) {
+ if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
+ device_is_rmrr_locked(dev)) {
dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
return -EPERM;
}
@@ -5246,15 +5067,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
struct dmar_domain *old_domain;
old_domain = find_domain(dev);
- if (old_domain) {
- rcu_read_lock();
+ if (old_domain)
dmar_remove_one_dev_info(dev);
- rcu_read_unlock();
-
- if (!domain_type_is_vm_or_si(old_domain) &&
- list_empty(&old_domain->devices))
- domain_exit(old_domain);
- }
}
ret = prepare_domain_attach_device(domain, dev);
@@ -5300,6 +5114,9 @@ static int intel_iommu_map(struct iommu_domain *domain,
int prot = 0;
int ret;
+ if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
+ return -EINVAL;
+
if (iommu_prot & IOMMU_READ)
prot |= DMA_PTE_READ;
if (iommu_prot & IOMMU_WRITE)
@@ -5341,6 +5158,8 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
/* Cope with horrid API which requires us to unmap more than the
size argument if it happens to be a large-page mapping. */
BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
+ if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
+ return 0;
if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -5372,6 +5191,9 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
int level = 0;
u64 phys = 0;
+ if (dmar_domain->flags & DOMAIN_FLAG_LOSE_CHILDREN)
+ return 0;
+
pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
if (pte)
phys = dma_pte_addr(pte);
@@ -5427,9 +5249,12 @@ static bool intel_iommu_capable(enum iommu_cap cap)
static int intel_iommu_add_device(struct device *dev)
{
+ struct dmar_domain *dmar_domain;
+ struct iommu_domain *domain;
struct intel_iommu *iommu;
struct iommu_group *group;
u8 bus, devfn;
+ int ret;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
@@ -5437,12 +5262,45 @@ static int intel_iommu_add_device(struct device *dev)
iommu_device_link(&iommu->iommu, dev);
+ if (translation_pre_enabled(iommu))
+ dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
+
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
iommu_group_put(group);
+
+ domain = iommu_get_domain_for_dev(dev);
+ dmar_domain = to_dmar_domain(domain);
+ if (domain->type == IOMMU_DOMAIN_DMA) {
+ if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
+ ret = iommu_request_dm_for_dev(dev);
+ if (ret) {
+ dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
+ domain_add_dev_info(si_domain, dev);
+ dev_info(dev,
+ "Device uses a private identity domain.\n");
+ }
+ }
+ } else {
+ if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
+ ret = iommu_request_dma_domain_for_dev(dev);
+ if (ret) {
+ dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
+ if (!get_private_domain_for_dev(dev)) {
+ dev_warn(dev,
+ "Failed to get a private domain.\n");
+ return -ENOMEM;
+ }
+
+ dev_info(dev,
+ "Device uses a private dma domain.\n");
+ }
+ }
+ }
+
return 0;
}
@@ -5463,22 +5321,51 @@ static void intel_iommu_remove_device(struct device *dev)
static void intel_iommu_get_resv_regions(struct device *device,
struct list_head *head)
{
+ int prot = DMA_PTE_READ | DMA_PTE_WRITE;
struct iommu_resv_region *reg;
struct dmar_rmrr_unit *rmrr;
struct device *i_dev;
int i;
- rcu_read_lock();
+ down_read(&dmar_global_lock);
for_each_rmrr_units(rmrr) {
for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
i, i_dev) {
- if (i_dev != device)
+ struct iommu_resv_region *resv;
+ enum iommu_resv_type type;
+ size_t length;
+
+ if (i_dev != device &&
+ !is_downstream_to_pci_bridge(device, i_dev))
continue;
- list_add_tail(&rmrr->resv->list, head);
+ length = rmrr->end_address - rmrr->base_address + 1;
+
+ type = device_rmrr_is_relaxable(device) ?
+ IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT;
+
+ resv = iommu_alloc_resv_region(rmrr->base_address,
+ length, prot, type);
+ if (!resv)
+ break;
+
+ list_add_tail(&resv->list, head);
}
}
- rcu_read_unlock();
+ up_read(&dmar_global_lock);
+
+#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
+ if (dev_is_pci(device)) {
+ struct pci_dev *pdev = to_pci_dev(device);
+
+ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
+ reg = iommu_alloc_resv_region(0, 1UL << 24, 0,
+ IOMMU_RESV_DIRECT);
+ if (reg)
+ list_add_tail(&reg->list, head);
+ }
+ }
+#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
@@ -5493,10 +5380,8 @@ static void intel_iommu_put_resv_regions(struct device *dev,
{
struct iommu_resv_region *entry, *next;
- list_for_each_entry_safe(entry, next, head, list) {
- if (entry->type == IOMMU_RESV_MSI)
- kfree(entry);
- }
+ list_for_each_entry_safe(entry, next, head, list)
+ kfree(entry);
}
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
@@ -5508,7 +5393,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
u64 ctx_lo;
int ret;
- domain = get_valid_domain_for_dev(dev);
+ domain = find_domain(dev);
if (!domain)
return -EINVAL;
@@ -5550,6 +5435,19 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
return ret;
}
+static void intel_iommu_apply_resv_region(struct device *dev,
+ struct iommu_domain *domain,
+ struct iommu_resv_region *region)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long start, end;
+
+ start = IOVA_PFN(region->start);
+ end = IOVA_PFN(region->start + region->length - 1);
+
+ WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
+}
+
#ifdef CONFIG_INTEL_IOMMU_SVM
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
@@ -5699,6 +5597,12 @@ intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
dmar_domain->default_pasid : -EINVAL;
}
+static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
+ struct device *dev)
+{
+ return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
@@ -5715,11 +5619,13 @@ const struct iommu_ops intel_iommu_ops = {
.remove_device = intel_iommu_remove_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = intel_iommu_put_resv_regions,
+ .apply_resv_region = intel_iommu_apply_resv_region,
.device_group = pci_device_group,
.dev_has_feat = intel_iommu_dev_has_feat,
.dev_feat_enabled = intel_iommu_dev_feat_enabled,
.dev_enable_feat = intel_iommu_dev_enable_feat,
.dev_disable_feat = intel_iommu_dev_disable_feat,
+ .is_attach_deferred = intel_iommu_is_attach_deferred,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/intel-pasid.c b/drivers/iommu/intel-pasid.c
index 2fefeafda437..040a445be300 100644
--- a/drivers/iommu/intel-pasid.c
+++ b/drivers/iommu/intel-pasid.c
@@ -169,23 +169,6 @@ attach_out:
return 0;
}
-/* Get PRESENT bit of a PASID directory entry. */
-static inline bool
-pasid_pde_is_present(struct pasid_dir_entry *pde)
-{
- return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
-}
-
-/* Get PASID table from a PASID directory entry. */
-static inline struct pasid_entry *
-get_pasid_table_from_pde(struct pasid_dir_entry *pde)
-{
- if (!pasid_pde_is_present(pde))
- return NULL;
-
- return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
-}
-
void intel_pasid_free_table(struct device *dev)
{
struct device_domain_info *info;
@@ -389,7 +372,7 @@ static inline void pasid_set_present(struct pasid_entry *pe)
*/
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
- pasid_set_bits(&pe->val[1], 1 << 23, value);
+ pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}
/*
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h
index 23537b3f34e3..fc8cd8f17de1 100644
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -18,6 +18,10 @@
#define PDE_PFN_MASK PAGE_MASK
#define PASID_PDE_SHIFT 6
#define MAX_NR_PASID_BITS 20
+#define PASID_TBL_ENTRIES BIT(PASID_PDE_SHIFT)
+
+#define is_pasid_enabled(entry) (((entry)->lo >> 3) & 0x1)
+#define get_pasid_dir_size(entry) (1 << ((((entry)->lo >> 9) & 0x7) + 7))
/*
* Domain ID reserved for pasid entries programmed for first-level
@@ -49,6 +53,28 @@ struct pasid_table {
struct list_head dev; /* device list */
};
+/* Get PRESENT bit of a PASID directory entry. */
+static inline bool pasid_pde_is_present(struct pasid_dir_entry *pde)
+{
+ return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
+}
+
+/* Get PASID table from a PASID directory entry. */
+static inline struct pasid_entry *
+get_pasid_table_from_pde(struct pasid_dir_entry *pde)
+{
+ if (!pasid_pde_is_present(pde))
+ return NULL;
+
+ return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
+}
+
+/* Get PRESENT bit of a PASID table entry. */
+static inline bool pasid_pte_is_present(struct pasid_entry *pte)
+{
+ return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
+}
+
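
Moving these helpers into the header lets other translation units, such as the debugfs code in this series, walk PASID structures without duplicating the bit masking. A small sketch under that assumption; count_present_pasids() is a hypothetical consumer and assumes intel-pasid.h is included:

/*
 * Hypothetical walk: count the present entries of one PASID table,
 * given its directory entry. Returns 0 if the table is not present.
 */
static int count_present_pasids(struct pasid_dir_entry *pde)
{
	struct pasid_entry *table = get_pasid_table_from_pde(pde);
	int i, count = 0;

	if (!table)
		return 0;

	for (i = 0; i < PASID_TBL_ENTRIES; i++)
		if (pasid_pte_is_present(&table[i]))
			count++;

	return count;
}
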
extern u32 intel_pasid_max_id;
int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp);
void intel_pasid_free_id(int pasid);
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index eceaa7e968ae..780de0caafe8 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -366,6 +366,21 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
}
list_add_tail(&svm->list, &global_svm_list);
+ } else {
+ /*
+ * Binding a new device with existing PASID, need to setup
+ * the PASID entry.
+ */
+ spin_lock(&iommu->lock);
+ ret = intel_pasid_setup_first_level(iommu, dev,
+ mm ? mm->pgd : init_mm.pgd,
+ svm->pasid, FLPT_DEFAULT_DID,
+ mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+ spin_unlock(&iommu->lock);
+ if (ret) {
+ kfree(sdev);
+ goto out;
+ }
}
list_add_rcu(&sdev->list, &svm->devs);
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 4160aa9f3f80..4786ca061e31 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -101,7 +101,7 @@ static void init_ir_status(struct intel_iommu *iommu)
iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}
-static int alloc_irte(struct intel_iommu *iommu, int irq,
+static int alloc_irte(struct intel_iommu *iommu,
struct irq_2_iommu *irq_iommu, u16 count)
{
struct ir_table *table = iommu->ir_table;
@@ -1374,7 +1374,7 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
goto out_free_parent;
down_read(&dmar_global_lock);
- index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs);
+ index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
up_read(&dmar_global_lock);
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 9a8a8870e267..0fc8dfab2abf 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* CPU-agnostic ARM page table allocator.
*
@@ -14,18 +15,6 @@
* - PXN
* - Domains
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Copyright (C) 2014-2015 ARM Limited
* Copyright (c) 2014-2015 MediaTek Inc.
*/
@@ -215,7 +204,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
goto out_free;
}
- if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
+ if (table && !cfg->coherent_walk) {
dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto out_free;
@@ -249,7 +238,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
struct device *dev = cfg->iommu_dev;
size_t size = ARM_V7S_TABLE_SIZE(lvl);
- if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
+ if (!cfg->coherent_walk)
dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
DMA_TO_DEVICE);
if (lvl == 1)
@@ -261,7 +250,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
struct io_pgtable_cfg *cfg)
{
- if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)
+ if (cfg->coherent_walk)
return;
dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
@@ -727,7 +716,6 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
IO_PGTABLE_QUIRK_NO_PERMS |
IO_PGTABLE_QUIRK_TLBI_ON_MAP |
IO_PGTABLE_QUIRK_ARM_MTK_4GB |
- IO_PGTABLE_QUIRK_NO_DMA |
IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
@@ -790,8 +778,11 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
/* TTBRs */
cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
- ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
- ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA);
+ (cfg->coherent_walk ?
+ (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
+ (ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
cfg->arm_v7s_cfg.ttbr[1] = 0;
return &data->iop;
@@ -846,7 +837,8 @@ static int __init arm_v7s_do_selftests(void)
.tlb = &dummy_tlb_ops,
.oas = 32,
.ias = 32,
- .quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA,
+ .coherent_walk = true,
+ .quirks = IO_PGTABLE_QUIRK_ARM_NS,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
unsigned int iova, size, iova_start;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4e21efbc4459..161a7d56264d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* CPU-agnostic ARM page table allocator.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Copyright (C) 2014 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
@@ -167,10 +156,12 @@
#define ARM_LPAE_MAIR_ATTR_MASK 0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
#define ARM_LPAE_MAIR_ATTR_NC 0x44
+#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA 0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
+#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE 3
#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2)
@@ -250,7 +241,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
return NULL;
pages = page_address(p);
- if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
+ if (!cfg->coherent_walk) {
dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto out_free;
@@ -276,7 +267,7 @@ out_free:
static void __arm_lpae_free_pages(void *pages, size_t size,
struct io_pgtable_cfg *cfg)
{
- if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
+ if (!cfg->coherent_walk)
dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
size, DMA_TO_DEVICE);
free_pages((unsigned long)pages, get_order(size));
@@ -294,7 +285,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
{
*ptep = pte;
- if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
+ if (!cfg->coherent_walk)
__arm_lpae_sync_pte(ptep, cfg);
}
@@ -372,8 +363,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
old = cmpxchg64_relaxed(ptep, curr, new);
- if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
- (old & ARM_LPAE_PTE_SW_SYNC))
+ if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
return old;
/* Even if it's not ours, there's no point waiting; just kick it */
@@ -414,8 +404,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
if (pte)
__arm_lpae_free_pages(cptep, tblsz, cfg);
- } else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
- !(pte & ARM_LPAE_PTE_SW_SYNC)) {
+ } else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
__arm_lpae_sync_pte(ptep, cfg);
}
@@ -470,6 +459,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
+ else if (prot & IOMMU_QCOM_SYS_CACHE)
+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
+ << ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
if (prot & IOMMU_NOEXEC)
@@ -794,7 +786,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
u64 reg;
struct arm_lpae_io_pgtable *data;
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
@@ -803,9 +795,15 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
return NULL;
/* TCR */
- reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
- (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
- (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+ if (cfg->coherent_walk) {
+ reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+ } else {
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
+ }
switch (ARM_LPAE_GRANULE(data)) {
case SZ_4K:
@@ -857,7 +855,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_MAIR_ATTR_WBRWA
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
(ARM_LPAE_MAIR_ATTR_DEVICE
- << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
+ (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
cfg->arm_lpae_s1_cfg.mair[0] = reg;
cfg->arm_lpae_s1_cfg.mair[1] = 0;
@@ -887,8 +887,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
/* The NS quirk doesn't apply at stage 2 */
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
- IO_PGTABLE_QUIRK_NON_STRICT))
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1223,7 +1222,7 @@ static int __init arm_lpae_do_selftests(void)
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
.oas = 48,
- .quirks = IO_PGTABLE_QUIRK_NO_DMA,
+ .coherent_walk = true,
};
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 5227cfdbb65b..ced53e5b72b5 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic page table allocator for IOMMUs.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Copyright (C) 2014 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index 44127d54e943..e436ff813e7e 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* IOMMU sysfs class support
*
* Copyright (C) 2014 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/device.h>
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f9cacce909d3..0c674d80c37f 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -61,10 +61,11 @@ struct iommu_group_attribute {
};
static const char * const iommu_group_resv_type_string[] = {
- [IOMMU_RESV_DIRECT] = "direct",
- [IOMMU_RESV_RESERVED] = "reserved",
- [IOMMU_RESV_MSI] = "msi",
- [IOMMU_RESV_SW_MSI] = "msi",
+ [IOMMU_RESV_DIRECT] = "direct",
+ [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
+ [IOMMU_RESV_RESERVED] = "reserved",
+ [IOMMU_RESV_MSI] = "msi",
+ [IOMMU_RESV_SW_MSI] = "msi",
};
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
@@ -95,15 +96,43 @@ void iommu_device_unregister(struct iommu_device *iommu)
spin_unlock(&iommu_device_lock);
}
+static struct iommu_param *iommu_get_dev_param(struct device *dev)
+{
+ struct iommu_param *param = dev->iommu_param;
+
+ if (param)
+ return param;
+
+ param = kzalloc(sizeof(*param), GFP_KERNEL);
+ if (!param)
+ return NULL;
+
+ mutex_init(&param->lock);
+ dev->iommu_param = param;
+ return param;
+}
+
+static void iommu_free_dev_param(struct device *dev)
+{
+ kfree(dev->iommu_param);
+ dev->iommu_param = NULL;
+}
+
int iommu_probe_device(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
- int ret = -EINVAL;
+ int ret;
WARN_ON(dev->iommu_group);
+ if (!ops)
+ return -EINVAL;
- if (ops)
- ret = ops->add_device(dev);
+ if (!iommu_get_dev_param(dev))
+ return -ENOMEM;
+
+ ret = ops->add_device(dev);
+ if (ret)
+ iommu_free_dev_param(dev);
return ret;
}
@@ -114,6 +143,8 @@ void iommu_release_device(struct device *dev)
if (dev->iommu_group)
ops->remove_device(dev);
+
+ iommu_free_dev_param(dev);
}
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
@@ -225,18 +256,21 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new,
pos = pos->next;
} else if ((start >= a) && (end <= b)) {
if (new->type == type)
- goto done;
+ return 0;
else
pos = pos->next;
} else {
if (new->type == type) {
phys_addr_t new_start = min(a, start);
phys_addr_t new_end = max(b, end);
+ int ret;
list_del(&entry->list);
entry->start = new_start;
entry->length = new_end - new_start + 1;
- iommu_insert_resv_region(entry, regions);
+ ret = iommu_insert_resv_region(entry, regions);
+ kfree(entry);
+ return ret;
} else {
pos = pos->next;
}
@@ -249,7 +283,6 @@ insert:
return -ENOMEM;
list_add_tail(&region->list, pos);
-done:
return 0;
}
@@ -329,7 +362,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group,
type = "unmanaged\n";
break;
case IOMMU_DOMAIN_DMA:
- type = "DMA";
+ type = "DMA\n";
break;
}
}
@@ -561,7 +594,8 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);
- if (entry->type != IOMMU_RESV_DIRECT)
+ if (entry->type != IOMMU_RESV_DIRECT &&
+ entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
continue;
for (addr = start; addr < end; addr += pg_size) {
@@ -843,6 +877,206 @@ int iommu_group_unregister_notifier(struct iommu_group *group,
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
/**
+ * iommu_register_device_fault_handler() - Register a device fault handler
+ * @dev: the device
+ * @handler: the fault handler
+ * @data: private data passed as argument to the handler
+ *
+ * When an IOMMU fault event is received, this handler gets called with the
+ * fault event and data as arguments. The handler should return 0 on success. If
+ * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
+ * complete the fault by calling iommu_page_response() with one of the following
+ * response codes:
+ * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
+ * - IOMMU_PAGE_RESP_INVALID: terminate the fault
+ * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
+ * page faults if possible.
+ *
+ * Return 0 if the fault handler was installed successfully, or an error.
+ */
+int iommu_register_device_fault_handler(struct device *dev,
+ iommu_dev_fault_handler_t handler,
+ void *data)
+{
+ struct iommu_param *param = dev->iommu_param;
+ int ret = 0;
+
+ if (!param)
+ return -EINVAL;
+
+ mutex_lock(&param->lock);
+ /* Only allow one fault handler registered for each device */
+ if (param->fault_param) {
+ ret = -EBUSY;
+ goto done_unlock;
+ }
+
+ get_device(dev);
+ param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
+ if (!param->fault_param) {
+ put_device(dev);
+ ret = -ENOMEM;
+ goto done_unlock;
+ }
+ param->fault_param->handler = handler;
+ param->fault_param->data = data;
+ mutex_init(&param->fault_param->lock);
+ INIT_LIST_HEAD(&param->fault_param->faults);
+
+done_unlock:
+ mutex_unlock(&param->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
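
A consumer registers at most one handler per device, typically at probe time, and removes it on the remove/error path. A minimal consumer-side sketch; my_fault_handler() and my_driver_enable_faults() are hypothetical names:

/*
 * Hypothetical consumer-side handler: log the fault and let recoverable
 * page requests be completed later via iommu_page_response().
 */
static int my_fault_handler(struct iommu_fault *fault, void *data)
{
	struct device *dev = data;

	dev_dbg(dev, "IOMMU fault, type %u\n", fault->type);
	return 0;
}

/* Registration during driver probe; unregister on the remove path. */
static int my_driver_enable_faults(struct device *dev)
{
	return iommu_register_device_fault_handler(dev, my_fault_handler, dev);
}
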
+
+/**
+ * iommu_unregister_device_fault_handler() - Unregister the device fault handler
+ * @dev: the device
+ *
+ * Remove the device fault handler installed with
+ * iommu_register_device_fault_handler().
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_unregister_device_fault_handler(struct device *dev)
+{
+ struct iommu_param *param = dev->iommu_param;
+ int ret = 0;
+
+ if (!param)
+ return -EINVAL;
+
+ mutex_lock(&param->lock);
+
+ if (!param->fault_param)
+ goto unlock;
+
+	/* We cannot unregister the handler while faults are still pending. */
+ if (!list_empty(&param->fault_param->faults)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ kfree(param->fault_param);
+ param->fault_param = NULL;
+ put_device(dev);
+unlock:
+ mutex_unlock(&param->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
+
+/**
+ * iommu_report_device_fault() - Report fault event to device driver
+ * @dev: the device
+ * @evt: fault event data
+ *
+ * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
+ * handler. When this function fails and the fault is recoverable, it is the
+ * caller's responsibility to complete the fault.
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
+{
+ struct iommu_param *param = dev->iommu_param;
+ struct iommu_fault_event *evt_pending = NULL;
+ struct iommu_fault_param *fparam;
+ int ret = 0;
+
+ if (!param || !evt)
+ return -EINVAL;
+
+ /* we only report device fault if there is a handler registered */
+ mutex_lock(&param->lock);
+ fparam = param->fault_param;
+ if (!fparam || !fparam->handler) {
+ ret = -EINVAL;
+ goto done_unlock;
+ }
+
+ if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
+ (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
+ evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
+ GFP_KERNEL);
+ if (!evt_pending) {
+ ret = -ENOMEM;
+ goto done_unlock;
+ }
+ mutex_lock(&fparam->lock);
+ list_add_tail(&evt_pending->list, &fparam->faults);
+ mutex_unlock(&fparam->lock);
+ }
+
+ ret = fparam->handler(&evt->fault, fparam->data);
+ if (ret && evt_pending) {
+ mutex_lock(&fparam->lock);
+ list_del(&evt_pending->list);
+ mutex_unlock(&fparam->lock);
+ kfree(evt_pending);
+ }
+done_unlock:
+ mutex_unlock(&param->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_report_device_fault);
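
On the producer side, an IOMMU driver builds an iommu_fault_event and hands it to this function, usually from its fault IRQ thread. A minimal sketch under that assumption; my_iommu_report_dma_fault() is a hypothetical driver helper and only the fault type is filled in:

/*
 * Hypothetical IOMMU-driver helper: forward an unrecoverable fault to the
 * handler registered by the device driver, if any.
 */
static void my_iommu_report_dma_fault(struct device *dev, u64 iova)
{
	struct iommu_fault_event evt = {
		.fault.type = IOMMU_FAULT_DMA_UNRECOV,
		/* Unrecoverable-fault details (address, reason) would go here. */
	};

	if (iommu_report_device_fault(dev, &evt))
		dev_err_ratelimited(dev, "fault at 0x%llx not delivered\n", iova);
}
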
+
+int iommu_page_response(struct device *dev,
+ struct iommu_page_response *msg)
+{
+ bool pasid_valid;
+ int ret = -EINVAL;
+ struct iommu_fault_event *evt;
+ struct iommu_fault_page_request *prm;
+ struct iommu_param *param = dev->iommu_param;
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+
+ if (!domain || !domain->ops->page_response)
+ return -ENODEV;
+
+ if (!param || !param->fault_param)
+ return -EINVAL;
+
+ if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
+ msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
+ return -EINVAL;
+
+ /* Only send response if there is a fault report pending */
+ mutex_lock(&param->fault_param->lock);
+ if (list_empty(&param->fault_param->faults)) {
+ dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
+ goto done_unlock;
+ }
+ /*
+ * Check if we have a matching page request pending to respond,
+ * otherwise return -EINVAL
+ */
+ list_for_each_entry(evt, &param->fault_param->faults, list) {
+ prm = &evt->fault.prm;
+ pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+
+ if ((pasid_valid && prm->pasid != msg->pasid) ||
+ prm->grpid != msg->grpid)
+ continue;
+
+ /* Sanitize the reply */
+ msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
+
+ ret = domain->ops->page_response(dev, evt, msg);
+ list_del(&evt->list);
+ kfree(evt);
+ break;
+ }
+
+done_unlock:
+ mutex_unlock(&param->fault_param->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_page_response);
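
For recoverable page requests, the consumer completes the fault through this interface once it has serviced the request. A minimal sketch, assuming the handler saved the prm fields from the fault event; my_complete_page_request() is a hypothetical helper:

/* Hypothetical completion path: report success for one page request. */
static int my_complete_page_request(struct device *dev,
				    struct iommu_fault_page_request *prm)
{
	struct iommu_page_response resp = {
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.grpid	 = prm->grpid,
		.code	 = IOMMU_PAGE_RESP_SUCCESS,
	};

	if (prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
		resp.pasid  = prm->pasid;
	}

	return iommu_page_response(dev, &resp);
}
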
+
+/**
* iommu_group_id - Return ID for a group
* @group: the group to ID
*
@@ -1895,24 +2129,23 @@ struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
return region;
}
-/* Request that a device is direct mapped by the IOMMU */
-int iommu_request_dm_for_dev(struct device *dev)
+static int
+request_default_domain_for_dev(struct device *dev, unsigned long type)
{
- struct iommu_domain *dm_domain;
+ struct iommu_domain *domain;
struct iommu_group *group;
int ret;
/* Device must already be in a group before calling this function */
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ group = iommu_group_get(dev);
+ if (!group)
+ return -EINVAL;
mutex_lock(&group->mutex);
/* Check if the default domain is already direct mapped */
ret = 0;
- if (group->default_domain &&
- group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
+ if (group->default_domain && group->default_domain->type == type)
goto out;
/* Don't change mappings of existing devices */
@@ -1922,23 +2155,26 @@ int iommu_request_dm_for_dev(struct device *dev)
/* Allocate a direct mapped domain */
ret = -ENOMEM;
- dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
- if (!dm_domain)
+ domain = __iommu_domain_alloc(dev->bus, type);
+ if (!domain)
goto out;
/* Attach the device to the domain */
- ret = __iommu_attach_group(dm_domain, group);
+ ret = __iommu_attach_group(domain, group);
if (ret) {
- iommu_domain_free(dm_domain);
+ iommu_domain_free(domain);
goto out;
}
+ iommu_group_create_direct_mappings(group, dev);
+
/* Make the direct mapped domain the default for this group */
if (group->default_domain)
iommu_domain_free(group->default_domain);
- group->default_domain = dm_domain;
+ group->default_domain = domain;
- dev_info(dev, "Using iommu direct mapping\n");
+ dev_info(dev, "Using iommu %s mapping\n",
+ type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
ret = 0;
out:
@@ -1948,6 +2184,18 @@ out:
return ret;
}
+/* Request that a device is direct mapped by the IOMMU */
+int iommu_request_dm_for_dev(struct device *dev)
+{
+ return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
+}
+
+/* Request that a device can't be direct mapped by the IOMMU */
+int iommu_request_dma_domain_for_dev(struct device *dev)
+{
+ return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
+}
+
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
const struct iommu_ops *ops = NULL;
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 9a380c10655e..ad0098c0c87c 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -36,12 +36,16 @@
#define arm_iommu_detach_device(...) do {} while (0)
#endif
-#define IPMMU_CTX_MAX 8
+#define IPMMU_CTX_MAX 8U
+#define IPMMU_CTX_INVALID -1
+
+#define IPMMU_UTLB_MAX 48U
struct ipmmu_features {
bool use_ns_alias_offset;
bool has_cache_leaf_nodes;
unsigned int number_of_contexts;
+ unsigned int num_utlbs;
bool setup_imbuscr;
bool twobit_imttbcr_sl0;
bool reserved_context;
@@ -53,11 +57,11 @@ struct ipmmu_vmsa_device {
struct iommu_device iommu;
struct ipmmu_vmsa_device *root;
const struct ipmmu_features *features;
- unsigned int num_utlbs;
unsigned int num_ctx;
spinlock_t lock; /* Protects ctx and domains[] */
DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
+ s8 utlb_ctx[IPMMU_UTLB_MAX];
struct iommu_group *group;
struct dma_iommu_mapping *mapping;
@@ -186,7 +190,8 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
#define IMMAIR_ATTR_IDX_WBRWA 1
#define IMMAIR_ATTR_IDX_DEV 2
-#define IMEAR 0x0030
+#define IMELAR 0x0030 /* IMEAR on R-Car Gen2 */
+#define IMEUAR 0x0034 /* R-Car Gen3 only */
#define IMPCTR 0x0200
#define IMPSTR 0x0208
@@ -334,6 +339,7 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
ipmmu_write(mmu, IMUCTR(utlb),
IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
IMUCTR_MMUEN);
+ mmu->utlb_ctx[utlb] = domain->context_id;
}
/*
@@ -345,6 +351,7 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
struct ipmmu_vmsa_device *mmu = domain->mmu;
ipmmu_write(mmu, IMUCTR(utlb), 0);
+ mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}
static void ipmmu_tlb_flush_all(void *cookie)
@@ -403,52 +410,10 @@ static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
spin_unlock_irqrestore(&mmu->lock, flags);
}
-static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
u64 ttbr;
u32 tmp;
- int ret;
-
- /*
- * Allocate the page table operations.
- *
- * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
- * access, Long-descriptor format" that the NStable bit being set in a
- * table descriptor will result in the NStable and NS bits of all child
- * entries being ignored and considered as being set. The IPMMU seems
- * not to comply with this, as it generates a secure access page fault
- * if any of the NStable and NS bits isn't set when running in
- * non-secure mode.
- */
- domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
- domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
- domain->cfg.ias = 32;
- domain->cfg.oas = 40;
- domain->cfg.tlb = &ipmmu_gather_ops;
- domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
- domain->io_domain.geometry.force_aperture = true;
- /*
- * TODO: Add support for coherent walk through CCI with DVM and remove
- * cache handling. For now, delegate it to the io-pgtable code.
- */
- domain->cfg.iommu_dev = domain->mmu->root->dev;
-
- /*
- * Find an unused context.
- */
- ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
- if (ret < 0)
- return ret;
-
- domain->context_id = ret;
-
- domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
- domain);
- if (!domain->iop) {
- ipmmu_domain_free_context(domain->mmu->root,
- domain->context_id);
- return -EINVAL;
- }
/* TTBR0 */
ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
@@ -494,7 +459,55 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
*/
ipmmu_ctx_write_all(domain, IMCTR,
IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
+}
+
+static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+{
+ int ret;
+
+ /*
+ * Allocate the page table operations.
+ *
+ * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
+ * access, Long-descriptor format" that the NStable bit being set in a
+ * table descriptor will result in the NStable and NS bits of all child
+ * entries being ignored and considered as being set. The IPMMU seems
+ * not to comply with this, as it generates a secure access page fault
+ * if any of the NStable and NS bits isn't set when running in
+ * non-secure mode.
+ */
+ domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
+ domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
+ domain->cfg.ias = 32;
+ domain->cfg.oas = 40;
+ domain->cfg.tlb = &ipmmu_gather_ops;
+ domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
+ domain->io_domain.geometry.force_aperture = true;
+ /*
+ * TODO: Add support for coherent walk through CCI with DVM and remove
+ * cache handling. For now, delegate it to the io-pgtable code.
+ */
+ domain->cfg.coherent_walk = false;
+ domain->cfg.iommu_dev = domain->mmu->root->dev;
+
+ /*
+ * Find an unused context.
+ */
+ ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
+ if (ret < 0)
+ return ret;
+
+ domain->context_id = ret;
+
+ domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
+ domain);
+ if (!domain->iop) {
+ ipmmu_domain_free_context(domain->mmu->root,
+ domain->context_id);
+ return -EINVAL;
+ }
+ ipmmu_domain_setup_context(domain);
return 0;
}
@@ -522,14 +535,16 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
struct ipmmu_vmsa_device *mmu = domain->mmu;
+ unsigned long iova;
u32 status;
- u32 iova;
status = ipmmu_ctx_read_root(domain, IMSTR);
if (!(status & err_mask))
return IRQ_NONE;
- iova = ipmmu_ctx_read_root(domain, IMEAR);
+ iova = ipmmu_ctx_read_root(domain, IMELAR);
+ if (IS_ENABLED(CONFIG_64BIT))
+ iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
/*
* Clear the error status flags. Unlike traditional interrupt flag
@@ -541,10 +556,10 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
/* Log fatal errors. */
if (status & IMSTR_MHIT)
- dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
+ dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
iova);
if (status & IMSTR_ABORT)
- dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
+ dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
iova);
if (!(status & (IMSTR_PF | IMSTR_TF)))
@@ -560,7 +575,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
return IRQ_HANDLED;
dev_err_ratelimited(mmu->dev,
- "Unhandled fault: status 0x%08x iova 0x%08x\n",
+ "Unhandled fault: status 0x%08x iova 0x%lx\n",
status, iova);
return IRQ_HANDLED;
@@ -885,27 +900,37 @@ error:
static int ipmmu_add_device(struct device *dev)
{
+ struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
struct iommu_group *group;
+ int ret;
/*
* Only let through devices that have been verified in xlate()
*/
- if (!to_ipmmu(dev))
+ if (!mmu)
return -ENODEV;
- if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
- return ipmmu_init_arm_mapping(dev);
+ if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) {
+ ret = ipmmu_init_arm_mapping(dev);
+ if (ret)
+ return ret;
+ } else {
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
- group = iommu_group_get_for_dev(dev);
- if (IS_ERR(group))
- return PTR_ERR(group);
+ iommu_group_put(group);
+ }
- iommu_group_put(group);
+ iommu_device_link(&mmu->iommu, dev);
return 0;
}
static void ipmmu_remove_device(struct device *dev)
{
+ struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
+
+ iommu_device_unlink(&mmu->iommu, dev);
arm_iommu_detach_device(dev);
iommu_group_remove_device(dev);
}
@@ -959,6 +984,7 @@ static const struct ipmmu_features ipmmu_features_default = {
.use_ns_alias_offset = true,
.has_cache_leaf_nodes = false,
.number_of_contexts = 1, /* software only tested with one context */
+ .num_utlbs = 32,
.setup_imbuscr = true,
.twobit_imttbcr_sl0 = false,
.reserved_context = false,
@@ -968,6 +994,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
.use_ns_alias_offset = false,
.has_cache_leaf_nodes = true,
.number_of_contexts = 8,
+ .num_utlbs = 48,
.setup_imbuscr = false,
.twobit_imttbcr_sl0 = true,
.reserved_context = true,
@@ -1020,10 +1047,10 @@ static int ipmmu_probe(struct platform_device *pdev)
}
mmu->dev = &pdev->dev;
- mmu->num_utlbs = 48;
spin_lock_init(&mmu->lock);
bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
mmu->features = of_device_get_match_data(&pdev->dev);
+ memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
/* Map I/O memory and request IRQ. */
@@ -1047,8 +1074,7 @@ static int ipmmu_probe(struct platform_device *pdev)
if (mmu->features->use_ns_alias_offset)
mmu->base += IM_NS_ALIAS_OFFSET;
- mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
- mmu->features->number_of_contexts);
+ mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
irq = platform_get_irq(pdev, 0);
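
Note: the new utlb_ctx[] array, sized by features->num_utlbs and filled with IPMMU_CTX_INVALID at probe time above, is what makes the resume path below possible: each micro-TLB slot records which context it was attached to, and only stamped slots are reprogrammed after a system sleep. A userspace model of that bookkeeping, with local stand-ins for the constants and helpers (the 0xff sentinel is just a placeholder for an index no real context can take):

#include <stdio.h>
#include <string.h>

#define NUM_UTLBS   48
#define CTX_INVALID 0xff	/* stand-in for IPMMU_CTX_INVALID */

static unsigned char utlb_ctx[NUM_UTLBS];

static void utlb_enable(unsigned int utlb, unsigned int ctx)
{
	/* ...the real driver programs the hardware here... */
	utlb_ctx[utlb] = ctx;
}

static void resume_utlbs(void)
{
	unsigned int i;

	for (i = 0; i < NUM_UTLBS; i++) {
		if (utlb_ctx[i] == CTX_INVALID)
			continue;	/* never enabled, nothing to restore */
		printf("re-enabling micro-TLB %u on context %u\n", i, utlb_ctx[i]);
	}
}

int main(void)
{
	memset(utlb_ctx, CTX_INVALID, sizeof(utlb_ctx));	/* probe time */
	utlb_enable(5, 0);
	utlb_enable(17, 2);
	resume_utlbs();		/* prints only slots 5 and 17 */
	return 0;
}
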
@@ -1140,10 +1166,48 @@ static int ipmmu_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int ipmmu_resume_noirq(struct device *dev)
+{
+ struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
+ unsigned int i;
+
+ /* Reset root MMU and restore contexts */
+ if (ipmmu_is_root(mmu)) {
+ ipmmu_device_reset(mmu);
+
+ for (i = 0; i < mmu->num_ctx; i++) {
+ if (!mmu->domains[i])
+ continue;
+
+ ipmmu_domain_setup_context(mmu->domains[i]);
+ }
+ }
+
+ /* Re-enable active micro-TLBs */
+ for (i = 0; i < mmu->features->num_utlbs; i++) {
+ if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
+ continue;
+
+ ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops ipmmu_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
+};
+#define DEV_PM_OPS &ipmmu_pm
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver ipmmu_driver = {
.driver = {
.name = "ipmmu-vmsa",
.of_match_table = of_match_ptr(ipmmu_of_ids),
+ .pm = DEV_PM_OPS,
},
.probe = ipmmu_probe,
.remove = ipmmu_remove,
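
Note: the #ifdef CONFIG_PM_SLEEP / DEV_PM_OPS arrangement added above is a common shape for drivers that need no suspend work at all, because the hardware can be rebuilt purely from software bookkeeping at resume time. A minimal hedged sketch of the same wiring for a hypothetical driver; all foo_* names are placeholders, not an existing driver.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_resume_noirq(struct device *dev)
{
	/* Reprogram the hardware from driver-private state; nothing was
	 * saved at suspend time, so there is no matching suspend hook. */
	return 0;
}

static const struct dev_pm_ops foo_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, foo_resume_noirq)
};
#define FOO_PM_OPS (&foo_pm)
#else
#define FOO_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo-sketch",
		.pm = FOO_PM_OPS,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL v2");

Keeping the callback inside the #ifdef, as the patch does, avoids an unused-function warning when CONFIG_PM_SLEEP is disabled.
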
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 4abc0ef522a8..8e19bfa94121 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* omap iommu: debugfs interface
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/err.h>
@@ -239,17 +236,6 @@ DEBUG_FOPS_RO(regs);
DEFINE_SHOW_ATTRIBUTE(tlb);
DEFINE_SHOW_ATTRIBUTE(pagetable);
-#define __DEBUG_ADD_FILE(attr, mode) \
- { \
- struct dentry *dent; \
- dent = debugfs_create_file(#attr, mode, obj->debug_dir, \
- obj, &attr##_fops); \
- if (!dent) \
- goto err; \
- }
-
-#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
-
void omap_iommu_debugfs_add(struct omap_iommu *obj)
{
struct dentry *d;
@@ -257,23 +243,13 @@ void omap_iommu_debugfs_add(struct omap_iommu *obj)
if (!iommu_debug_root)
return;
- obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root);
- if (!obj->debug_dir)
- return;
-
- d = debugfs_create_u32("nr_tlb_entries", 0400, obj->debug_dir,
- &obj->nr_tlb_entries);
- if (!d)
- return;
-
- DEBUG_ADD_FILE_RO(regs);
- DEBUG_ADD_FILE_RO(tlb);
- DEBUG_ADD_FILE_RO(pagetable);
+ d = debugfs_create_dir(obj->name, iommu_debug_root);
+ obj->debug_dir = d;
- return;
-
-err:
- debugfs_remove_recursive(obj->debug_dir);
+ debugfs_create_u32("nr_tlb_entries", 0400, d, &obj->nr_tlb_entries);
+ debugfs_create_file("regs", 0400, d, obj, &regs_fops);
+ debugfs_create_file("tlb", 0400, d, obj, &tlb_fops);
+ debugfs_create_file("pagetable", 0400, d, obj, &pagetable_fops);
}
void omap_iommu_debugfs_remove(struct omap_iommu *obj)
@@ -287,8 +263,6 @@ void omap_iommu_debugfs_remove(struct omap_iommu *obj)
void __init omap_iommu_debugfs_init(void)
{
iommu_debug_root = debugfs_create_dir("omap_iommu", NULL);
- if (!iommu_debug_root)
- pr_err("can't create debugfs dir\n");
}
void __exit omap_iommu_debugfs_exit(void)
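
Note: the debugfs conversion above follows the current kernel convention that debugfs_create_*() return values are not meant to be checked; the calls either work or silently don't, and driver logic must never branch on the outcome. A hedged sketch of that style, including the DEFINE_SHOW_ATTRIBUTE() helper seen above for the tlb/pagetable files; the foo_* names are placeholders.

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static u32 foo_counter;

static int foo_state_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "counter=%u\n", foo_counter);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_state);	/* generates foo_state_fops */

static struct dentry *foo_dir;

static void foo_debugfs_init(void)
{
	/* No error checks: a failed create only means the file is missing. */
	foo_dir = debugfs_create_dir("foo", NULL);
	debugfs_create_u32("counter", 0400, foo_dir, &foo_counter);
	debugfs_create_file("state", 0400, foo_dir, NULL, &foo_state_fops);
}

static void foo_debugfs_exit(void)
{
	debugfs_remove_recursive(foo_dir);
}
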
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index d2fb347aa4ff..dfb961d8c21b 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* omap iommu: tlb and pagetable primitives
*
@@ -6,10 +7,6 @@
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
* Paul Mundt and Toshihiro Kobayashi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/dma-mapping.h>
@@ -38,8 +35,7 @@
static const struct iommu_ops omap_iommu_ops;
-#define to_iommu(dev) \
- ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
+#define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev))
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
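
Note: the to_iommu() simplification above relies on platform_get_drvdata() being a thin wrapper around dev_get_drvdata() on the embedded struct device, so going through to_platform_device() and back is a pointless round trip. A small hedged sketch of the two access paths; the foo_* names are placeholders.

#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv { int id; };

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	platform_set_drvdata(pdev, priv);	/* stores into pdev->dev */
	return 0;
}

/* Callbacks that only receive a struct device can skip the round trip: */
static struct foo_priv *foo_from_dev(struct device *dev)
{
	/* same pointer as platform_get_drvdata(to_platform_device(dev)) */
	return dev_get_drvdata(dev);
}
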
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 1703159ef5af..09968a02d291 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* omap iommu: main structures
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _OMAP_IOMMU_H
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index 01a315227bf0..1a4adb59a859 100644
--- a/drivers/iommu/omap-iopgtable.h
+++ b/drivers/iommu/omap-iopgtable.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* omap iommu: pagetable definitions
*
* Copyright (C) 2008-2010 Nokia Corporation
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _OMAP_IOPGTABLE_H
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 8cdd3f059513..34d0b9783b3e 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* IOMMU API for QCOM secure IOMMUs. Somewhat based on arm-smmu.c
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Copyright (C) 2013 ARM Limited
* Copyright (C) 2017 Red Hat
*/
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 77d4bd93fe4b..dc26d74d79c2 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* IOMMU API for Rockchip
*
* Module Authors: Simon Xue <xxm@rock-chips.com>
* Daniel Kurtz <djkurtz@chromium.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 463ee08f7d3a..c4a652b227f8 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>