author     Linus Torvalds <torvalds@linux-foundation.org>   2019-12-02 20:05:00 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-12-02 20:05:00 +0100
commit     1daa56bcfd8b329447e0c1b1e91c3925d08489b7 (patch)
tree       1adc9bc2f832de5761348eb0808daeec6bd97adc
parent     Merge tag 'dmaengine-5.5-rc1' of git://git.infradead.org/users/vkoul/slave-dma (diff)
parent     Merge branches 'iommu/fixes', 'arm/qcom', 'arm/renesas', 'arm/rockchip', 'arm... (diff)
Merge tag 'iommu-updates-v5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:

 - Conversion of the AMD IOMMU driver to use the dma-iommu code for
   implementing the DMA-API. This gets rid of quite some code in the
   driver itself, but also has some potential for regressions (none are
   known at the moment).

 - Support for the Qualcomm SMMUv2 implementation in the SDM845 SoC.
   This also includes some firmware interface changes, but those are
   acked by the respective maintainers.

 - Preparatory work to support two distinct page-tables per domain in
   the ARM-SMMU driver

 - Power management improvements for the ARM SMMUv2

 - Custom PASID allocator support

 - Multiple PCI DMA alias support for the AMD IOMMU driver

 - Adaptation of the Mediatek driver to the changed IO/TLB flush
   interface of the IOMMU core code.

 - Preparatory patches for the Renesas IOMMU driver to support future
   hardware.

* tag 'iommu-updates-v5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (62 commits)
  iommu/rockchip: Don't provoke WARN for harmless IRQs
  iommu/vt-d: Turn off translations at shutdown
  iommu/vt-d: Check VT-d RMRR region in BIOS is reported as reserved
  iommu/arm-smmu: Remove duplicate error message
  iommu/arm-smmu-v3: Don't display an error when IRQ lines are missing
  iommu/ipmmu-vmsa: Add utlb_offset_base
  iommu/ipmmu-vmsa: Add helper functions for "uTLB" registers
  iommu/ipmmu-vmsa: Calculate context registers' offset instead of a macro
  iommu/ipmmu-vmsa: Add helper functions for MMU "context" registers
  iommu/ipmmu-vmsa: tidyup register definitions
  iommu/ipmmu-vmsa: Remove all unused register definitions
  iommu/mediatek: Reduce the tlb flush timeout value
  iommu/mediatek: Get rid of the pgtlock
  iommu/mediatek: Move the tlb_sync into tlb_flush
  iommu/mediatek: Delete the leaf in the tlb_flush
  iommu/mediatek: Use gather to achieve the tlb range flush
  iommu/mediatek: Add a new tlb_lock for tlb_flush
  iommu/mediatek: Correct the flush_iotlb_all callback
  iommu/io-pgtable-arm: Rename IOMMU_QCOM_SYS_CACHE and improve doc
  iommu/io-pgtable-arm: Rationalise MAIR handling
  ...
Diffstat:
-rw-r--r--  Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt | 1
-rw-r--r--  arch/ia64/include/asm/iommu.h | 5
-rw-r--r--  arch/x86/include/asm/iommu.h | 18
-rw-r--r--  drivers/firmware/qcom_scm-32.c | 5
-rw-r--r--  drivers/firmware/qcom_scm-64.c | 153
-rw-r--r--  drivers/firmware/qcom_scm.c | 6
-rw-r--r--  drivers/firmware/qcom_scm.h | 5
-rw-r--r--  drivers/iommu/Kconfig | 5
-rw-r--r--  drivers/iommu/Makefile | 3
-rw-r--r--  drivers/iommu/amd_iommu.c | 893
-rw-r--r--  drivers/iommu/amd_iommu_types.h | 3
-rw-r--r--  drivers/iommu/arm-smmu-impl.c | 5
-rw-r--r--  drivers/iommu/arm-smmu-qcom.c | 51
-rw-r--r--  drivers/iommu/arm-smmu-v3.c | 12
-rw-r--r--  drivers/iommu/arm-smmu.c | 223
-rw-r--r--  drivers/iommu/arm-smmu.h | 16
-rw-r--r--  drivers/iommu/dma-iommu.c | 43
-rw-r--r--  drivers/iommu/dmar.c | 5
-rw-r--r--  drivers/iommu/exynos-iommu.c | 2
-rw-r--r--  drivers/iommu/intel-iommu.c | 61
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c | 15
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 130
-rw-r--r--  drivers/iommu/ioasid.c | 422
-rw-r--r--  drivers/iommu/iommu.c | 73
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 223
-rw-r--r--  drivers/iommu/msm_iommu.c | 2
-rw-r--r--  drivers/iommu/mtk_iommu.c | 90
-rw-r--r--  drivers/iommu/mtk_iommu.h | 2
-rw-r--r--  drivers/iommu/mtk_iommu_v1.c | 2
-rw-r--r--  drivers/iommu/omap-iommu.c | 2
-rw-r--r--  drivers/iommu/qcom_iommu.c | 10
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 11
-rw-r--r--  drivers/iommu/s390-iommu.c | 2
-rw-r--r--  drivers/iommu/tegra-gart.c | 2
-rw-r--r--  drivers/iommu/tegra-smmu.c | 38
-rw-r--r--  drivers/iommu/virtio-iommu.c | 5
-rw-r--r--  drivers/memory/mtk-smi.c | 4
-rw-r--r--  include/linux/dmar.h | 2
-rw-r--r--  include/linux/io-pgtable.h | 2
-rw-r--r--  include/linux/ioasid.h | 76
-rw-r--r--  include/linux/iommu.h | 65
-rw-r--r--  include/linux/qcom_scm.h | 2
-rw-r--r--  include/uapi/linux/iommu.h | 169
43 files changed, 1651 insertions, 1213 deletions
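
The "Custom PASID allocator support" item in the message above is backed by the new IOASID library listed in the diffstat (drivers/iommu/ioasid.c and include/linux/ioasid.h). The following is a minimal, hedged sketch of how an IOMMU driver could plug in its own allocator through that library; the function names and signatures are taken from the header added by this series and should be treated as assumptions, and the backend logic here is a hypothetical placeholder.

#include <linux/ioasid.h>

static struct ioasid_set example_set;

/* Hypothetical backend: e.g. a paravirtual driver asking the host for PASIDs. */
static ioasid_t example_alloc(ioasid_t min, ioasid_t max, void *data)
{
	static ioasid_t next = 1;

	/* A real backend would issue a virtual command to the host here. */
	if (next < min || next > max)
		return INVALID_IOASID;
	return next++;
}

static void example_free(ioasid_t ioasid, void *data)
{
	/* Tell the backend the PASID is no longer in use. */
}

static struct ioasid_allocator_ops example_allocator = {
	.alloc = example_alloc,
	.free  = example_free,
};

static int example_init(void)
{
	ioasid_t pasid;

	/* Override the default (xarray-based) allocator with the custom one. */
	if (ioasid_register_allocator(&example_allocator))
		return -ENODEV;

	pasid = ioasid_alloc(&example_set, 1, 0xfffff, NULL);
	if (pasid == INVALID_IOASID)
		return -ENOSPC;

	ioasid_free(pasid);
	ioasid_unregister_allocator(&example_allocator);
	return 0;
}
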
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
index b6bfbec3a849..020d6f226efb 100644
--- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
@@ -15,6 +15,7 @@ Required Properties:
- "renesas,ipmmu-r8a7744" for the R8A7744 (RZ/G1N) IPMMU.
- "renesas,ipmmu-r8a7745" for the R8A7745 (RZ/G1E) IPMMU.
- "renesas,ipmmu-r8a774a1" for the R8A774A1 (RZ/G2M) IPMMU.
+ - "renesas,ipmmu-r8a774b1" for the R8A774B1 (RZ/G2N) IPMMU.
- "renesas,ipmmu-r8a774c0" for the R8A774C0 (RZ/G2E) IPMMU.
- "renesas,ipmmu-r8a7790" for the R8A7790 (R-Car H2) IPMMU.
- "renesas,ipmmu-r8a7791" for the R8A7791 (R-Car M2-W) IPMMU.
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index 7904f591a79b..eb0db20c9d4c 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -2,6 +2,8 @@
#ifndef _ASM_IA64_IOMMU_H
#define _ASM_IA64_IOMMU_H 1
+#include <linux/acpi.h>
+
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
@@ -9,6 +11,9 @@ extern void no_iommu_init(void);
#ifdef CONFIG_INTEL_IOMMU
extern int force_iommu, no_iommu;
extern int iommu_detected;
+
+static inline int __init
+arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr) { return 0; }
#else
#define no_iommu (1)
#define iommu_detected (0)
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index b91623d521d9..bf1ed2ddc74b 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -2,10 +2,28 @@
#ifndef _ASM_X86_IOMMU_H
#define _ASM_X86_IOMMU_H
+#include <linux/acpi.h>
+
+#include <asm/e820/api.h>
+
extern int force_iommu, no_iommu;
extern int iommu_detected;
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
+static inline int __init
+arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
+{
+ u64 start = rmrr->base_address;
+ u64 end = rmrr->end_address + 1;
+
+ if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
+ return 0;
+
+ pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
+ start, end - 1);
+ return -EINVAL;
+}
+
#endif /* _ASM_X86_IOMMU_H */
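
For context on how the new arch_rmrr_sanity_check() helper is meant to be consumed: per the diffstat this pull also touches drivers/iommu/dmar.c, and the commit list includes "iommu/vt-d: Check VT-d RMRR region in BIOS is reported as reserved". Below is a hedged sketch of a caller on the DMAR parsing path; the function and its error handling are illustrative placeholders, not the exact dmar.c hunk.

#include <linux/acpi.h>
#include <asm/iommu.h>

/*
 * Illustrative only: flag an RMRR whose range is not covered by a
 * firmware-reserved (e820) region before registering it.
 */
static int example_check_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr =
		container_of(header, struct acpi_dmar_reserved_memory, header);

	if (arch_rmrr_sanity_check(rmrr)) {
		/* The x86 helper already printed an FW_BUG message. */
		pr_warn("RMRR [%#018Lx-%#018Lx] failed the reserved-region check\n",
			rmrr->base_address, rmrr->end_address);
	}

	/* ... continue with the normal RMRR registration ... */
	return 0;
}
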
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 215061c581e1..bee8729525ec 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -614,3 +614,8 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
addr, val);
}
+
+int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable)
+{
+ return -ENODEV;
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index 91d5ad7cf58b..e1cd933ea9ae 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -62,32 +62,72 @@ static DEFINE_MUTEX(qcom_scm_lock);
#define FIRST_EXT_ARG_IDX 3
#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
-/**
- * qcom_scm_call() - Invoke a syscall in the secure world
- * @dev: device
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @desc: Descriptor structure containing arguments and return values
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- * This should *only* be called in pre-emptible context.
-*/
-static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
- const struct qcom_scm_desc *desc,
- struct arm_smccc_res *res)
+static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, u32 fn_id,
+ u64 x5, u32 type)
+{
+ u64 cmd;
+ struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };
+
+ cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
+ ARM_SMCCC_OWNER_SIP, fn_id);
+
+ quirk.state.a6 = 0;
+
+ do {
+ arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2], x5,
+ quirk.state.a6, 0, res, &quirk);
+
+ if (res->a0 == QCOM_SCM_INTERRUPTED)
+ cmd = res->a0;
+
+ } while (res->a0 == QCOM_SCM_INTERRUPTED);
+}
+
+static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, u32 fn_id,
+ u64 x5, bool atomic)
+{
+ int retry_count = 0;
+
+ if (atomic) {
+ __qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
+ return;
+ }
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ __qcom_scm_call_do(desc, res, fn_id, x5,
+ ARM_SMCCC_STD_CALL);
+
+ mutex_unlock(&qcom_scm_lock);
+
+ if (res->a0 == QCOM_SCM_V2_EBUSY) {
+ if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
+ break;
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ }
+ } while (res->a0 == QCOM_SCM_V2_EBUSY);
+}
+
+static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
- int retry_count = 0, i;
+ int i;
u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
- u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX];
+ u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
- struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6};
+ gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
if (unlikely(arglen > N_REGISTER_ARGS)) {
alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
- args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+ args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
if (!args_virt)
return -ENOMEM;
@@ -117,46 +157,56 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
x5 = args_phys;
}
- do {
- mutex_lock(&qcom_scm_lock);
-
- cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
- qcom_smccc_convention,
- ARM_SMCCC_OWNER_SIP, fn_id);
-
- quirk.state.a6 = 0;
-
- do {
- arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
- desc->args[1], desc->args[2], x5,
- quirk.state.a6, 0, res, &quirk);
-
- if (res->a0 == QCOM_SCM_INTERRUPTED)
- cmd = res->a0;
-
- } while (res->a0 == QCOM_SCM_INTERRUPTED);
-
- mutex_unlock(&qcom_scm_lock);
-
- if (res->a0 == QCOM_SCM_V2_EBUSY) {
- if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
- break;
- msleep(QCOM_SCM_EBUSY_WAIT_MS);
- }
- } while (res->a0 == QCOM_SCM_V2_EBUSY);
+ qcom_scm_call_do(desc, res, fn_id, x5, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt);
}
- if (res->a0 < 0)
+ if ((long)res->a0 < 0)
return qcom_scm_remap_error(res->a0);
return 0;
}
/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ */
+static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res)
+{
+ might_sleep();
+ return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
+}
+
+/**
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev: device
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
+ * @res: Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
+ */
+static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
+ const struct qcom_scm_desc *desc,
+ struct arm_smccc_res *res)
+{
+ return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
+}
+
+/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
* @cpus: The cpumask of cpus that will use the entry point
@@ -502,3 +552,16 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
&desc, &res);
}
+
+int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
+{
+ struct qcom_scm_desc desc = {0};
+ struct arm_smccc_res res;
+
+ desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL;
+ desc.args[1] = en;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
+ QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
+}
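
The split between qcom_scm_call() and qcom_scm_call_atomic() above comes down to whether the caller may sleep: the former asserts might_sleep(), serialises on qcom_scm_lock and retries QCOM_SCM_V2_EBUSY with msleep(), while the latter issues a FAST call and uses GFP_ATOMIC for any argument buffer. Below is a hedged sketch of an in-file caller (both helpers are static to qcom_scm-64.c); the service and command IDs are hypothetical, only the descriptor pattern mirrors __qcom_scm_qsmmu500_wait_safe_toggle() above.

#define EXAMPLE_SCM_SVC		0x42	/* hypothetical service id */
#define EXAMPLE_SCM_CMD		0x1	/* hypothetical command id */

static int __example_scm_poke(struct device *dev, u32 value, bool in_atomic)
{
	struct qcom_scm_desc desc = {0};
	struct arm_smccc_res res;

	desc.args[0] = value;
	desc.arginfo = QCOM_SCM_ARGS(1);

	/*
	 * Atomic context: fast call, no locking, GFP_ATOMIC allocations.
	 * Sleeping context: standard call with locking and EBUSY retries.
	 */
	if (in_atomic)
		return qcom_scm_call_atomic(dev, EXAMPLE_SCM_SVC,
					    EXAMPLE_SCM_CMD, &desc, &res);

	return qcom_scm_call(dev, EXAMPLE_SCM_SVC, EXAMPLE_SCM_CMD,
			     &desc, &res);
}
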
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 4802ab170fe5..a729e05c21b8 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -345,6 +345,12 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+ return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en);
+}
+EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
+
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
return __qcom_scm_io_readl(__scm->dev, addr, val);
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 99506bd873c0..baee744dbcfe 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -91,10 +91,15 @@ extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
u32 spare);
#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3
#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4
+#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
+#define QCOM_SCM_CONFIG_ERRATA1 0x3
+#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
size_t *size);
extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
u32 size, u32 spare);
+extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev,
+ bool enable);
#define QCOM_MEM_PROT_ASSIGN_ID 0x16
extern int __qcom_scm_assign_mem(struct device *dev,
phys_addr_t mem_region, size_t mem_sz,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index f1086eaed41c..c0b871229d81 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -3,6 +3,10 @@
config IOMMU_IOVA
tristate
+# The IOASID library may also be used by non-IOMMU_API users
+config IOASID
+ tristate
+
# IOMMU_API always gets selected by whoever wants it.
config IOMMU_API
bool
@@ -138,6 +142,7 @@ config AMD_IOMMU
select PCI_PASID
select IOMMU_API
select IOMMU_IOVA
+ select IOMMU_DMA
depends on X86_64 && PCI && ACPI
---help---
With this option you can enable support for AMD IOMMU hardware in
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 4f405f926e73..97814cc861ea 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -7,13 +7,14 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOASID) += ioasid.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 12e5039a7a25..bd25674ee4db 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -20,6 +20,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>
+#include <linux/dma-iommu.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
@@ -88,8 +89,6 @@ const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
-static const struct dma_map_ops amd_iommu_dma_ops;
-
/*
* general struct to manage commands send to an IOMMU
*/
@@ -102,21 +101,6 @@ struct kmem_cache *amd_iommu_irq_cache;
static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
-static void iova_domain_flush_tlb(struct iova_domain *iovad);
-
-/*
- * Data container for a dma_ops specific protection domain
- */
-struct dma_ops_domain {
- /* generic protection domain information */
- struct protection_domain domain;
-
- /* IOVA RB-Tree */
- struct iova_domain iovad;
-};
-
-static struct iova_domain reserved_iova_ranges;
-static struct lock_class_key reserved_rbtree_key;
/****************************************************************************
*
@@ -167,12 +151,6 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
return container_of(dom, struct protection_domain, domain);
}
-static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain)
-{
- BUG_ON(domain->flags != PD_DMA_OPS_MASK);
- return container_of(domain, struct dma_ops_domain, domain);
-}
-
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@@ -206,71 +184,61 @@ static struct iommu_dev_data *search_dev_data(u16 devid)
return NULL;
}
-static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
- *(u16 *)data = alias;
- return 0;
-}
-
-static u16 get_alias(struct device *dev)
+static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- u16 devid, ivrs_alias, pci_alias;
-
- /* The callers make sure that get_device_id() does not fail here */
- devid = get_device_id(dev);
+ u16 devid = pci_dev_id(pdev);
- /* For ACPI HID devices, we simply return the devid as such */
- if (!dev_is_pci(dev))
- return devid;
+ if (devid == alias)
+ return 0;
- ivrs_alias = amd_iommu_alias_table[devid];
+ amd_iommu_rlookup_table[alias] =
+ amd_iommu_rlookup_table[devid];
+ memcpy(amd_iommu_dev_table[alias].data,
+ amd_iommu_dev_table[devid].data,
+ sizeof(amd_iommu_dev_table[alias].data));
- pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+ return 0;
+}
- if (ivrs_alias == pci_alias)
- return ivrs_alias;
+static void clone_aliases(struct pci_dev *pdev)
+{
+ if (!pdev)
+ return;
/*
- * DMA alias showdown
- *
- * The IVRS is fairly reliable in telling us about aliases, but it
- * can't know about every screwy device. If we don't have an IVRS
- * reported alias, use the PCI reported alias. In that case we may
- * still need to initialize the rlookup and dev_table entries if the
- * alias is to a non-existent device.
+ * The IVRS alias stored in the alias table may not be
+ * part of the PCI DMA aliases if it's bus differs
+ * from the original device.
*/
- if (ivrs_alias == devid) {
- if (!amd_iommu_rlookup_table[pci_alias]) {
- amd_iommu_rlookup_table[pci_alias] =
- amd_iommu_rlookup_table[devid];
- memcpy(amd_iommu_dev_table[pci_alias].data,
- amd_iommu_dev_table[devid].data,
- sizeof(amd_iommu_dev_table[pci_alias].data));
- }
+ clone_alias(pdev, amd_iommu_alias_table[pci_dev_id(pdev)], NULL);
- return pci_alias;
- }
+ pci_for_each_dma_alias(pdev, clone_alias, NULL);
+}
- pci_info(pdev, "Using IVRS reported alias %02x:%02x.%d "
- "for device [%04x:%04x], kernel reported alias "
- "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
- PCI_FUNC(ivrs_alias), pdev->vendor, pdev->device,
- PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
- PCI_FUNC(pci_alias));
+static struct pci_dev *setup_aliases(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 ivrs_alias;
+
+ /* For ACPI HID devices, there are no aliases */
+ if (!dev_is_pci(dev))
+ return NULL;
/*
- * If we don't have a PCI DMA alias and the IVRS alias is on the same
- * bus, then the IVRS table may know about a quirk that we don't.
+ * Add the IVRS alias to the pci aliases if it is on the same
+ * bus. The IVRS table may know about a quirk that we don't.
*/
- if (pci_alias == devid &&
+ ivrs_alias = amd_iommu_alias_table[pci_dev_id(pdev)];
+ if (ivrs_alias != pci_dev_id(pdev) &&
PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
pci_add_dma_alias(pdev, ivrs_alias & 0xff);
pci_info(pdev, "Added PCI DMA alias %02x.%d\n",
PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias));
}
- return ivrs_alias;
+ clone_aliases(pdev);
+
+ return pdev;
}
static struct iommu_dev_data *find_dev_data(u16 devid)
@@ -408,7 +376,7 @@ static int iommu_init_device(struct device *dev)
if (!dev_data)
return -ENOMEM;
- dev_data->alias = get_alias(dev);
+ dev_data->pdev = setup_aliases(dev);
/*
* By default we use passthrough mode for IOMMUv2 capable device.
@@ -433,20 +401,16 @@ static int iommu_init_device(struct device *dev)
static void iommu_ignore_device(struct device *dev)
{
- u16 alias;
int devid;
devid = get_device_id(dev);
if (devid < 0)
return;
- alias = get_alias(dev);
-
+ amd_iommu_rlookup_table[devid] = NULL;
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
- memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
- amd_iommu_rlookup_table[devid] = NULL;
- amd_iommu_rlookup_table[alias] = NULL;
+ setup_aliases(dev);
}
static void iommu_uninit_device(struct device *dev)
@@ -620,8 +584,7 @@ retry:
pasid, address, flags);
break;
case EVENT_TYPE_INV_PPR_REQ:
- pasid = ((event[0] >> 16) & 0xFFFF)
- | ((event[1] << 6) & 0xF0000);
+ pasid = PPR_PASID(*((u64 *)__evt));
tag = event[1] & 0x03FF;
dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
@@ -856,17 +819,18 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
struct iommu_cmd *cmd)
{
u8 *target;
-
- target = iommu->cmd_buf + iommu->cmd_buf_tail;
-
- iommu->cmd_buf_tail += sizeof(*cmd);
- iommu->cmd_buf_tail %= CMD_BUFFER_SIZE;
+ u32 tail;
/* Copy command to buffer */
+ tail = iommu->cmd_buf_tail;
+ target = iommu->cmd_buf + tail;
memcpy(target, cmd, sizeof(*cmd));
+ tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
+ iommu->cmd_buf_tail = tail;
+
/* Tell the IOMMU about it */
- writel(iommu->cmd_buf_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+ writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}
static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
@@ -1216,6 +1180,13 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
return iommu_queue_command(iommu, &cmd);
}
+static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct amd_iommu *iommu = data;
+
+ return iommu_flush_dte(iommu, alias);
+}
+
/*
* Command send function for invalidating a device table entry
*/
@@ -1226,14 +1197,22 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
int ret;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
- ret = iommu_flush_dte(iommu, dev_data->devid);
- if (!ret && alias != dev_data->devid)
- ret = iommu_flush_dte(iommu, alias);
+ if (dev_data->pdev)
+ ret = pci_for_each_dma_alias(dev_data->pdev,
+ device_flush_dte_alias, iommu);
+ else
+ ret = iommu_flush_dte(iommu, dev_data->devid);
if (ret)
return ret;
+ alias = amd_iommu_alias_table[dev_data->devid];
+ if (alias != dev_data->devid) {
+ ret = iommu_flush_dte(iommu, alias);
+ if (ret)
+ return ret;
+ }
+
if (dev_data->ats.enabled)
ret = device_flush_iotlb(dev_data, 0, ~0UL);
@@ -1282,12 +1261,6 @@ static void domain_flush_pages(struct protection_domain *domain,
__domain_flush_pages(domain, address, size, 0);
}
-/* Flush the whole IO/TLB for a given protection domain */
-static void domain_flush_tlb(struct protection_domain *domain)
-{
- __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
-}
-
/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void domain_flush_tlb_pde(struct protection_domain *domain)
{
@@ -1735,43 +1708,6 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
/****************************************************************************
*
- * The next functions belong to the address allocator for the dma_ops
- * interface functions.
- *
- ****************************************************************************/
-
-
-static unsigned long dma_ops_alloc_iova(struct device *dev,
- struct dma_ops_domain *dma_dom,
- unsigned int pages, u64 dma_mask)
-{
- unsigned long pfn = 0;
-
- pages = __roundup_pow_of_two(pages);
-
- if (dma_mask > DMA_BIT_MASK(32))
- pfn = alloc_iova_fast(&dma_dom->iovad, pages,
- IOVA_PFN(DMA_BIT_MASK(32)), false);
-
- if (!pfn)
- pfn = alloc_iova_fast(&dma_dom->iovad, pages,
- IOVA_PFN(dma_mask), true);
-
- return (pfn << PAGE_SHIFT);
-}
-
-static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
- unsigned long address,
- unsigned int pages)
-{
- pages = __roundup_pow_of_two(pages);
- address >>= PAGE_SHIFT;
-
- free_iova_fast(&dma_dom->iovad, address, pages);
-}
-
-/****************************************************************************
- *
* The next functions belong to the domain allocation. A domain is
* allocated for every IOMMU as the default domain. If device isolation
* is enabled, every device get its own domain. The most important thing
@@ -1846,42 +1782,23 @@ static void free_gcr3_table(struct protection_domain *domain)
free_page((unsigned long)domain->gcr3_tbl);
}
-static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dom->domain.lock, flags);
- domain_flush_tlb(&dom->domain);
- domain_flush_complete(&dom->domain);
- spin_unlock_irqrestore(&dom->domain.lock, flags);
-}
-
-static void iova_domain_flush_tlb(struct iova_domain *iovad)
-{
- struct dma_ops_domain *dom;
-
- dom = container_of(iovad, struct dma_ops_domain, iovad);
-
- dma_ops_domain_flush_tlb(dom);
-}
-
/*
* Free a domain, only used if something went wrong in the
* allocation path and we need to free an already allocated page table
*/
-static void dma_ops_domain_free(struct dma_ops_domain *dom)
+static void dma_ops_domain_free(struct protection_domain *domain)
{
- if (!dom)
+ if (!domain)
return;
- put_iova_domain(&dom->iovad);
+ iommu_put_dma_cookie(&domain->domain);
- free_pagetable(&dom->domain);
+ free_pagetable(domain);
- if (dom->domain.id)
- domain_id_free(dom->domain.id);
+ if (domain->id)
+ domain_id_free(domain->id);
- kfree(dom);
+ kfree(domain);
}
/*
@@ -1889,35 +1806,30 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
* It also initializes the page table and the address allocator data
* structures required for the dma_ops interface
*/
-static struct dma_ops_domain *dma_ops_domain_alloc(void)
+static struct protection_domain *dma_ops_domain_alloc(void)
{
- struct dma_ops_domain *dma_dom;
+ struct protection_domain *domain;
- dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
- if (!dma_dom)
+ domain = kzalloc(sizeof(struct protection_domain), GFP_KERNEL);
+ if (!domain)
return NULL;
- if (protection_domain_init(&dma_dom->domain))
- goto free_dma_dom;
-
- dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
- dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
- dma_dom->domain.flags = PD_DMA_OPS_MASK;
- if (!dma_dom->domain.pt_root)
- goto free_dma_dom;
-
- init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
+ if (protection_domain_init(domain))
+ goto free_domain;
- if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
- goto free_dma_dom;
+ domain->mode = PAGE_MODE_3_LEVEL;
+ domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
+ domain->flags = PD_DMA_OPS_MASK;
+ if (!domain->pt_root)
+ goto free_domain;
- /* Initialize reserved ranges */
- copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
+ if (iommu_get_dma_cookie(&domain->domain) == -ENOMEM)
+ goto free_domain;
- return dma_dom;
+ return domain;
-free_dma_dom:
- dma_ops_domain_free(dma_dom);
+free_domain:
+ dma_ops_domain_free(domain);
return NULL;
}
@@ -2015,11 +1927,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
struct protection_domain *domain)
{
struct amd_iommu *iommu;
- u16 alias;
bool ats;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
ats = dev_data->ats.enabled;
/* Update data structures */
@@ -2032,8 +1942,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
/* Update device table */
set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
- if (alias != dev_data->devid)
- set_dte_entry(alias, domain, ats, dev_data->iommu_v2);
+ clone_aliases(dev_data->pdev);
device_flush_dte(dev_data);
}
@@ -2042,17 +1951,14 @@ static void do_detach(struct iommu_dev_data *dev_data)
{
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
- u16 alias;
iommu = amd_iommu_rlookup_table[dev_data->devid];
- alias = dev_data->alias;
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
clear_dte_entry(dev_data->devid);
- if (alias != dev_data->devid)
- clear_dte_entry(alias);
+ clone_aliases(dev_data->pdev);
/* Flush the DTE entry */
device_flush_dte(dev_data);
@@ -2285,8 +2191,8 @@ static int amd_iommu_add_device(struct device *dev)
domain = iommu_get_domain_for_dev(dev);
if (domain->type == IOMMU_DOMAIN_IDENTITY)
dev_data->passthrough = true;
- else
- dev->dma_ops = &amd_iommu_dma_ops;
+ else if (domain->type == IOMMU_DOMAIN_DMA)
+ iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
out:
iommu_completion_wait(iommu);
@@ -2320,43 +2226,32 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
return acpihid_device_group(dev);
}
+static int amd_iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ return -ENODEV;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ *(int *)data = !amd_iommu_unmap_flush;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+}
+
/*****************************************************************************
*
* The next functions belong to the dma_ops mapping/unmapping code.
*
*****************************************************************************/
-/*
- * In the dma_ops path we only have the struct device. This function
- * finds the corresponding IOMMU, the protection domain and the
- * requestor id for a given device.
- * If the device is not yet associated with a domain this is also done
- * in this function.
- */
-static struct protection_domain *get_domain(struct device *dev)
-{
- struct protection_domain *domain;
- struct iommu_domain *io_domain;
-
- if (!check_device(dev))
- return ERR_PTR(-EINVAL);
-
- domain = get_dev_data(dev)->domain;
- if (domain == NULL && get_dev_data(dev)->defer_attach) {
- get_dev_data(dev)->defer_attach = false;
- io_domain = iommu_get_domain_for_dev(dev);
- domain = to_pdomain(io_domain);
- attach_device(dev, domain);
- }
- if (domain == NULL)
- return ERR_PTR(-EBUSY);
-
- if (!dma_ops_domain(domain))
- return ERR_PTR(-EBUSY);
-
- return domain;
-}
-
static void update_device_table(struct protection_domain *domain)
{
struct iommu_dev_data *dev_data;
@@ -2364,13 +2259,7 @@ static void update_device_table(struct protection_domain *domain)
list_for_each_entry(dev_data, &domain->dev_list, list) {
set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
dev_data->iommu_v2);
-
- if (dev_data->devid == dev_data->alias)
- continue;
-
- /* There is an alias, update device table entry for it */
- set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled,
- dev_data->iommu_v2);
+ clone_aliases(dev_data->pdev);
}
}
@@ -2382,458 +2271,6 @@ static void update_domain(struct protection_domain *domain)
domain_flush_tlb_pde(domain);
}
-static int dir2prot(enum dma_data_direction direction)
-{
- if (direction == DMA_TO_DEVICE)
- return IOMMU_PROT_IR;
- else if (direction == DMA_FROM_DEVICE)
- return IOMMU_PROT_IW;
- else if (direction == DMA_BIDIRECTIONAL)
- return IOMMU_PROT_IW | IOMMU_PROT_IR;
- else
- return 0;
-}
-
-/*
- * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is used by all
- * mapping functions provided with this IOMMU driver.
- * Must be called with the domain lock held.
- */
-static dma_addr_t __map_single(struct device *dev,
- struct dma_ops_domain *dma_dom,
- phys_addr_t paddr,
- size_t size,
- enum dma_data_direction direction,
- u64 dma_mask)
-{
- dma_addr_t offset = paddr & ~PAGE_MASK;
- dma_addr_t address, start, ret;
- unsigned long flags;
- unsigned int pages;
- int prot = 0;
- int i;
-
- pages = iommu_num_pages(paddr, size, PAGE_SIZE);
- paddr &= PAGE_MASK;
-
- address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
- if (!address)
- goto out;
-
- prot = dir2prot(direction);
-
- start = address;
- for (i = 0; i < pages; ++i) {
- ret = iommu_map_page(&dma_dom->domain, start, paddr,
- PAGE_SIZE, prot, GFP_ATOMIC);
- if (ret)
- goto out_unmap;
-
- paddr += PAGE_SIZE;
- start += PAGE_SIZE;
- }
- address += offset;
-
- domain_flush_np_cache(&dma_dom->domain, address, size);
-
-out:
- return address;
-
-out_unmap:
-
- for (--i; i >= 0; --i) {
- start -= PAGE_SIZE;
- iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
- }
-
- spin_lock_irqsave(&dma_dom->domain.lock, flags);
- domain_flush_tlb(&dma_dom->domain);
- domain_flush_complete(&dma_dom->domain);
- spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
-
- dma_ops_free_iova(dma_dom, address, pages);
-
- return DMA_MAPPING_ERROR;
-}
-
-/*
- * Does the reverse of the __map_single function. Must be called with
- * the domain lock held too
- */
-static void __unmap_single(struct dma_ops_domain *dma_dom,
- dma_addr_t dma_addr,
- size_t size,
- int dir)
-{
- dma_addr_t i, start;
- unsigned int pages;
-
- pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
- dma_addr &= PAGE_MASK;
- start = dma_addr;
-
- for (i = 0; i < pages; ++i) {
- iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
- start += PAGE_SIZE;
- }
-
- if (amd_iommu_unmap_flush) {
- unsigned long flags;
-
- spin_lock_irqsave(&dma_dom->domain.lock, flags);
- domain_flush_tlb(&dma_dom->domain);
- domain_flush_complete(&dma_dom->domain);
- spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
- dma_ops_free_iova(dma_dom, dma_addr, pages);
- } else {
- pages = __roundup_pow_of_two(pages);
- queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0);
- }
-}
-
-/*
- * The exported map_single function for dma_ops.
- */
-static dma_addr_t map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- phys_addr_t paddr = page_to_phys(page) + offset;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- u64 dma_mask;
-
- domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL)
- return (dma_addr_t)paddr;
- else if (IS_ERR(domain))
- return DMA_MAPPING_ERROR;
-
- dma_mask = *dev->dma_mask;
- dma_dom = to_dma_ops_domain(domain);
-
- return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
-}
-
-/*
- * The exported unmap_single function for dma_ops.
- */
-static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return;
-
- dma_dom = to_dma_ops_domain(domain);
-
- __unmap_single(dma_dom, dma_addr, size, dir);
-}
-
-static int sg_num_pages(struct device *dev,
- struct scatterlist *sglist,
- int nelems)
-{
- unsigned long mask, boundary_size;
- struct scatterlist *s;
- int i, npages = 0;
-
- mask = dma_get_seg_boundary(dev);
- boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
- 1UL << (BITS_PER_LONG - PAGE_SHIFT);
-
- for_each_sg(sglist, s, nelems, i) {
- int p, n;
-
- s->dma_address = npages << PAGE_SHIFT;
- p = npages % boundary_size;
- n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
- if (p + n > boundary_size)
- npages += boundary_size - p;
- npages += n;
- }
-
- return npages;
-}
-
-/*
- * The exported map_sg function for dma_ops (handles scatter-gather
- * lists).
- */
-static int map_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction,
- unsigned long attrs)
-{
- int mapped_pages = 0, npages = 0, prot = 0, i;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- struct scatterlist *s;
- unsigned long address;
- u64 dma_mask;
- int ret;
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return 0;
-
- dma_dom = to_dma_ops_domain(domain);
- dma_mask = *dev->dma_mask;
-
- npages = sg_num_pages(dev, sglist, nelems);
-
- address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
- if (!address)
- goto out_err;
-
- prot = dir2prot(direction);
-
- /* Map all sg entries */
- for_each_sg(sglist, s, nelems, i) {
- int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
-
- for (j = 0; j < pages; ++j) {
- unsigned long bus_addr, phys_addr;
-
- bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
- phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
- ret = iommu_map_page(domain, bus_addr, phys_addr,
- PAGE_SIZE, prot,
- GFP_ATOMIC | __GFP_NOWARN);
- if (ret)
- goto out_unmap;
-
- mapped_pages += 1;
- }
- }
-
- /* Everything is mapped - write the right values into s->dma_address */
- for_each_sg(sglist, s, nelems, i) {
- /*
- * Add in the remaining piece of the scatter-gather offset that
- * was masked out when we were determining the physical address
- * via (sg_phys(s) & PAGE_MASK) earlier.
- */
- s->dma_address += address + (s->offset & ~PAGE_MASK);
- s->dma_length = s->length;
- }
-
- if (s)
- domain_flush_np_cache(domain, s->dma_address, s->dma_length);
-
- return nelems;
-
-out_unmap:
- dev_err(dev, "IOMMU mapping error in map_sg (io-pages: %d reason: %d)\n",
- npages, ret);
-
- for_each_sg(sglist, s, nelems, i) {
- int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
-
- for (j = 0; j < pages; ++j) {
- unsigned long bus_addr;
-
- bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
- iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
-
- if (--mapped_pages == 0)
- goto out_free_iova;
- }
- }
-
-out_free_iova:
- free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
-
-out_err:
- return 0;
-}
-
-/*
- * The exported map_sg function for dma_ops (handles scatter-gather
- * lists).
- */
-static void unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- unsigned long startaddr;
- int npages;
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- return;
-
- startaddr = sg_dma_address(sglist) & PAGE_MASK;
- dma_dom = to_dma_ops_domain(domain);
- npages = sg_num_pages(dev, sglist, nelems);
-
- __unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
-}
-
-/*
- * The exported alloc_coherent function for dma_ops.
- */
-static void *alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag,
- unsigned long attrs)
-{
- u64 dma_mask = dev->coherent_dma_mask;
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- struct page *page;
-
- domain = get_domain(dev);
- if (PTR_ERR(domain) == -EINVAL) {
- page = alloc_pages(flag, get_order(size));
- *dma_addr = page_to_phys(page);
- return page_address(page);
- } else if (IS_ERR(domain))
- return NULL;
-
- dma_dom = to_dma_ops_domain(domain);
- size = PAGE_ALIGN(size);
- dma_mask = dev->coherent_dma_mask;
- flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
- flag |= __GFP_ZERO;
-
- page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
- if (!page) {
- if (!gfpflags_allow_blocking(flag))
- return NULL;
-
- page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), flag & __GFP_NOWARN);
- if (!page)
- return NULL;
- }
-
- if (!dma_mask)
- dma_mask = *dev->dma_mask;
-
- *dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
- size, DMA_BIDIRECTIONAL, dma_mask);
-
- if (*dma_addr == DMA_MAPPING_ERROR)
- goto out_free;
-
- return page_address(page);
-
-out_free:
-
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, get_order(size));
-
- return NULL;
-}
-
-/*
- * The exported free_coherent function for dma_ops.
- */
-static void free_coherent(struct device *dev, size_t size,
- void *virt_addr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
- struct page *page;
-
- page = virt_to_page(virt_addr);
- size = PAGE_ALIGN(size);
-
- domain = get_domain(dev);
- if (IS_ERR(domain))
- goto free_mem;
-
- dma_dom = to_dma_ops_domain(domain);
-
- __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
-
-free_mem:
- if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
- __free_pages(page, get_order(size));
-}
-
-/*
- * This function is called by the DMA layer to find out if we can handle a
- * particular device. It is part of the dma_ops.
- */
-static int amd_iommu_dma_supported(struct device *dev, u64 mask)
-{
- if (!dma_direct_supported(dev, mask))
- return 0;
- return check_device(dev);
-}
-
-static const struct dma_map_ops amd_iommu_dma_ops = {
- .alloc = alloc_coherent,
- .free = free_coherent,
- .map_page = map_page,
- .unmap_page = unmap_page,
- .map_sg = map_sg,
- .unmap_sg = unmap_sg,
- .dma_supported = amd_iommu_dma_supported,
- .mmap = dma_common_mmap,
- .get_sgtable = dma_common_get_sgtable,
-};
-
-static int init_reserved_iova_ranges(void)
-{
- struct pci_dev *pdev = NULL;
- struct iova *val;
-
- init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
-
- lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
- &reserved_rbtree_key);
-
- /* MSI memory range */
- val = reserve_iova(&reserved_iova_ranges,
- IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END));
- if (!val) {
- pr_err("Reserving MSI range failed\n");
- return -ENOMEM;
- }
-
- /* HT memory range */
- val = reserve_iova(&reserved_iova_ranges,
- IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END));
- if (!val) {
- pr_err("Reserving HT range failed\n");
- return -ENOMEM;
- }
-
- /*
- * Memory used for PCI resources
- * FIXME: Check whether we can reserve the PCI-hole completly
- */
- for_each_pci_dev(pdev) {
- int i;
-
- for (i = 0; i < PCI_NUM_RESOURCES; ++i) {
- struct resource *r = &pdev->resource[i];
-
- if (!(r->flags & IORESOURCE_MEM))
- continue;
-
- val = reserve_iova(&reserved_iova_ranges,
- IOVA_PFN(r->start),
- IOVA_PFN(r->end));
- if (!val) {
- pci_err(pdev, "Reserve pci-resource range %pR failed\n", r);
- return -ENOMEM;
- }
- }
- }
-
- return 0;
-}
-
int __init amd_iommu_init_api(void)
{
int ret, err = 0;
@@ -2842,10 +2279,6 @@ int __init amd_iommu_init_api(void)
if (ret)
return ret;
- ret = init_reserved_iova_ranges();
- if (ret)
- return ret;
-
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
if (err)
return err;
@@ -2916,7 +2349,6 @@ static void protection_domain_free(struct protection_domain *domain)
static int protection_domain_init(struct protection_domain *domain)
{
spin_lock_init(&domain->lock);
- mutex_init(&domain->api_lock);
domain->id = domain_id_alloc();
if (!domain->id)
return -ENOMEM;
@@ -2947,7 +2379,6 @@ out_err:
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *pdomain;
- struct dma_ops_domain *dma_domain;
switch (type) {
case IOMMU_DOMAIN_UNMANAGED:
@@ -2968,12 +2399,11 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
break;
case IOMMU_DOMAIN_DMA:
- dma_domain = dma_ops_domain_alloc();
- if (!dma_domain) {
+ pdomain = dma_ops_domain_alloc();
+ if (!pdomain) {
pr_err("Failed to allocate\n");
return NULL;
}
- pdomain = &dma_domain->domain;
break;
case IOMMU_DOMAIN_IDENTITY:
pdomain = protection_domain_alloc();
@@ -2992,7 +2422,6 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
- struct dma_ops_domain *dma_dom;
domain = to_pdomain(dom);
@@ -3007,8 +2436,7 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
switch (dom->type) {
case IOMMU_DOMAIN_DMA:
/* Now release the domain */
- dma_dom = to_dma_ops_domain(domain);
- dma_ops_domain_free(dma_dom);
+ dma_ops_domain_free(domain);
break;
default:
if (domain->mode != PAGE_MODE_NONE)
@@ -3064,6 +2492,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
return -EINVAL;
dev_data = dev->archdata.iommu;
+ dev_data->defer_attach = false;
iommu = amd_iommu_rlookup_table[dev_data->devid];
if (!iommu)
@@ -3089,7 +2518,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
}
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, size_t page_size, int iommu_prot)
+ phys_addr_t paddr, size_t page_size, int iommu_prot,
+ gfp_t gfp)
{
struct protection_domain *domain = to_pdomain(dom);
int prot = 0;
@@ -3103,9 +2533,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
if (iommu_prot & IOMMU_WRITE)
prot |= IOMMU_PROT_IW;
- mutex_lock(&domain->api_lock);
- ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
- mutex_unlock(&domain->api_lock);
+ ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
domain_flush_np_cache(domain, iova, page_size);
@@ -3117,16 +2545,11 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
struct iommu_iotlb_gather *gather)
{
struct protection_domain *domain = to_pdomain(dom);
- size_t unmap_size;
if (domain->mode == PAGE_MODE_NONE)
return 0;
- mutex_lock(&domain->api_lock);
- unmap_size = iommu_unmap_page(domain, iova, page_size);
- mutex_unlock(&domain->api_lock);
-
- return unmap_size;
+ return iommu_unmap_page(domain, iova, page_size);
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -3227,19 +2650,6 @@ static void amd_iommu_put_resv_regions(struct device *dev,
kfree(entry);
}
-static void amd_iommu_apply_resv_region(struct device *dev,
- struct iommu_domain *domain,
- struct iommu_resv_region *region)
-{
- struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
- unsigned long start, end;
-
- start = IOVA_PFN(region->start);
- end = IOVA_PFN(region->start + region->length - 1);
-
- WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
-}
-
static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev)
{
@@ -3276,9 +2686,9 @@ const struct iommu_ops amd_iommu_ops = {
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
.device_group = amd_iommu_device_group,
+ .domain_get_attr = amd_iommu_domain_get_attr,
.get_resv_regions = amd_iommu_get_resv_regions,
.put_resv_regions = amd_iommu_put_resv_regions,
- .apply_resv_region = amd_iommu_apply_resv_region,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
@@ -3590,9 +3000,23 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr);
struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
struct protection_domain *pdomain;
+ struct iommu_domain *io_domain;
+ struct device *dev = &pdev->dev;
+
+ if (!check_device(dev))
+ return NULL;
- pdomain = get_domain(&pdev->dev);
- if (IS_ERR(pdomain))
+ pdomain = get_dev_data(dev)->domain;
+ if (pdomain == NULL && get_dev_data(dev)->defer_attach) {
+ get_dev_data(dev)->defer_attach = false;
+ io_domain = iommu_get_domain_for_dev(dev);
+ pdomain = to_pdomain(io_domain);
+ attach_device(dev, pdomain);
+ }
+ if (pdomain == NULL)
+ return NULL;
+
+ if (!dma_ops_domain(pdomain))
return NULL;
/* Only return IOMMUv2 domains */
@@ -3732,7 +3156,20 @@ static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
iommu_flush_dte(iommu, devid);
}
-static struct irq_remap_table *alloc_irq_table(u16 devid)
+static int set_remap_table_entry_alias(struct pci_dev *pdev, u16 alias,
+ void *data)
+{
+ struct irq_remap_table *table = data;
+
+ irq_lookup_table[alias] = table;
+ set_dte_irq_entry(alias, table);
+
+ iommu_flush_dte(amd_iommu_rlookup_table[alias], alias);
+
+ return 0;
+}
+
+static struct irq_remap_table *alloc_irq_table(u16 devid, struct pci_dev *pdev)
{
struct irq_remap_table *table = NULL;
struct irq_remap_table *new_table = NULL;
@@ -3778,7 +3215,12 @@ static struct irq_remap_table *alloc_irq_table(u16 devid)
table = new_table;
new_table = NULL;
- set_remap_table_entry(iommu, devid, table);
+ if (pdev)
+ pci_for_each_dma_alias(pdev, set_remap_table_entry_alias,
+ table);
+ else
+ set_remap_table_entry(iommu, devid, table);
+
if (devid != alias)
set_remap_table_entry(iommu, alias, table);
@@ -3795,7 +3237,8 @@ out_unlock:
return table;
}
-static int alloc_irq_index(u16 devid, int count, bool align)
+static int alloc_irq_index(u16 devid, int count, bool align,
+ struct pci_dev *pdev)
{
struct irq_remap_table *table;
int index, c, alignment = 1;
@@ -3805,7 +3248,7 @@ static int alloc_irq_index(u16 devid, int count, bool align)
if (!iommu)
return -ENODEV;
- table = alloc_irq_table(devid);
+ table = alloc_irq_table(devid, pdev);
if (!table)
return -ENODEV;
@@ -4238,7 +3681,7 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
struct irq_remap_table *table;
struct amd_iommu *iommu;
- table = alloc_irq_table(devid);
+ table = alloc_irq_table(devid, NULL);
if (table) {
if (!table->min_index) {
/*
@@ -4255,11 +3698,15 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
} else {
index = -ENOMEM;
}
- } else {
+ } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI ||
+ info->type == X86_IRQ_ALLOC_TYPE_MSIX) {
bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
- index = alloc_irq_index(devid, nr_irqs, align);
+ index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev);
+ } else {
+ index = alloc_irq_index(devid, nr_irqs, false, NULL);
}
+
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
ret = index;
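
The new amd_iommu_domain_get_attr() callback added above exists so that the shared dma-iommu layer, which now provides the AMD driver's DMA-API path, can ask whether the driver wants a flush queue (deferred IOTLB invalidation) instead of strict unmapping; for AMD it reports !amd_iommu_unmap_flush. Below is a hedged sketch of that query from the generic side using the existing iommu_domain_get_attr() helper; the surrounding function is illustrative, not the exact dma-iommu.c code.

#include <linux/iommu.h>

/*
 * Illustrative consumer: decide between strict invalidation and a lazy
 * flush queue based on what the driver reports for its DMA domain.
 */
static bool example_domain_wants_flush_queue(struct iommu_domain *domain)
{
	int attr = 0;

	if (domain->type != IOMMU_DOMAIN_DMA)
		return false;

	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
				  &attr))
		return false;	/* driver did not say: default to strict */

	return attr;
}
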
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 17bd5a349119..f52f59d5c6bd 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -468,7 +468,6 @@ struct protection_domain {
struct iommu_domain domain; /* generic domain handle used by
iommu core code */
spinlock_t lock; /* mostly used to lock the page table*/
- struct mutex api_lock; /* protect page tables in the iommu-api path */
u16 id; /* the domain id written to the device table */
int mode; /* paging mode (0-6 levels) */
u64 *pt_root; /* page table root pointer */
@@ -639,8 +638,8 @@ struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
+ struct pci_dev *pdev;
u16 devid; /* PCI Device ID */
- u16 alias; /* Alias Device ID */
bool iommu_v2; /* Device can make use of IOMMUv2 */
bool passthrough; /* Device is identity mapped */
struct {
diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm-smmu-impl.c
index 5c87a38620c4..b2fe72a8f019 100644
--- a/drivers/iommu/arm-smmu-impl.c
+++ b/drivers/iommu/arm-smmu-impl.c
@@ -109,7 +109,7 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm
#define ARM_MMU500_ACR_S2CRB_TLBEN (1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
-static int arm_mmu500_reset(struct arm_smmu_device *smmu)
+int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
u32 reg, major;
int i;
@@ -170,5 +170,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
"calxeda,smmu-secure-config-access"))
smmu->impl = &calxeda_impl;
+ if (of_device_is_compatible(smmu->dev->of_node, "qcom,sdm845-smmu-500"))
+ return qcom_smmu_impl_init(smmu);
+
return smmu;
}
diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c
new file mode 100644
index 000000000000..24c071c1d8b0
--- /dev/null
+++ b/drivers/iommu/arm-smmu-qcom.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/qcom_scm.h>
+
+#include "arm-smmu.h"
+
+struct qcom_smmu {
+ struct arm_smmu_device smmu;
+};
+
+static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
+{
+ int ret;
+
+ arm_mmu500_reset(smmu);
+
+ /*
+ * To address performance degradation in non-real time clients,
+ * such as USB and UFS, turn off wait-for-safe on sdm845 based boards,
+ * such as MTP and db845, whose firmwares implement secure monitor
+ * call handlers to turn on/off the wait-for-safe logic.
+ */
+ ret = qcom_scm_qsmmu500_wait_safe_toggle(0);
+ if (ret)
+ dev_warn(smmu->dev, "Failed to turn off SAFE logic\n");
+
+ return ret;
+}
+
+static const struct arm_smmu_impl qcom_smmu_impl = {
+ .reset = qcom_sdm845_smmu500_reset,
+};
+
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+ struct qcom_smmu *qsmmu;
+
+ qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
+ if (!qsmmu)
+ return ERR_PTR(-ENOMEM);
+
+ qsmmu->smmu = *smmu;
+
+ qsmmu->smmu.impl = &qcom_smmu_impl;
+ devm_kfree(smmu->dev, smmu);
+
+ return &qsmmu->smmu;
+}
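
The new arm-smmu-qcom.c file shows the intended extension pattern for the arm-smmu "impl" framework: match the SoC compatible in arm_smmu_impl_init(), wrap the generic arm_smmu_device in a vendor structure, and override only the hooks that need SoC-specific behaviour (here .reset; .tlb_sync is another hook the core now consults, as seen in the arm-smmu.c hunk below). arm_mmu500_reset() is made non-static earlier precisely so such implementations can reuse it. A hedged sketch of what a further, hypothetical platform integration could look like; the names and compatible string are made up.

#include <linux/of.h>

#include "arm-smmu.h"

/* Hypothetical vendor hook, not part of this pull. */
static int example_smmu_reset(struct arm_smmu_device *smmu)
{
	/* Run the standard MMU-500 reset, then apply board-specific quirks. */
	return arm_mmu500_reset(smmu);
}

static const struct arm_smmu_impl example_smmu_impl = {
	.reset = example_smmu_reset,
};

/* Would be called from arm_smmu_impl_init(), mirroring the sdm845 case. */
struct arm_smmu_device *example_smmu_impl_init(struct arm_smmu_device *smmu)
{
	if (of_device_is_compatible(smmu->dev->of_node,
				    "vendor,example-smmu-500"))
		smmu->impl = &example_smmu_impl;

	return smmu;
}
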
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8da93e730d6f..effe72eb89e7 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2172,7 +2172,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
cfg->cd.asid = (u16)asid;
cfg->cd.ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
cfg->cd.tcr = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
- cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
+ cfg->cd.mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
return 0;
out_free_asid:
@@ -2448,7 +2448,7 @@ out_unlock:
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
@@ -3611,19 +3611,19 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
/* Interrupt lines */
- irq = platform_get_irq_byname(pdev, "combined");
+ irq = platform_get_irq_byname_optional(pdev, "combined");
if (irq > 0)
smmu->combined_irq = irq;
else {
- irq = platform_get_irq_byname(pdev, "eventq");
+ irq = platform_get_irq_byname_optional(pdev, "eventq");
if (irq > 0)
smmu->evtq.q.irq = irq;
- irq = platform_get_irq_byname(pdev, "priq");
+ irq = platform_get_irq_byname_optional(pdev, "priq");
if (irq > 0)
smmu->priq.q.irq = irq;
- irq = platform_get_irq_byname(pdev, "gerror");
+ irq = platform_get_irq_byname_optional(pdev, "gerror");
if (irq > 0)
smmu->gerr_irq = irq;
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 7c503a6bc585..4f1a350d9529 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -36,6 +36,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/amba/bus.h>
@@ -122,7 +123,7 @@ static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
if (pm_runtime_enabled(smmu->dev))
- pm_runtime_put(smmu->dev);
+ pm_runtime_put_autosuspend(smmu->dev);
}
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
@@ -244,6 +245,9 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
unsigned int spin_cnt, delay;
u32 reg;
+ if (smmu->impl && unlikely(smmu->impl->tlb_sync))
+ return smmu->impl->tlb_sync(smmu, page, sync, status);
+
arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
@@ -268,9 +272,8 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}
-static void arm_smmu_tlb_sync_context(void *cookie)
+static void arm_smmu_tlb_sync_context(struct arm_smmu_domain *smmu_domain)
{
- struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
unsigned long flags;
@@ -280,13 +283,6 @@ static void arm_smmu_tlb_sync_context(void *cookie)
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
-static void arm_smmu_tlb_sync_vmid(void *cookie)
-{
- struct arm_smmu_domain *smmu_domain = cookie;
-
- arm_smmu_tlb_sync_global(smmu_domain->smmu);
-}
-
static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
@@ -297,7 +293,7 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
wmb();
arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
- arm_smmu_tlb_sync_context(cookie);
+ arm_smmu_tlb_sync_context(smmu_domain);
}
static void arm_smmu_tlb_inv_context_s2(void *cookie)
@@ -312,18 +308,16 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
}
static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+ size_t granule, void *cookie, int reg)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- int reg, idx = cfg->cbndx;
+ int idx = cfg->cbndx;
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
- reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
-
if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
iova = (iova >> 12) << 12;
iova |= cfg->asid;
@@ -342,16 +336,15 @@ static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
}
static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+ size_t granule, void *cookie, int reg)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
- int reg, idx = smmu_domain->cfg.cbndx;
+ int idx = smmu_domain->cfg.cbndx;
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
- reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
iova >>= 12;
do {
if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
@@ -362,85 +355,98 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
} while (size -= granule);
}
-/*
- * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
- * almost negligible, but the benefit of getting the first one in as far ahead
- * of the sync as possible is significant, hence we don't just make this a
- * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
- */
-static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
-
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- wmb();
+ arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVA);
+ arm_smmu_tlb_sync_context(cookie);
+}
- arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
+static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVAL);
+ arm_smmu_tlb_sync_context(cookie);
}
-static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
+static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+ arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie,
+ ARM_SMMU_CB_S1_TLBIVAL);
+}
- ops->tlb_inv_range(iova, size, granule, false, cookie);
- ops->tlb_sync(cookie);
+static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2);
+ arm_smmu_tlb_sync_context(cookie);
}
-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
- size_t granule, void *cookie)
+static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+ arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2L);
+ arm_smmu_tlb_sync_context(cookie);
+}
- ops->tlb_inv_range(iova, size, granule, true, cookie);
- ops->tlb_sync(cookie);
+static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie,
+ ARM_SMMU_CB_S2_TLBIIPAS2L);
}
-static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule,
- void *cookie)
+static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ arm_smmu_tlb_inv_context_s2(cookie);
+}
+/*
+ * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
+ * almost negligible, but the benefit of getting the first one in as far ahead
+ * of the sync as possible is significant, hence we don't just make this a
+ * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
+ * think.
+ */
+static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
- const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
- ops->tlb_inv_range(iova, granule, granule, true, cookie);
+ arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}
-static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
- .tlb = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_page = arm_smmu_tlb_add_page,
- },
- .tlb_inv_range = arm_smmu_tlb_inv_range_s1,
- .tlb_sync = arm_smmu_tlb_sync_context,
+static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s1,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1,
+ .tlb_add_page = arm_smmu_tlb_add_page_s1,
};
-static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
- .tlb = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_page = arm_smmu_tlb_add_page,
- },
- .tlb_inv_range = arm_smmu_tlb_inv_range_s2,
- .tlb_sync = arm_smmu_tlb_sync_context,
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2,
+ .tlb_add_page = arm_smmu_tlb_add_page_s2,
};
-static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
- .tlb = {
- .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
- .tlb_flush_walk = arm_smmu_tlb_inv_walk,
- .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
- .tlb_add_page = arm_smmu_tlb_add_page,
- },
- .tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync,
- .tlb_sync = arm_smmu_tlb_sync_vmid,
+static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
+ .tlb_flush_all = arm_smmu_tlb_inv_context_s2,
+ .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1,
+ .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1,
+ .tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
};
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -472,6 +478,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
struct arm_smmu_device *smmu = dev;
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
@@ -481,11 +489,19 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
if (!gfsr)
return IRQ_NONE;
- dev_err_ratelimited(smmu->dev,
- "Unexpected global fault, this could be serious\n");
- dev_err_ratelimited(smmu->dev,
- "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
- gfsr, gfsynr0, gfsynr1, gfsynr2);
+ if (__ratelimit(&rs)) {
+ if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
+ (gfsr & sGFSR_USF))
+ dev_err(smmu->dev,
+ "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
+ (u16)gfsynr1);
+ else
+ dev_err(smmu->dev,
+ "Unexpected global fault, this could be serious\n");
+ dev_err(smmu->dev,
+ "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+ gfsr, gfsynr0, gfsynr1, gfsynr2);
+ }
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
return IRQ_HANDLED;
@@ -536,8 +552,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
} else {
- cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
- cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+ cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
+ cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
}
}
}
@@ -770,7 +786,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.ias = ias,
.oas = oas,
.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
- .tlb = &smmu_domain->flush_ops->tlb,
+ .tlb = smmu_domain->flush_ops,
.iommu_dev = smmu->dev,
};
@@ -1039,8 +1055,6 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
}
group = iommu_group_get_for_dev(dev);
- if (!group)
- group = ERR_PTR(-ENOMEM);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_err;
@@ -1154,13 +1168,27 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/* Looks ok, so add the device to the domain */
ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
+ /*
+	 * Set up an autosuspend delay to avoid bouncing runpm state.
+ * Otherwise, if a driver for a suspended consumer device
+ * unmaps buffers, it will runpm resume/suspend for each one.
+ *
+ * For example, when used by a GPU device, when an application
+ * or game exits, it can trigger unmapping 100s or 1000s of
+ * buffers. With a runpm cycle for each buffer, that adds up
+ * to 5-10sec worth of reprogramming the context bank, while
+ * the system appears to be locked up to the user.
+ */
+ pm_runtime_set_autosuspend_delay(smmu->dev, 20);
+ pm_runtime_use_autosuspend(smmu->dev);
+
rpm_put:
arm_smmu_rpm_put(smmu);
return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
@@ -1200,7 +1228,7 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
if (smmu_domain->flush_ops) {
arm_smmu_rpm_get(smmu);
- smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
+ smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
arm_smmu_rpm_put(smmu);
}
}
@@ -1211,11 +1239,16 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
- if (smmu_domain->flush_ops) {
- arm_smmu_rpm_get(smmu);
- smmu_domain->flush_ops->tlb_sync(smmu_domain);
- arm_smmu_rpm_put(smmu);
- }
+ if (!smmu)
+ return;
+
+ arm_smmu_rpm_get(smmu);
+ if (smmu->version == ARM_SMMU_V2 ||
+ smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+ arm_smmu_tlb_sync_context(smmu_domain);
+ else
+ arm_smmu_tlb_sync_global(smmu);
+ arm_smmu_rpm_put(smmu);
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -2062,10 +2095,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
for (i = 0; i < num_irqs; ++i) {
int irq = platform_get_irq(pdev, i);
- if (irq < 0) {
- dev_err(dev, "failed to get irq index %d\n", i);
+ if (irq < 0)
return -ENODEV;
- }
smmu->irqs[i] = irq;
}
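
The arm_smmu_attach_dev() hunk above pairs pm_runtime_set_autosuspend_delay()/pm_runtime_use_autosuspend() with the earlier switch from pm_runtime_put() to pm_runtime_put_autosuspend(), so a burst of unmaps keeps the SMMU resumed instead of cycling it per buffer. A minimal sketch of the generic runtime-PM autosuspend pattern, outside the SMMU code and with illustrative names:

#include <linux/pm_runtime.h>

static void foo_enable_autosuspend(struct device *dev)
{
	/* Keep the device resumed for 20 ms after the last put */
	pm_runtime_set_autosuspend_delay(dev, 20);
	pm_runtime_use_autosuspend(dev);
}

static void foo_do_work(struct device *dev)
{
	pm_runtime_get_sync(dev);
	/* ... touch the hardware ... */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}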
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
index b19b6cae9b5e..62b9f0cec49b 100644
--- a/drivers/iommu/arm-smmu.h
+++ b/drivers/iommu/arm-smmu.h
@@ -79,6 +79,8 @@
#define ID7_MINOR GENMASK(3, 0)
#define ARM_SMMU_GR0_sGFSR 0x48
+#define sGFSR_USF BIT(1)
+
#define ARM_SMMU_GR0_sGFSYNR0 0x50
#define ARM_SMMU_GR0_sGFSYNR1 0x54
#define ARM_SMMU_GR0_sGFSYNR2 0x58
@@ -304,17 +306,10 @@ enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_BYPASS,
};
-struct arm_smmu_flush_ops {
- struct iommu_flush_ops tlb;
- void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
- bool leaf, void *cookie);
- void (*tlb_sync)(void *cookie);
-};
-
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct io_pgtable_ops *pgtbl_ops;
- const struct arm_smmu_flush_ops *flush_ops;
+ const struct iommu_flush_ops *flush_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
bool non_strict;
@@ -335,6 +330,8 @@ struct arm_smmu_impl {
int (*cfg_probe)(struct arm_smmu_device *smmu);
int (*reset)(struct arm_smmu_device *smmu);
int (*init_context)(struct arm_smmu_domain *smmu_domain);
+ void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
+ int status);
};
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
@@ -398,5 +395,8 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v))
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
+
+int arm_mmu500_reset(struct arm_smmu_device *smmu);
#endif /* _ARM_SMMU_H */
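
The tlb_sync member added to struct arm_smmu_impl above lets an implementation override how __arm_smmu_tlb_sync() issues and polls the sync (the generic path defers to it when present). A hypothetical hook, assuming the helpers from arm-smmu.h; the polling policy shown is illustrative:

static void foo_tlb_sync(struct arm_smmu_device *smmu, int page, int sync,
			 int status)
{
	arm_smmu_writel(smmu, page, sync, 0);
	/* Poll the status register with an implementation-specific policy,
	 * e.g. longer timeouts than the generic TLB_LOOP_TIMEOUT loop. */
	while (arm_smmu_readl(smmu, page, status) & BIT(0))
		cpu_relax();
}

static const struct arm_smmu_impl foo_impl = {
	.tlb_sync = foo_tlb_sync,
};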
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 646332fbf3d7..0cc702a70a96 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -22,6 +22,7 @@
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
struct iommu_dma_msi_page {
struct list_head list;
@@ -353,6 +354,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
return iova_reserve_iommu_regions(dev, domain);
}
+static int iommu_dma_deferred_attach(struct device *dev,
+ struct iommu_domain *domain)
+{
+ const struct iommu_ops *ops = domain->ops;
+
+ if (!is_kdump_kernel())
+ return 0;
+
+ if (unlikely(ops->is_attach_deferred &&
+ ops->is_attach_deferred(domain, dev)))
+ return iommu_attach_device(domain, dev);
+
+ return 0;
+}
+
/**
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
* page flags.
@@ -461,7 +477,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot)
+ size_t size, int prot, dma_addr_t dma_mask)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -469,13 +485,16 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t iova_off = iova_offset(iovad, phys);
dma_addr_t iova;
+ if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ return DMA_MAPPING_ERROR;
+
size = iova_align(iovad, size + iova_off);
- iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+ iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
if (!iova)
return DMA_MAPPING_ERROR;
- if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+ if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size);
return DMA_MAPPING_ERROR;
}
@@ -578,6 +597,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
*dma_handle = DMA_MAPPING_ERROR;
+ if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ return NULL;
+
min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) {
min_size = PAGE_SIZE;
@@ -610,7 +632,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+ if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
< size)
goto out_free_sg;
@@ -710,7 +732,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dma_handle;
- dma_handle =__iommu_dma_map(dev, phys, size, prot);
+ dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
dma_handle != DMA_MAPPING_ERROR)
arch_sync_dma_for_device(phys, size, dir);
@@ -820,6 +842,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long mask = dma_get_seg_boundary(dev);
int i;
+ if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+ return 0;
+
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
@@ -870,7 +895,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do.
*/
- if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
+ if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova);
@@ -910,7 +935,8 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+ dma_get_mask(dev));
}
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -1016,7 +1042,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (!cpu_addr)
return NULL;
- *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
+ *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
+ dev->coherent_dma_mask);
if (*handle == DMA_MAPPING_ERROR) {
__iommu_dma_free(dev, size, cpu_addr);
return NULL;
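
iommu_dma_deferred_attach() above only does work in a kdump kernel, and only for IOMMU drivers that implement the is_attach_deferred() callback; everyone else pays a single is_kdump_kernel() check. A hedged sketch of the driver side, with an invented policy helper (foo_dev_uses_old_tables is hypothetical):

static bool foo_dev_uses_old_tables(struct device *dev)
{
	return true;	/* placeholder policy for the sketch */
}

static bool foo_is_attach_deferred(struct iommu_domain *domain,
				   struct device *dev)
{
	/* Keep the crashed kernel's translations until first DMA API use,
	 * at which point iommu_dma_deferred_attach() performs the attach. */
	return foo_dev_uses_old_tables(dev);
}

static const struct iommu_ops foo_iommu_ops = {
	.is_attach_deferred	= foo_is_attach_deferred,
	/* ... remaining callbacks ... */
};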
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index eecd6a421667..3acfa6a25fa2 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -895,8 +895,11 @@ int __init detect_intel_iommu(void)
}
#ifdef CONFIG_X86
- if (!ret)
+ if (!ret) {
x86_init.iommu.iommu_init = intel_iommu_init;
+ x86_platform.iommu_shutdown = intel_iommu_shutdown;
+ }
+
#endif
if (dmar_tbl) {
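
The dmar.c change registers intel_iommu_shutdown() (defined in the intel-iommu.c part of this diff) as x86_platform.iommu_shutdown, alongside the existing iommu_init hook. A hedged sketch of how such a hook is consumed; the call site below is illustrative, the real one lives in the arch/x86 shutdown path:

void example_machine_shutdown(void)
{
	/* Quiesce DMA remapping before handing control to a kexec'd kernel */
	if (x86_platform.iommu_shutdown)
		x86_platform.iommu_shutdown();
}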
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 9c94e16fb127..186ff5cc975c 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1073,7 +1073,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
*/
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
unsigned long l_iova, phys_addr_t paddr, size_t size,
- int prot)
+ int prot, gfp_t gfp)
{
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
sysmmu_pte_t *entry;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6db6d969e31c..0c8d81f56a30 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2420,14 +2420,24 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
spin_unlock_irqrestore(&device_domain_lock, flags);
}
-/*
- * find_domain
- * Note: we use struct device->archdata.iommu stores the info
- */
static struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
+ if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO ||
+ dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO))
+ return NULL;
+
+ /* No lock here, assumes no domain exit in normal case */
+ info = dev->archdata.iommu;
+ if (likely(info))
+ return info->domain;
+
+ return NULL;
+}
+
+static struct dmar_domain *deferred_attach_domain(struct device *dev)
+{
if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
struct iommu_domain *domain;
@@ -2437,12 +2447,7 @@ static struct dmar_domain *find_domain(struct device *dev)
intel_iommu_attach_device(domain, dev);
}
- /* No lock here, assumes no domain exit in normal case */
- info = dev->archdata.iommu;
-
- if (likely(info))
- return info->domain;
- return NULL;
+ return find_domain(dev);
}
static inline struct device_domain_info *
@@ -3512,7 +3517,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- domain = find_domain(dev);
+ domain = deferred_attach_domain(dev);
if (!domain)
return DMA_MAPPING_ERROR;
@@ -3732,7 +3737,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
if (!iommu_need_mapping(dev))
return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
- domain = find_domain(dev);
+ domain = deferred_attach_domain(dev);
if (!domain)
return 0;
@@ -3827,7 +3832,7 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
int prot = 0;
int ret;
- domain = find_domain(dev);
+ domain = deferred_attach_domain(dev);
if (WARN_ON(dir == DMA_NONE || !domain))
return DMA_MAPPING_ERROR;
@@ -4314,13 +4319,19 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru;
+ int ret;
+
+ rmrr = (struct acpi_dmar_reserved_memory *)header;
+ ret = arch_rmrr_sanity_check(rmrr);
+ if (ret)
+ return ret;
rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru)
goto out;
rmrru->hdr = header;
- rmrr = (struct acpi_dmar_reserved_memory *)header;
+
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
@@ -4759,6 +4770,26 @@ static void intel_disable_iommus(void)
iommu_disable_translation(iommu);
}
+void intel_iommu_shutdown(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu = NULL;
+
+ if (no_iommu || dmar_disabled)
+ return;
+
+ down_write(&dmar_global_lock);
+
+ /* Disable PMRs explicitly here. */
+ for_each_iommu(iommu, drhd)
+ iommu_disable_protect_mem_regions(iommu);
+
+ /* Make sure the IOMMUs are switched off */
+ intel_disable_iommus();
+
+ up_write(&dmar_global_lock);
+}
+
static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
@@ -5440,7 +5471,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
- size_t size, int iommu_prot)
+ size_t size, int iommu_prot, gfp_t gfp)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 4cb394937700..7c3bd2c3cdca 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -846,27 +846,28 @@ struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
-static struct io_pgtable_cfg *cfg_cookie;
+static struct io_pgtable_cfg *cfg_cookie __initdata;
-static void dummy_tlb_flush_all(void *cookie)
+static void __init dummy_tlb_flush_all(void *cookie)
{
WARN_ON(cookie != cfg_cookie);
}
-static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
- void *cookie)
+static void __init dummy_tlb_flush(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
-static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule, void *cookie)
+static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
dummy_tlb_flush(iova, granule, granule, cookie);
}
-static const struct iommu_flush_ops dummy_tlb_ops = {
+static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_flush_walk = dummy_tlb_flush,
.tlb_flush_leaf = dummy_tlb_flush,
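
The selftest-only data and callbacks above gain __initdata/__initconst/__init annotations so they are placed in the init sections and discarded once boot-time initialisation finishes. A generic illustration of the same annotations, unrelated to the page-table code itself:

#include <linux/init.h>
#include <linux/printk.h>

static const unsigned int example_sizes[] __initconst = { 4096, 16384, 65536 };

static int __init example_selftest(void)
{
	pr_info("smallest granule under test: %u\n", example_sizes[0]);
	return 0;	/* both the function and the table are freed after boot */
}
late_initcall(example_selftest);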
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index ca51036aa53c..bdf47f745268 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -32,39 +32,31 @@
io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
- * For consistency with the architecture, we always consider
- * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
- */
-#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
-
-/*
* Calculate the right shift amount to get to the portion describing level l
* in a virtual address mapped by the pagetable in d.
*/
#define ARM_LPAE_LVL_SHIFT(l,d) \
- ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
- * (d)->bits_per_level) + (d)->pg_shift)
+ (((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) + \
+ ilog2(sizeof(arm_lpae_iopte)))
-#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift)
-
-#define ARM_LPAE_PAGES_PER_PGD(d) \
- DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
+#define ARM_LPAE_GRANULE(d) \
+ (sizeof(arm_lpae_iopte) << (d)->bits_per_level)
+#define ARM_LPAE_PGD_SIZE(d) \
+ (sizeof(arm_lpae_iopte) << (d)->pgd_bits)
/*
* Calculate the index at level l used to map virtual address a using the
* pagetable in d.
*/
#define ARM_LPAE_PGD_IDX(l,d) \
- ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
+ ((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)
#define ARM_LPAE_LVL_IDX(a,l,d) \
(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
/* Calculate the block/page mapping size at level l for pagetable in d. */
-#define ARM_LPAE_BLOCK_SIZE(l,d) \
- (1ULL << (ilog2(sizeof(arm_lpae_iopte)) + \
- ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
+#define ARM_LPAE_BLOCK_SIZE(l,d) (1ULL << ARM_LPAE_LVL_SHIFT(l,d))
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT 0
@@ -180,10 +172,9 @@
struct arm_lpae_io_pgtable {
struct io_pgtable iop;
- int levels;
- size_t pgd_size;
- unsigned long pg_shift;
- unsigned long bits_per_level;
+ int pgd_bits;
+ int start_level;
+ int bits_per_level;
void *pgd;
};
@@ -213,7 +204,7 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
{
u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
- if (data->pg_shift < 16)
+ if (ARM_LPAE_GRANULE(data) < SZ_64K)
return paddr;
/* Rotate the packed high-order bits back to the top */
@@ -392,7 +383,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
/* If we can install a leaf entry at this level, then do so */
- if (size == block_size && (size & cfg->pgsize_bitmap))
+ if (size == block_size)
return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
/* We can't allocate tables at the final level */
@@ -464,7 +455,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
- else if (prot & IOMMU_QCOM_SYS_CACHE)
+ else if (prot & IOMMU_SYS_CACHE_ONLY)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
@@ -479,16 +470,19 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int iommu_prot)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte *ptep = data->pgd;
- int ret, lvl = ARM_LPAE_START_LVL(data);
+ int ret, lvl = data->start_level;
arm_lpae_iopte prot;
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
- if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
- paddr >= (1ULL << data->iop.cfg.oas)))
+ if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+ return -EINVAL;
+
+ if (WARN_ON(iova >> data->iop.cfg.ias || paddr >> data->iop.cfg.oas))
return -ERANGE;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
@@ -508,8 +502,8 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
arm_lpae_iopte *start, *end;
unsigned long table_size;
- if (lvl == ARM_LPAE_START_LVL(data))
- table_size = data->pgd_size;
+ if (lvl == data->start_level)
+ table_size = ARM_LPAE_PGD_SIZE(data);
else
table_size = ARM_LPAE_GRANULE(data);
@@ -537,7 +531,7 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
- __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+ __arm_lpae_free_pgtable(data, data->start_level, data->pgd);
kfree(data);
}
@@ -652,13 +646,16 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size, struct iommu_iotlb_gather *gather)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_lpae_iopte *ptep = data->pgd;
- int lvl = ARM_LPAE_START_LVL(data);
- if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
+ if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
+ return 0;
+
+ if (WARN_ON(iova >> data->iop.cfg.ias))
return 0;
- return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep);
+ return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
@@ -666,7 +663,7 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte pte, *ptep = data->pgd;
- int lvl = ARM_LPAE_START_LVL(data);
+ int lvl = data->start_level;
do {
/* Valid IOPTE pointer? */
@@ -743,8 +740,8 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
- unsigned long va_bits, pgd_bits;
struct arm_lpae_io_pgtable *data;
+ int levels, va_bits, pg_shift;
arm_lpae_restrict_pgsizes(cfg);
@@ -766,15 +763,15 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
if (!data)
return NULL;
- data->pg_shift = __ffs(cfg->pgsize_bitmap);
- data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
+ pg_shift = __ffs(cfg->pgsize_bitmap);
+ data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));
- va_bits = cfg->ias - data->pg_shift;
- data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+ va_bits = cfg->ias - pg_shift;
+ levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
+ data->start_level = ARM_LPAE_MAX_LEVELS - levels;
/* Calculate the actual size of our pgd (without concatenation) */
- pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
- data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
+ data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
data->iop.ops = (struct io_pgtable_ops) {
.map = arm_lpae_map,
@@ -864,11 +861,11 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_MAIR_ATTR_INC_OWBRWA
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
- cfg->arm_lpae_s1_cfg.mair[0] = reg;
- cfg->arm_lpae_s1_cfg.mair[1] = 0;
+ cfg->arm_lpae_s1_cfg.mair = reg;
/* Looking good; allocate a pgd */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
+ GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
@@ -903,13 +900,13 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
* Concatenate PGDs at level 1 if possible in order to reduce
* the depth of the stage-2 walk.
*/
- if (data->levels == ARM_LPAE_MAX_LEVELS) {
+ if (data->start_level == 0) {
unsigned long pgd_pages;
- pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
+ pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
- data->pgd_size = pgd_pages << data->pg_shift;
- data->levels--;
+ data->pgd_bits += data->bits_per_level;
+ data->start_level++;
}
}
@@ -919,7 +916,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
- sl = ARM_LPAE_START_LVL(data);
+ sl = data->start_level;
switch (ARM_LPAE_GRANULE(data)) {
case SZ_4K:
@@ -965,7 +962,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s2_cfg.vtcr = reg;
/* Allocate pgd pages */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
+ GFP_KERNEL, cfg);
if (!data->pgd)
goto out_free_data;
@@ -1034,9 +1032,9 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
return NULL;
/* Mali seems to need a full 4-level table regardless of IAS */
- if (data->levels < ARM_LPAE_MAX_LEVELS) {
- data->levels = ARM_LPAE_MAX_LEVELS;
- data->pgd_size = sizeof(arm_lpae_iopte);
+ if (data->start_level > 0) {
+ data->start_level = 0;
+ data->pgd_bits = 0;
}
/*
* MEMATTR: Mali has no actual notion of a non-cacheable type, so the
@@ -1053,7 +1051,8 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_MALI_LPAE_MEMATTR_IMP_DEF
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+ data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
+ cfg);
if (!data->pgd)
goto out_free_data;
@@ -1097,22 +1096,23 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
-static struct io_pgtable_cfg *cfg_cookie;
+static struct io_pgtable_cfg *cfg_cookie __initdata;
-static void dummy_tlb_flush_all(void *cookie)
+static void __init dummy_tlb_flush_all(void *cookie)
{
WARN_ON(cookie != cfg_cookie);
}
-static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
- void *cookie)
+static void __init dummy_tlb_flush(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
-static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule, void *cookie)
+static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
{
dummy_tlb_flush(iova, granule, granule, cookie);
}
@@ -1131,9 +1131,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
cfg->pgsize_bitmap, cfg->ias);
- pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
- data->levels, data->pgd_size, data->pg_shift,
- data->bits_per_level, data->pgd);
+ pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
+ ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
+ ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}
#define __FAIL(ops, i) ({ \
@@ -1145,7 +1145,7 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
- static const enum io_pgtable_fmt fmts[] = {
+ static const enum io_pgtable_fmt fmts[] __initconst = {
ARM_64_LPAE_S1,
ARM_64_LPAE_S2,
};
@@ -1244,13 +1244,13 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
static int __init arm_lpae_do_selftests(void)
{
- static const unsigned long pgsize[] = {
+ static const unsigned long pgsize[] __initconst = {
SZ_4K | SZ_2M | SZ_1G,
SZ_16K | SZ_32M,
SZ_64K | SZ_512M,
};
- static const unsigned int ias[] = {
+ static const unsigned int ias[] __initconst = {
32, 36, 40, 42, 44, 48,
};
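
As a worked example of the new geometry fields above, take a 4KB granule with a 48-bit IAS (one of the selftest configurations): pg_shift = 12, bits_per_level = 12 - ilog2(sizeof(arm_lpae_iopte)) = 12 - 3 = 9, va_bits = 48 - 12 = 36, levels = DIV_ROUND_UP(36, 9) = 4, hence start_level = ARM_LPAE_MAX_LEVELS - 4 = 0 and pgd_bits = 36 - 9 * 3 = 9. Plugging those into the reworked macros gives ARM_LPAE_PGD_SIZE = 8 << 9 = 4096 bytes and ARM_LPAE_GRANULE = 8 << 9 = 4096 bytes, matching the old levels/pgd_size/pg_shift bookkeeping.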
diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
new file mode 100644
index 000000000000..0f8dd377aada
--- /dev/null
+++ b/drivers/iommu/ioasid.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I/O Address Space ID allocator. There is one global IOASID space, split into
+ * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
+ * free IOASIDs with ioasid_alloc and ioasid_free.
+ */
+#include <linux/ioasid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/xarray.h>
+
+struct ioasid_data {
+ ioasid_t id;
+ struct ioasid_set *set;
+ void *private;
+ struct rcu_head rcu;
+};
+
+/*
+ * struct ioasid_allocator_data - Internal data structure to hold information
+ * about an allocator. There are two types of allocators:
+ *
+ * - Default allocator always has its own XArray to track the IOASIDs allocated.
+ * - Custom allocators may share allocation helpers with different private data.
+ * Custom allocators that share the same helper functions also share the same
+ * XArray.
+ * Rules:
+ * 1. Default allocator is always available, not dynamically registered. This is
+ *    to prevent race conditions with early boot code that wants to register
+ * custom allocators or allocate IOASIDs.
+ * 2. Custom allocators take precedence over the default allocator.
+ * 3. When all custom allocators sharing the same helper functions are
+ * unregistered (e.g. due to hotplug), all outstanding IOASIDs must be
+ * freed. Otherwise, outstanding IOASIDs will be lost and orphaned.
+ * 4. When switching between custom allocators sharing the same helper
+ * functions, outstanding IOASIDs are preserved.
+ * 5. When switching between custom allocator and default allocator, all IOASIDs
+ * must be freed to ensure unadulterated space for the new allocator.
+ *
+ * @ops: allocator helper functions and its data
+ * @list: registered custom allocators
+ * @slist: allocators share the same ops but different data
+ * @flags: attributes of the allocator
+ * @xa: xarray holds the IOASID space
+ * @rcu: used for kfree_rcu when unregistering allocator
+ */
+struct ioasid_allocator_data {
+ struct ioasid_allocator_ops *ops;
+ struct list_head list;
+ struct list_head slist;
+#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
+ unsigned long flags;
+ struct xarray xa;
+ struct rcu_head rcu;
+};
+
+static DEFINE_SPINLOCK(ioasid_allocator_lock);
+static LIST_HEAD(allocators_list);
+
+static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
+static void default_free(ioasid_t ioasid, void *opaque);
+
+static struct ioasid_allocator_ops default_ops = {
+ .alloc = default_alloc,
+ .free = default_free,
+};
+
+static struct ioasid_allocator_data default_allocator = {
+ .ops = &default_ops,
+ .flags = 0,
+ .xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
+};
+
+static struct ioasid_allocator_data *active_allocator = &default_allocator;
+
+static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
+{
+ ioasid_t id;
+
+ if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
+ pr_err("Failed to alloc ioasid from %d to %d\n", min, max);
+ return INVALID_IOASID;
+ }
+
+ return id;
+}
+
+static void default_free(ioasid_t ioasid, void *opaque)
+{
+ struct ioasid_data *ioasid_data;
+
+ ioasid_data = xa_erase(&default_allocator.xa, ioasid);
+ kfree_rcu(ioasid_data, rcu);
+}
+
+/* Allocate and initialize a new custom allocator with its helper functions */
+static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops)
+{
+ struct ioasid_allocator_data *ia_data;
+
+ ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
+ if (!ia_data)
+ return NULL;
+
+ xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);
+ INIT_LIST_HEAD(&ia_data->slist);
+ ia_data->flags |= IOASID_ALLOCATOR_CUSTOM;
+ ia_data->ops = ops;
+
+ /* For tracking custom allocators that share the same ops */
+ list_add_tail(&ops->list, &ia_data->slist);
+
+ return ia_data;
+}
+
+static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b)
+{
+ return (a->free == b->free) && (a->alloc == b->alloc);
+}
+
+/**
+ * ioasid_register_allocator - register a custom allocator
+ * @ops: the custom allocator ops to be registered
+ *
+ * Custom allocators take precedence over the default XArray-based allocator.
+ * Private data associated with IOASIDs allocated by a custom allocator is
+ * managed by the IOASID framework, just like the data stored in the XArray
+ * by the default allocator.
+ *
+ * There can be multiple allocators registered but only one is active. In case
+ * of runtime removal of a custom allocator, the next one is activated based
+ * on the registration ordering.
+ *
+ * Multiple allocators can share the same alloc() function; in this case the
+ * IOASID space is shared.
+ */
+int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
+{
+ struct ioasid_allocator_data *ia_data;
+ struct ioasid_allocator_data *pallocator;
+ int ret = 0;
+
+ spin_lock(&ioasid_allocator_lock);
+
+ ia_data = ioasid_alloc_allocator(ops);
+ if (!ia_data) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ /*
+	 * No particular preference: we activate the first one and keep
+ * the later registered allocators in a list in case the first one gets
+ * removed due to hotplug.
+ */
+ if (list_empty(&allocators_list)) {
+ WARN_ON(active_allocator != &default_allocator);
+ /* Use this new allocator if default is not active */
+ if (xa_empty(&active_allocator->xa)) {
+ rcu_assign_pointer(active_allocator, ia_data);
+ list_add_tail(&ia_data->list, &allocators_list);
+ goto out_unlock;
+ }
+ pr_warn("Default allocator active with outstanding IOASID\n");
+ ret = -EAGAIN;
+ goto out_free;
+ }
+
+ /* Check if the allocator is already registered */
+ list_for_each_entry(pallocator, &allocators_list, list) {
+ if (pallocator->ops == ops) {
+ pr_err("IOASID allocator already registered\n");
+ ret = -EEXIST;
+ goto out_free;
+ } else if (use_same_ops(pallocator->ops, ops)) {
+ /*
+ * If the new allocator shares the same ops,
+ * then they will share the same IOASID space.
+ * We should put them under the same xarray.
+ */
+ list_add_tail(&ops->list, &pallocator->slist);
+ goto out_free;
+ }
+ }
+ list_add_tail(&ia_data->list, &allocators_list);
+
+ spin_unlock(&ioasid_allocator_lock);
+ return 0;
+out_free:
+ kfree(ia_data);
+out_unlock:
+ spin_unlock(&ioasid_allocator_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioasid_register_allocator);
+
+/**
+ * ioasid_unregister_allocator - Remove a custom IOASID allocator ops
+ * @ops: the custom allocator to be removed
+ *
+ * Remove an allocator from the list and activate the next allocator in
+ * registration order, or revert to the default allocator if all custom
+ * allocators have been unregistered and no IOASIDs are outstanding.
+ */
+void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
+{
+ struct ioasid_allocator_data *pallocator;
+ struct ioasid_allocator_ops *sops;
+
+ spin_lock(&ioasid_allocator_lock);
+ if (list_empty(&allocators_list)) {
+ pr_warn("No custom IOASID allocators active!\n");
+ goto exit_unlock;
+ }
+
+ list_for_each_entry(pallocator, &allocators_list, list) {
+ if (!use_same_ops(pallocator->ops, ops))
+ continue;
+
+ if (list_is_singular(&pallocator->slist)) {
+ /* No shared helper functions */
+ list_del(&pallocator->list);
+ /*
+ * All IOASIDs should have been freed before
+ * the last allocator that shares the same ops
+ * is unregistered.
+ */
+ WARN_ON(!xa_empty(&pallocator->xa));
+ if (list_empty(&allocators_list)) {
+ pr_info("No custom IOASID allocators, switch to default.\n");
+ rcu_assign_pointer(active_allocator, &default_allocator);
+ } else if (pallocator == active_allocator) {
+ rcu_assign_pointer(active_allocator,
+ list_first_entry(&allocators_list,
+ struct ioasid_allocator_data, list));
+ pr_info("IOASID allocator changed");
+ }
+ kfree_rcu(pallocator, rcu);
+ break;
+ }
+ /*
+ * Find the matching shared ops to delete,
+ * but keep outstanding IOASIDs
+ */
+ list_for_each_entry(sops, &pallocator->slist, list) {
+ if (sops == ops) {
+ list_del(&ops->list);
+ break;
+ }
+ }
+ break;
+ }
+
+exit_unlock:
+ spin_unlock(&ioasid_allocator_lock);
+}
+EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);
+
+/**
+ * ioasid_set_data - Set private data for an allocated ioasid
+ * @ioasid: the ID to set data
+ * @data: the private data
+ *
+ * For an IOASID that is already allocated, private data can be set
+ * via this API. Future lookups can be done via ioasid_find().
+ */
+int ioasid_set_data(ioasid_t ioasid, void *data)
+{
+ struct ioasid_data *ioasid_data;
+ int ret = 0;
+
+ spin_lock(&ioasid_allocator_lock);
+ ioasid_data = xa_load(&active_allocator->xa, ioasid);
+ if (ioasid_data)
+ rcu_assign_pointer(ioasid_data->private, data);
+ else
+ ret = -ENOENT;
+ spin_unlock(&ioasid_allocator_lock);
+
+ /*
+ * Wait for readers to stop accessing the old private data, so the
+ * caller can free it.
+ */
+ if (!ret)
+ synchronize_rcu();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioasid_set_data);
+
+/**
+ * ioasid_alloc - Allocate an IOASID
+ * @set: the IOASID set
+ * @min: the minimum ID (inclusive)
+ * @max: the maximum ID (inclusive)
+ * @private: data private to the caller
+ *
+ * Allocate an ID between @min and @max. The @private pointer is stored
+ * internally and can be retrieved with ioasid_find().
+ *
+ * Return: the allocated ID on success, or %INVALID_IOASID on failure.
+ */
+ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
+ void *private)
+{
+ struct ioasid_data *data;
+ void *adata;
+ ioasid_t id;
+
+ data = kzalloc(sizeof(*data), GFP_ATOMIC);
+ if (!data)
+ return INVALID_IOASID;
+
+ data->set = set;
+ data->private = private;
+
+ /*
+ * Custom allocator needs allocator data to perform platform specific
+ * operations.
+ */
+ spin_lock(&ioasid_allocator_lock);
+ adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ? active_allocator->ops->pdata : data;
+ id = active_allocator->ops->alloc(min, max, adata);
+ if (id == INVALID_IOASID) {
+ pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
+ goto exit_free;
+ }
+
+ if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
+ xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
+ /* Custom allocator needs framework to store and track allocation results */
+ pr_err("Failed to alloc ioasid from %d\n", id);
+ active_allocator->ops->free(id, active_allocator->ops->pdata);
+ goto exit_free;
+ }
+ data->id = id;
+
+ spin_unlock(&ioasid_allocator_lock);
+ return id;
+exit_free:
+ spin_unlock(&ioasid_allocator_lock);
+ kfree(data);
+ return INVALID_IOASID;
+}
+EXPORT_SYMBOL_GPL(ioasid_alloc);
+
+/**
+ * ioasid_free - Free an IOASID
+ * @ioasid: the ID to remove
+ */
+void ioasid_free(ioasid_t ioasid)
+{
+ struct ioasid_data *ioasid_data;
+
+ spin_lock(&ioasid_allocator_lock);
+ ioasid_data = xa_load(&active_allocator->xa, ioasid);
+ if (!ioasid_data) {
+ pr_err("Trying to free unknown IOASID %u\n", ioasid);
+ goto exit_unlock;
+ }
+
+ active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
+ /* Custom allocator needs additional steps to free the xa element */
+ if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
+ ioasid_data = xa_erase(&active_allocator->xa, ioasid);
+ kfree_rcu(ioasid_data, rcu);
+ }
+
+exit_unlock:
+ spin_unlock(&ioasid_allocator_lock);
+}
+EXPORT_SYMBOL_GPL(ioasid_free);
+
+/**
+ * ioasid_find - Find IOASID data
+ * @set: the IOASID set
+ * @ioasid: the IOASID to find
+ * @getter: function to call on the found object
+ *
+ * The optional getter function allows the caller to take a reference to the
+ * found object under the RCU lock. The getter can also check whether the
+ * object is still valid: if @getter returns false, the object is considered
+ * invalid and NULL is returned.
+ *
+ * If the IOASID exists, return the private pointer passed to ioasid_alloc.
+ * Private data can be NULL if not set. Return an error if the IOASID is not
+ * found, or if @set is not NULL and the IOASID does not belong to the set.
+ */
+void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
+ bool (*getter)(void *))
+{
+ void *priv;
+ struct ioasid_data *ioasid_data;
+ struct ioasid_allocator_data *idata;
+
+ rcu_read_lock();
+ idata = rcu_dereference(active_allocator);
+ ioasid_data = xa_load(&idata->xa, ioasid);
+ if (!ioasid_data) {
+ priv = ERR_PTR(-ENOENT);
+ goto unlock;
+ }
+ if (set && ioasid_data->set != set) {
+ /* data found but does not belong to the set */
+ priv = ERR_PTR(-EACCES);
+ goto unlock;
+ }
+	/* Now that the IOASID and its set are verified, we can return the private data */
+ priv = rcu_dereference(ioasid_data->private);
+ if (getter && !getter(priv))
+ priv = NULL;
+unlock:
+ rcu_read_unlock();
+
+ return priv;
+}
+EXPORT_SYMBOL_GPL(ioasid_find);
+
+MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
+MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
+MODULE_LICENSE("GPL");
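
The new allocator exposes a small API: ioasid_alloc()/ioasid_free() for IDs, ioasid_find() for looking up the private payload, ioasid_set_data() to update it, and the register/unregister calls for custom back-ends. A hedged usage sketch built only from the signatures above; the set name, range and payload are illustrative:

#include <linux/ioasid.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static DECLARE_IOASID_SET(example_set);

struct example_state {
	int bound;
};

static int example_use_ioasid(void)
{
	struct example_state *state;
	ioasid_t pasid;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	/* Allocate a PASID in [1, 31] and attach our private data to it */
	pasid = ioasid_alloc(&example_set, 1, 31, state);
	if (pasid == INVALID_IOASID) {
		kfree(state);
		return -ENOSPC;
	}

	/* Later lookups return the private pointer under RCU protection */
	WARN_ON(ioasid_find(&example_set, pasid, NULL) != state);

	ioasid_free(pasid);
	kfree(state);
	return 0;
}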
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d658c7c6a2ab..db7bfd4f2d20 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1665,6 +1665,36 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
+int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info)
+{
+ if (unlikely(!domain->ops->cache_invalidate))
+ return -ENODEV;
+
+ return domain->ops->cache_invalidate(domain, dev, inv_info);
+}
+EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
+
+int iommu_sva_bind_gpasid(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data)
+{
+ if (unlikely(!domain->ops->sva_bind_gpasid))
+ return -ENODEV;
+
+ return domain->ops->sva_bind_gpasid(domain, dev, data);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
+
+int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid)
+{
+ if (unlikely(!domain->ops->sva_unbind_gpasid))
+ return -ENODEV;
+
+ return domain->ops->sva_unbind_gpasid(dev, pasid);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
+
static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -1854,8 +1884,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
return pgsize;
}
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_ops *ops = domain->ops;
unsigned long orig_iova = iova;
@@ -1892,8 +1922,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize);
+ ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
- ret = ops->map(domain, iova, paddr, pgsize, prot);
if (ret)
break;
@@ -1913,8 +1943,22 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ might_sleep();
+ return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(iommu_map);
+int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iommu_map_atomic);
+
static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
@@ -1991,8 +2035,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
-size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot)
+size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot,
+ gfp_t gfp)
{
size_t len = 0, mapped = 0;
phys_addr_t start;
@@ -2003,7 +2048,9 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
phys_addr_t s_phys = sg_phys(sg);
if (len && s_phys != start + len) {
- ret = iommu_map(domain, iova + mapped, start, len, prot);
+ ret = __iommu_map(domain, iova + mapped, start,
+ len, prot, gfp);
+
if (ret)
goto out_err;
@@ -2031,8 +2078,22 @@ out_err:
return 0;
}
+
+size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ might_sleep();
+ return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
+}
EXPORT_SYMBOL_GPL(iommu_map_sg);
+size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
+
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
{
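
With the refactor above, iommu_map()/iommu_map_sg() and the new _atomic variants all funnel into __iommu_map()/__iommu_map_sg(); the only difference is the GFP flags used for page-table allocations (GFP_KERNEL behind a might_sleep(), versus GFP_ATOMIC), which is what lets the dma-iommu changes earlier in this diff map from atomic context. A minimal caller-side sketch:

/* Process context: page-table allocations may sleep */
static int example_map_sleepable(struct iommu_domain *domain, unsigned long iova,
				 phys_addr_t paddr, size_t size)
{
	return iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE);
}

/* Atomic context (e.g. under a spinlock): use the GFP_ATOMIC variant */
static int example_map_atomic(struct iommu_domain *domain, unsigned long iova,
			      phys_addr_t paddr, size_t size)
{
	return iommu_map_atomic(domain, iova, paddr, size,
				IOMMU_READ | IOMMU_WRITE);
}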
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 2639fc718117..d02edd2751f3 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -50,6 +50,9 @@ struct ipmmu_features {
bool twobit_imttbcr_sl0;
bool reserved_context;
bool cache_snoop;
+ unsigned int ctx_offset_base;
+ unsigned int ctx_offset_stride;
+ unsigned int utlb_offset_base;
};
struct ipmmu_vmsa_device {
@@ -99,125 +102,49 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
#define IM_NS_ALIAS_OFFSET 0x800
-#define IM_CTX_SIZE 0x40
-
-#define IMCTR 0x0000
-#define IMCTR_TRE (1 << 17)
-#define IMCTR_AFE (1 << 16)
-#define IMCTR_RTSEL_MASK (3 << 4)
-#define IMCTR_RTSEL_SHIFT 4
-#define IMCTR_TREN (1 << 3)
-#define IMCTR_INTEN (1 << 2)
-#define IMCTR_FLUSH (1 << 1)
-#define IMCTR_MMUEN (1 << 0)
-
-#define IMCAAR 0x0004
-
-#define IMTTBCR 0x0008
-#define IMTTBCR_EAE (1 << 31)
-#define IMTTBCR_PMB (1 << 30)
-#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_SH1_MASK (3 << 28) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_NC (0 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_WB_WA (1 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_WT (2 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_WB (3 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN1_MASK (3 << 26) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_NC (0 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_WB_WA (1 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_WT (2 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_WB (3 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN1_MASK (3 << 24) /* R-Car Gen2 only */
-#define IMTTBCR_TSZ1_MASK (7 << 16)
-#define IMTTBCR_TSZ1_SHIFT 16
-#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) /* R-Car Gen2 only */
-#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) /* R-Car Gen2 only */
+/* MMU "context" registers */
+#define IMCTR 0x0000 /* R-Car Gen2/3 */
+#define IMCTR_INTEN (1 << 2) /* R-Car Gen2/3 */
+#define IMCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */
+#define IMCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */
+
+#define IMTTBCR 0x0008 /* R-Car Gen2/3 */
+#define IMTTBCR_EAE (1 << 31) /* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */
-#define IMTTBCR_SH0_MASK (3 << 12) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_NC (0 << 10) /* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_WT (2 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_WB (3 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_ORGN0_MASK (3 << 10) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_NC (0 << 8) /* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_WT (2 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_WB (3 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_IRGN0_MASK (3 << 8) /* R-Car Gen2 only */
-#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) /* R-Car Gen3 only */
-#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) /* R-Car Gen3 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */
-#define IMTTBCR_SL0_LVL_2 (0 << 4)
-#define IMTTBCR_SL0_LVL_1 (1 << 4)
-#define IMTTBCR_TSZ0_MASK (7 << 0)
-#define IMTTBCR_TSZ0_SHIFT O
-
-#define IMBUSCR 0x000c
-#define IMBUSCR_DVM (1 << 2)
-#define IMBUSCR_BUSSEL_SYS (0 << 0)
-#define IMBUSCR_BUSSEL_CCI (1 << 0)
-#define IMBUSCR_BUSSEL_IMCAAR (2 << 0)
-#define IMBUSCR_BUSSEL_CCI_IMCAAR (3 << 0)
-#define IMBUSCR_BUSSEL_MASK (3 << 0)
-
-#define IMTTLBR0 0x0010
-#define IMTTUBR0 0x0014
-#define IMTTLBR1 0x0018
-#define IMTTUBR1 0x001c
-
-#define IMSTR 0x0020
-#define IMSTR_ERRLVL_MASK (3 << 12)
-#define IMSTR_ERRLVL_SHIFT 12
-#define IMSTR_ERRCODE_TLB_FORMAT (1 << 8)
-#define IMSTR_ERRCODE_ACCESS_PERM (4 << 8)
-#define IMSTR_ERRCODE_SECURE_ACCESS (5 << 8)
-#define IMSTR_ERRCODE_MASK (7 << 8)
-#define IMSTR_MHIT (1 << 4)
-#define IMSTR_ABORT (1 << 2)
-#define IMSTR_PF (1 << 1)
-#define IMSTR_TF (1 << 0)
-
-#define IMMAIR0 0x0028
-#define IMMAIR1 0x002c
-#define IMMAIR_ATTR_MASK 0xff
-#define IMMAIR_ATTR_DEVICE 0x04
-#define IMMAIR_ATTR_NC 0x44
-#define IMMAIR_ATTR_WBRWA 0xff
-#define IMMAIR_ATTR_SHIFT(n) ((n) << 3)
-#define IMMAIR_ATTR_IDX_NC 0
-#define IMMAIR_ATTR_IDX_WBRWA 1
-#define IMMAIR_ATTR_IDX_DEV 2
-
-#define IMELAR 0x0030 /* IMEAR on R-Car Gen2 */
-#define IMEUAR 0x0034 /* R-Car Gen3 only */
-
-#define IMPCTR 0x0200
-#define IMPSTR 0x0208
-#define IMPEAR 0x020c
-#define IMPMBA(n) (0x0280 + ((n) * 4))
-#define IMPMBD(n) (0x02c0 + ((n) * 4))
+#define IMTTBCR_SL0_LVL_1 (1 << 4) /* R-Car Gen2 only */
+
+#define IMBUSCR 0x000c /* R-Car Gen2 only */
+#define IMBUSCR_DVM (1 << 2) /* R-Car Gen2 only */
+#define IMBUSCR_BUSSEL_MASK (3 << 0) /* R-Car Gen2 only */
+
+#define IMTTLBR0 0x0010 /* R-Car Gen2/3 */
+#define IMTTUBR0 0x0014 /* R-Car Gen2/3 */
+
+#define IMSTR 0x0020 /* R-Car Gen2/3 */
+#define IMSTR_MHIT (1 << 4) /* R-Car Gen2/3 */
+#define IMSTR_ABORT (1 << 2) /* R-Car Gen2/3 */
+#define IMSTR_PF (1 << 1) /* R-Car Gen2/3 */
+#define IMSTR_TF (1 << 0) /* R-Car Gen2/3 */
+#define IMMAIR0 0x0028 /* R-Car Gen2/3 */
+
+#define IMELAR 0x0030 /* R-Car Gen2/3, IMEAR on R-Car Gen2 */
+#define IMEUAR 0x0034 /* R-Car Gen3 only */
+
+/* uTLB registers */
#define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
-#define IMUCTR0(n) (0x0300 + ((n) * 16))
-#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16))
-#define IMUCTR_FIXADDEN (1 << 31)
-#define IMUCTR_FIXADD_MASK (0xff << 16)
-#define IMUCTR_FIXADD_SHIFT 16
-#define IMUCTR_TTSEL_MMU(n) ((n) << 4)
-#define IMUCTR_TTSEL_PMB (8 << 4)
-#define IMUCTR_TTSEL_MASK (15 << 4)
-#define IMUCTR_FLUSH (1 << 1)
-#define IMUCTR_MMUEN (1 << 0)
+#define IMUCTR0(n) (0x0300 + ((n) * 16)) /* R-Car Gen2/3 */
+#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) /* R-Car Gen3 only */
+#define IMUCTR_TTSEL_MMU(n) ((n) << 4) /* R-Car Gen2/3 */
+#define IMUCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */
+#define IMUCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */
#define IMUASID(n) ((n) < 32 ? IMUASID0(n) : IMUASID32(n))
-#define IMUASID0(n) (0x0308 + ((n) * 16))
-#define IMUASID32(n) (0x0608 + (((n) - 32) * 16))
-#define IMUASID_ASID8_MASK (0xff << 8)
-#define IMUASID_ASID8_SHIFT 8
-#define IMUASID_ASID0_MASK (0xff << 0)
-#define IMUASID_ASID0_SHIFT 0
+#define IMUASID0(n) (0x0308 + ((n) * 16)) /* R-Car Gen2/3 */
+#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) /* R-Car Gen3 only */
/* -----------------------------------------------------------------------------
* Root device handling
@@ -264,29 +191,61 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
iowrite32(data, mmu->base + offset);
}
+static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
+ unsigned int context_id, unsigned int reg)
+{
+ return mmu->features->ctx_offset_base +
+ context_id * mmu->features->ctx_offset_stride + reg;
+}
+
+static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
+ unsigned int context_id, unsigned int reg)
+{
+ return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
+}
+
+static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
+ unsigned int context_id, unsigned int reg, u32 data)
+{
+ ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
+}
+
static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
unsigned int reg)
{
- return ipmmu_read(domain->mmu->root,
- domain->context_id * IM_CTX_SIZE + reg);
+ return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}
static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
unsigned int reg, u32 data)
{
- ipmmu_write(domain->mmu->root,
- domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}
static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
unsigned int reg, u32 data)
{
if (domain->mmu != domain->mmu->root)
- ipmmu_write(domain->mmu,
- domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);
- ipmmu_write(domain->mmu->root,
- domain->context_id * IM_CTX_SIZE + reg, data);
+ ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
+}
+
+static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
+{
+ return mmu->features->utlb_offset_base + reg;
+}
+
+static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
+ unsigned int utlb, u32 data)
+{
+ ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
+}
+
+static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
+ unsigned int utlb, u32 data)
+{
+ ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}
/* -----------------------------------------------------------------------------
@@ -334,11 +293,10 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
*/
/* TODO: What should we set the ASID to ? */
- ipmmu_write(mmu, IMUASID(utlb), 0);
+ ipmmu_imuasid_write(mmu, utlb, 0);
/* TODO: Do we need to flush the microTLB ? */
- ipmmu_write(mmu, IMUCTR(utlb),
- IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
- IMUCTR_MMUEN);
+ ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
+ IMUCTR_FLUSH | IMUCTR_MMUEN);
mmu->utlb_ctx[utlb] = domain->context_id;
}
@@ -350,7 +308,7 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
{
struct ipmmu_vmsa_device *mmu = domain->mmu;
- ipmmu_write(mmu, IMUCTR(utlb), 0);
+ ipmmu_imuctr_write(mmu, utlb, 0);
mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}
@@ -438,7 +396,7 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
/* MAIR0 */
ipmmu_ctx_write_root(domain, IMMAIR0,
- domain->cfg.arm_lpae_s1_cfg.mair[0]);
+ domain->cfg.arm_lpae_s1_cfg.mair);
/* IMBUSCR */
if (domain->mmu->features->setup_imbuscr)
@@ -724,7 +682,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
}
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
@@ -783,6 +741,7 @@ static int ipmmu_init_platform_device(struct device *dev,
static const struct soc_device_attribute soc_rcar_gen3[] = {
{ .soc_id = "r8a774a1", },
+ { .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
{ .soc_id = "r8a7795", },
{ .soc_id = "r8a7796", },
@@ -794,6 +753,7 @@ static const struct soc_device_attribute soc_rcar_gen3[] = {
};
static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
+ { .soc_id = "r8a774b1", },
{ .soc_id = "r8a774c0", },
{ .soc_id = "r8a7795", .revision = "ES3.*" },
{ .soc_id = "r8a77965", },
@@ -985,7 +945,7 @@ static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
/* Disable all contexts. */
for (i = 0; i < mmu->num_ctx; ++i)
- ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
+ ipmmu_ctx_write(mmu, i, IMCTR, 0);
}
static const struct ipmmu_features ipmmu_features_default = {
@@ -997,6 +957,9 @@ static const struct ipmmu_features ipmmu_features_default = {
.twobit_imttbcr_sl0 = false,
.reserved_context = false,
.cache_snoop = true,
+ .ctx_offset_base = 0,
+ .ctx_offset_stride = 0x40,
+ .utlb_offset_base = 0,
};
static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
@@ -1008,6 +971,9 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
.twobit_imttbcr_sl0 = true,
.reserved_context = true,
.cache_snoop = false,
+ .ctx_offset_base = 0,
+ .ctx_offset_stride = 0x40,
+ .utlb_offset_base = 0,
};
static const struct of_device_id ipmmu_of_ids[] = {
@@ -1018,6 +984,9 @@ static const struct of_device_id ipmmu_of_ids[] = {
.compatible = "renesas,ipmmu-r8a774a1",
.data = &ipmmu_features_rcar_gen3,
}, {
+ .compatible = "renesas,ipmmu-r8a774b1",
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
.compatible = "renesas,ipmmu-r8a774c0",
.data = &ipmmu_features_rcar_gen3,
}, {
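
The new ctx_offset_base, ctx_offset_stride and utlb_offset_base feature fields reduce every context and uTLB register access to a base-plus-stride computation, so future IPMMU hardware only needs to supply different offsets. A minimal sketch (not part of the patch) using the values from the two feature structs above:

        static unsigned int example_imctr_offset(unsigned int context_id)
        {
                /* ctx_offset_base = 0, ctx_offset_stride = 0x40, reg = IMCTR (0x0000) */
                return 0 + context_id * 0x40 + 0x0000;  /* context 2 -> 0x80, matching the
                                                         * old context_id * IM_CTX_SIZE    */
        }
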
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index be99d408cf35..93f14bca26ee 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -504,7 +504,7 @@ fail:
}
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t pa, size_t len, int prot)
+ phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 67a483c1a935..6fc1f5ecf91e 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -101,8 +101,6 @@
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
struct mtk_iommu_domain {
- spinlock_t pgtlock; /* lock for page table */
-
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
@@ -173,13 +171,16 @@ static void mtk_iommu_tlb_flush_all(void *cookie)
}
}
-static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
- size_t granule, bool leaf,
- void *cookie)
+static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
{
struct mtk_iommu_data *data = cookie;
+ unsigned long flags;
+ int ret;
+ u32 tmp;
for_each_m4u(data) {
+ spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + REG_MMU_INV_SEL);
@@ -188,23 +189,10 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
data->base + REG_MMU_INVLD_END_A);
writel_relaxed(F_MMU_INV_RANGE,
data->base + REG_MMU_INVALIDATE);
- data->tlb_flush_active = true;
- }
-}
-
-static void mtk_iommu_tlb_sync(void *cookie)
-{
- struct mtk_iommu_data *data = cookie;
- int ret;
- u32 tmp;
-
- for_each_m4u(data) {
- /* Avoid timing out if there's nothing to wait for */
- if (!data->tlb_flush_active)
- return;
+ /* tlb sync */
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
- tmp, tmp != 0, 10, 100000);
+ tmp, tmp != 0, 10, 1000);
if (ret) {
dev_warn(data->dev,
"Partial TLB flush timed out, falling back to full flush\n");
@@ -212,35 +200,24 @@ static void mtk_iommu_tlb_sync(void *cookie)
}
/* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
- data->tlb_flush_active = false;
+ spin_unlock_irqrestore(&data->tlb_lock, flags);
}
}
-static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie);
- mtk_iommu_tlb_sync(cookie);
-}
-
-static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie);
- mtk_iommu_tlb_sync(cookie);
-}
-
static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule,
void *cookie)
{
- mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
+ struct mtk_iommu_data *data = cookie;
+ struct iommu_domain *domain = &data->m4u_dom->domain;
+
+ iommu_iotlb_gather_add_page(domain, gather, iova, granule);
}
static const struct iommu_flush_ops mtk_iommu_flush_ops = {
.tlb_flush_all = mtk_iommu_tlb_flush_all,
- .tlb_flush_walk = mtk_iommu_tlb_flush_walk,
- .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf,
+ .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
+ .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};
@@ -316,8 +293,6 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- spin_lock_init(&dom->pgtlock);
-
dom->cfg = (struct io_pgtable_cfg) {
.quirks = IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
@@ -412,22 +387,17 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- unsigned long flags;
- int ret;
/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
if (data->enable_4GB)
paddr |= BIT_ULL(32);
- spin_lock_irqsave(&dom->pgtlock, flags);
- ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
- spin_unlock_irqrestore(&dom->pgtlock, flags);
-
- return ret;
+ /* Synchronize with the tlb_lock */
+ return dom->iop->map(dom->iop, iova, paddr, size, prot);
}
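
For the "4GB mode" handling just above, a minimal sketch (not patch code) of how an address round-trips: mtk_iommu_map() sets bit 32 before the physical address reaches the page table, and mtk_iommu_iova_to_phys() further down clears it again for results at or above the remap base.

        u64 pa_in  = 0x40000000ULL;             /* address handed to mtk_iommu_map()    */
        u64 pa_pte = pa_in | BIT_ULL(32);       /* 0x1_4000_0000, what lands in the PTE */
        u64 pa_out = pa_pte & ~BIT_ULL(32);     /* what iova_to_phys() reports back     */
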
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
@@ -435,25 +405,26 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- unsigned long flags;
- size_t unmapsz;
-
- spin_lock_irqsave(&dom->pgtlock, flags);
- unmapsz = dom->iop->unmap(dom->iop, iova, size, gather);
- spin_unlock_irqrestore(&dom->pgtlock, flags);
- return unmapsz;
+ return dom->iop->unmap(dom->iop, iova, size, gather);
}
static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
- mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+ mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
}
static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
- mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+ struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+ size_t length = gather->end - gather->start;
+
+ if (gather->start == ULONG_MAX)
+ return;
+
+ mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
+ data);
}
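
Seen from the IOMMU core, the gather-based scheme the driver now implements works roughly like this (a sketch of the caller's side, not code from the patch): unmap operations record ranges via .tlb_add_page, and the final sync issues one ranged flush under the new tlb_lock.

        struct iommu_iotlb_gather gather;

        iommu_iotlb_gather_init(&gather);
        iommu_unmap_fast(domain, iova, size, &gather);  /* mtk_iommu_tlb_flush_page_nosync()
                                                         * records the range in the gather   */
        iommu_tlb_sync(domain, &gather);                /* reaches mtk_iommu_iotlb_sync(),
                                                         * which does one range flush + sync */
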
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -461,13 +432,9 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
- unsigned long flags;
phys_addr_t pa;
- spin_lock_irqsave(&dom->pgtlock, flags);
pa = dom->iop->iova_to_phys(dom->iop, iova);
- spin_unlock_irqrestore(&dom->pgtlock, flags);
-
if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
pa &= ~BIT_ULL(32);
@@ -733,6 +700,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
return ret;
+ spin_lock_init(&data->tlb_lock);
list_add_tail(&data->list, &m4ulist);
if (!iommu_present(&platform_bus_type))
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index fc0f16eabacd..ea949a324e33 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -57,7 +57,7 @@ struct mtk_iommu_data {
struct mtk_iommu_domain *m4u_dom;
struct iommu_group *m4u_group;
bool enable_4GB;
- bool tlb_flush_active;
+ spinlock_t tlb_lock; /* lock for tlb range flush */
struct iommu_device iommu;
const struct mtk_iommu_plat_data *plat_data;
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b5efd6dac953..e93b94ecac45 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -295,7 +295,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 09c6e1c680db..be551cc34be4 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1339,7 +1339,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
- phys_addr_t pa, size_t bytes, int prot)
+ phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct device *dev = omap_domain->dev;
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index c31e7bc4ccbe..52f38292df5b 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -284,9 +284,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
/* MAIRs (stage-1 only) */
iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
- pgtbl_cfg.arm_lpae_s1_cfg.mair[0]);
+ pgtbl_cfg.arm_lpae_s1_cfg.mair);
iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
- pgtbl_cfg.arm_lpae_s1_cfg.mair[1]);
+ pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);
/* SCTLR */
reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
@@ -423,7 +423,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
}
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
int ret;
unsigned long flags;
@@ -539,8 +539,8 @@ static int qcom_iommu_add_device(struct device *dev)
}
group = iommu_group_get_for_dev(dev);
- if (IS_ERR_OR_NULL(group))
- return PTR_ERR_OR_ZERO(group);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
iommu_group_put(group);
iommu_device_link(&qcom_iommu->iommu, dev);
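
The MAIR0/MAIR1 writes in the first hunk of this file follow from io-pgtable-arm now reporting a single 64-bit MAIR value (see the include/linux/io-pgtable.h hunk further below). A minimal sketch of the equivalence, not patch code:

        u64 mair  = pgtbl_cfg.arm_lpae_s1_cfg.mair;
        u32 mair0 = lower_32_bits(mair);        /* what used to be mair[0]          */
        u32 mair1 = upper_32_bits(mair);        /* what used to be mair[1], written
                                                 * above as "mair >> 32"            */
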
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4dcbf68dfda4..b33cdd5aad81 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -527,7 +527,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
int i, err;
err = pm_runtime_get_if_in_use(iommu->dev);
- if (WARN_ON_ONCE(err <= 0))
+ if (!err || WARN_ON_ONCE(err < 0))
return ret;
if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
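
The reworked condition matches the three outcomes of pm_runtime_get_if_in_use(): a positive value means the device is active and the usage count was taken, zero means the device is not currently in use (for instance a shared or spurious interrupt while the IOMMU is suspended) and can be ignored quietly, and a negative value indicates runtime PM is unavailable and still deserves a one-time WARN. Summarised as a sketch, not patch code:

        /*
         *  err > 0  - in use, usage count taken: go handle the fault status
         *  err == 0 - not in use: bail out silently (the case that used to WARN)
         *  err < 0  - runtime PM disabled or broken: WARN_ON_ONCE and bail out
         */
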
@@ -758,7 +758,7 @@ unwind:
}
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
@@ -980,13 +980,13 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
if (!dma_dev)
return NULL;
- rk_domain = devm_kzalloc(dma_dev, sizeof(*rk_domain), GFP_KERNEL);
+ rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
return NULL;
if (type == IOMMU_DOMAIN_DMA &&
iommu_get_dma_cookie(&rk_domain->domain))
- return NULL;
+ goto err_free_domain;
/*
* rk32xx iommus use a 2 level pagetable.
@@ -1021,6 +1021,8 @@ err_free_dt:
err_put_cookie:
if (type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
+err_free_domain:
+ kfree(rk_domain);
return NULL;
}
@@ -1049,6 +1051,7 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
if (domain->type == IOMMU_DOMAIN_DMA)
iommu_put_dma_cookie(&rk_domain->domain);
+ kfree(rk_domain);
}
static int rk_iommu_add_device(struct device *dev)
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 3b0b18e23187..1137f3ddcb85 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -265,7 +265,7 @@ undo_cpu_trans:
}
static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
int flags = ZPCI_PTE_VALID, rc = 0;
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 3924f7c05544..3fb7ba72507d 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -178,7 +178,7 @@ static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
}
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t pa, size_t bytes, int prot)
+ phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
struct gart_device *gart = gart_handle;
int ret;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 7293fc3f796d..63a147b623e6 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -159,9 +159,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
return (addr & smmu->pfn_mask) == addr;
}
-static dma_addr_t smmu_pde_to_dma(u32 pde)
+static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
- return pde << 12;
+ return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}
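
The masking and widening in smmu_pde_to_dma() matter because a PDE is a 32-bit value carrying attribute bits above the PFN field, and shifting it as a plain u32 would both keep those attribute bits and truncate PFNs wider than 20 bits. A minimal sketch of the corrected arithmetic, not patch code:

        u32 pde;        /* as read from the page directory: PFN low, attributes high */
        dma_addr_t dma = (dma_addr_t)(pde & smmu->pfn_mask) << 12;
                        /* mask drops the attribute bits, the cast widens
                         * the value before the shift                      */
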
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
@@ -240,7 +240,7 @@ static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
static inline void smmu_flush(struct tegra_smmu *smmu)
{
- smmu_readl(smmu, SMMU_CONFIG);
+ smmu_readl(smmu, SMMU_PTB_ASID);
}
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
@@ -351,6 +351,20 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
unsigned int i;
u32 value;
+ group = tegra_smmu_find_swgroup(smmu, swgroup);
+ if (group) {
+ value = smmu_readl(smmu, group->reg);
+ value &= ~SMMU_ASID_MASK;
+ value |= SMMU_ASID_VALUE(asid);
+ value |= SMMU_ASID_ENABLE;
+ smmu_writel(smmu, value, group->reg);
+ } else {
+ pr_warn("%s group from swgroup %u not found\n", __func__,
+ swgroup);
+ /* No point moving ahead if group was not found */
+ return;
+ }
+
for (i = 0; i < smmu->soc->num_clients; i++) {
const struct tegra_mc_client *client = &smmu->soc->clients[i];
@@ -361,15 +375,6 @@ static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
value |= BIT(client->smmu.bit);
smmu_writel(smmu, value, client->smmu.reg);
}
-
- group = tegra_smmu_find_swgroup(smmu, swgroup);
- if (group) {
- value = smmu_readl(smmu, group->reg);
- value &= ~SMMU_ASID_MASK;
- value |= SMMU_ASID_VALUE(asid);
- value |= SMMU_ASID_ENABLE;
- smmu_writel(smmu, value, group->reg);
- }
}
static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
@@ -549,6 +554,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
dma_addr_t *dmap)
{
unsigned int pd_index = iova_pd_index(iova);
+ struct tegra_smmu *smmu = as->smmu;
struct page *pt_page;
u32 *pd;
@@ -557,7 +563,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
return NULL;
pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pd_index]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
return tegra_smmu_pte_offset(pt_page, iova);
}
@@ -599,7 +605,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
} else {
u32 *pd = page_address(as->pd);
- *dmap = smmu_pde_to_dma(pd[pde]);
+ *dmap = smmu_pde_to_dma(smmu, pd[pde]);
}
return tegra_smmu_pte_offset(as->pts[pde], iova);
@@ -624,7 +630,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
u32 *pd = page_address(as->pd);
- dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+ dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
tegra_smmu_set_pde(as, iova, 0);
@@ -650,7 +656,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
}
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 3ea9d7682999..315c7cc4f99d 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -153,7 +153,6 @@ static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
*/
static int __viommu_sync_req(struct viommu_dev *viommu)
{
- int ret = 0;
unsigned int len;
size_t write_len;
struct viommu_request *req;
@@ -182,7 +181,7 @@ static int __viommu_sync_req(struct viommu_dev *viommu)
kfree(req);
}
- return ret;
+ return 0;
}
static int viommu_sync_req(struct viommu_dev *viommu)
@@ -713,7 +712,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
static int viommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
int ret;
u32 flags;
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 439d7d886873..a113e811faab 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -366,6 +366,8 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
static const struct dev_pm_ops smi_larb_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver mtk_smi_larb_driver = {
@@ -507,6 +509,8 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
static const struct dev_pm_ops smi_common_pm_ops = {
SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver mtk_smi_common_driver = {
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index a7cf3599d9a1..f64ca27dc210 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -129,6 +129,7 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
+extern void intel_iommu_shutdown(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
@@ -137,6 +138,7 @@ extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
+static inline void intel_iommu_shutdown(void) { }
#define dmar_parse_one_rmrr dmar_res_noop
#define dmar_parse_one_atsr dmar_res_noop
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index ec7a13405f10..ee21eedafe98 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -102,7 +102,7 @@ struct io_pgtable_cfg {
struct {
u64 ttbr[2];
u64 tcr;
- u64 mair[2];
+ u64 mair;
} arm_lpae_s1_cfg;
struct {
diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h
new file mode 100644
index 000000000000..6f000d7a0ddc
--- /dev/null
+++ b/include/linux/ioasid.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_IOASID_H
+#define __LINUX_IOASID_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#define INVALID_IOASID ((ioasid_t)-1)
+typedef unsigned int ioasid_t;
+typedef ioasid_t (*ioasid_alloc_fn_t)(ioasid_t min, ioasid_t max, void *data);
+typedef void (*ioasid_free_fn_t)(ioasid_t ioasid, void *data);
+
+struct ioasid_set {
+ int dummy;
+};
+
+/**
+ * struct ioasid_allocator_ops - IOASID allocator helper functions and data
+ *
+ * @alloc: helper function to allocate IOASID
+ * @free: helper function to free IOASID
+ * @list: for tracking ops that share helper functions but not data
+ * @pdata: data belonging to the allocator, provided when calling alloc()
+ */
+struct ioasid_allocator_ops {
+ ioasid_alloc_fn_t alloc;
+ ioasid_free_fn_t free;
+ struct list_head list;
+ void *pdata;
+};
+
+#define DECLARE_IOASID_SET(name) struct ioasid_set name = { 0 }
+
+#if IS_ENABLED(CONFIG_IOASID)
+ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
+ void *private);
+void ioasid_free(ioasid_t ioasid);
+void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
+ bool (*getter)(void *));
+int ioasid_register_allocator(struct ioasid_allocator_ops *allocator);
+void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator);
+int ioasid_set_data(ioasid_t ioasid, void *data);
+
+#else /* !CONFIG_IOASID */
+static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min,
+ ioasid_t max, void *private)
+{
+ return INVALID_IOASID;
+}
+
+static inline void ioasid_free(ioasid_t ioasid)
+{
+}
+
+static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
+ bool (*getter)(void *))
+{
+ return NULL;
+}
+
+static inline int ioasid_register_allocator(struct ioasid_allocator_ops *allocator)
+{
+ return -ENOTSUPP;
+}
+
+static inline void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator)
+{
+}
+
+static inline int ioasid_set_data(ioasid_t ioasid, void *data)
+{
+ return -ENOTSUPP;
+}
+
+#endif /* CONFIG_IOASID */
+#endif /* __LINUX_IOASID_H */
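
This header is the hook for the custom PASID allocator support mentioned in the pull request. As a rough sketch (hypothetical my_* names, not code from this series), a driver or paravirt backend that wants to own PASID allocation supplies alloc/free callbacks and registers them:

        static ioasid_t my_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
        {
                /* e.g. ask firmware or a host VMM for an ID in [min, max] */
                return INVALID_IOASID;          /* placeholder: failure */
        }

        static void my_ioasid_free(ioasid_t ioasid, void *data)
        {
        }

        static struct ioasid_allocator_ops my_ioasid_ops = {
                .alloc = my_ioasid_alloc,
                .free  = my_ioasid_free,
        };

        /* typically from probe/init code: */
        ioasid_register_allocator(&my_ioasid_ops);
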
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 29bac5345563..f2223cbb5fd5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>
#define IOMMU_READ (1 << 0)
@@ -31,11 +32,11 @@
*/
#define IOMMU_PRIV (1 << 5)
/*
- * Non-coherent masters on few Qualcomm SoCs can use this page protection flag
- * to set correct cacheability attributes to use an outer level of cache -
- * last level cache, aka system cache.
+ * Non-coherent masters can use this page protection flag to set cacheable
+ * memory attributes for only a transparent outer level of cache, also known as
+ * the last-level or system cache.
*/
-#define IOMMU_QCOM_SYS_CACHE (1 << 6)
+#define IOMMU_SYS_CACHE_ONLY (1 << 6)
struct iommu_ops;
struct iommu_group;
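
The renamed flag stays an ordinary IOMMU_* protection bit, so (as a sketch with placeholder domain/iova/paddr, not code from the series) a master that wants only the outer system cache to hold its buffers would map with:

        iommu_map(domain, iova, paddr, SZ_4K,
                  IOMMU_READ | IOMMU_WRITE | IOMMU_SYS_CACHE_ONLY);
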
@@ -244,7 +245,10 @@ struct iommu_iotlb_gather {
* @sva_unbind: Unbind process address space from device
* @sva_get_pasid: Get PASID associated to a SVA handle
* @page_response: handle page request response
+ * @cache_invalidate: invalidate translation caches
* @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @sva_bind_gpasid: bind guest pasid and mm
+ * @sva_unbind_gpasid: unbind guest pasid and mm
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
@@ -256,7 +260,7 @@ struct iommu_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size, struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain);
@@ -306,6 +310,12 @@ struct iommu_ops {
int (*page_response)(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg);
+ int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info);
+ int (*sva_bind_gpasid)(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data);
+
+ int (*sva_unbind_gpasid)(struct device *dev, int pasid);
unsigned long pgsize_bitmap;
};
@@ -417,10 +427,19 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
+extern int iommu_cache_invalidate(struct iommu_domain *domain,
+ struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info);
+extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data);
+extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
+extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
@@ -428,6 +447,9 @@ extern size_t iommu_unmap_fast(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents, int prot);
+extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
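
The new _atomic entry points mirror the existing calls but are meant for callers that cannot sleep, which is what the gfp_t argument added to the driver .map callbacks above plumbs through (GFP_KERNEL on the regular path, GFP_ATOMIC on the atomic one). A sketch with placeholder arguments, not code from the series:

        /* may sleep: page-table memory allocated with GFP_KERNEL */
        iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);

        /* atomic context: page-table memory allocated with GFP_ATOMIC */
        iommu_map_atomic(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
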
@@ -662,6 +684,13 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
}
+static inline int iommu_map_atomic(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
+{
+ return -ENODEV;
+}
+
static inline size_t iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
@@ -682,6 +711,13 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
return 0;
}
+static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ return 0;
+}
+
static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
}
@@ -1005,6 +1041,25 @@ static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
return IOMMU_PASID_INVALID;
}
+static inline int
+iommu_cache_invalidate(struct iommu_domain *domain,
+ struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info)
+{
+ return -ENODEV;
+}
+static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
+ struct device *dev, int pasid)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_IOMMU_API */
#ifdef CONFIG_IOMMU_DEBUGFS
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 2d5eff506e13..ffd72b3b14ee 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -58,6 +58,7 @@ extern int qcom_scm_set_remote_state(u32 state, u32 id);
extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
#else
@@ -97,6 +98,7 @@ qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; }
static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; }
static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; }
static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; }
+static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { return -ENODEV; }
static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return -ENODEV; }
static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return -ENODEV; }
#endif
diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h
index fc00c5d4741b..4ad3496e5c43 100644
--- a/include/uapi/linux/iommu.h
+++ b/include/uapi/linux/iommu.h
@@ -152,4 +152,173 @@ struct iommu_page_response {
__u32 code;
};
+/* defines the granularity of the invalidation */
+enum iommu_inv_granularity {
+ IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */
+ IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */
+ IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */
+ IOMMU_INV_GRANU_NR, /* number of invalidation granularities */
+};
+
+/**
+ * struct iommu_inv_addr_info - Address Selective Invalidation Structure
+ *
+ * @flags: indicates the granularity of the address-selective invalidation
+ * - If the PASID bit is set, the @pasid field is populated and the invalidation
+ * relates to cache entries tagged with this PASID and matching the address
+ * range.
+ * - If the ARCHID bit is set, @archid is populated and the invalidation
+ *   relates to cache entries tagged with this architecture-specific ID and
+ *   matching the address range.
+ * - Both PASID and ARCHID can be set as they may tag different caches.
+ * - If neither PASID nor ARCHID is set, global addr invalidation applies.
+ * - The LEAF flag indicates whether only the leaf PTE caching needs to be
+ * invalidated and other paging structure caches can be preserved.
+ * @pasid: process address space ID
+ * @archid: architecture-specific ID
+ * @addr: first stage/level input address
+ * @granule_size: page/block size of the mapping in bytes
+ * @nb_granules: number of contiguous granules to be invalidated
+ */
+struct iommu_inv_addr_info {
+#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0)
+#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1)
+#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2)
+ __u32 flags;
+ __u32 archid;
+ __u64 pasid;
+ __u64 addr;
+ __u64 granule_size;
+ __u64 nb_granules;
+};
+
+/**
+ * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure
+ *
+ * @flags: indicates the granularity of the PASID-selective invalidation
+ * - If the PASID bit is set, the @pasid field is populated and the invalidation
+ * relates to cache entries tagged with this PASID and matching the address
+ * range.
+ * - If the ARCHID bit is set, @archid is populated and the invalidation
+ *   relates to cache entries tagged with this architecture-specific ID and
+ *   matching the address range.
+ * - Both PASID and ARCHID can be set as they may tag different caches.
+ * - At least one of PASID or ARCHID must be set.
+ * @pasid: process address space ID
+ * @archid: architecture-specific ID
+ */
+struct iommu_inv_pasid_info {
+#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0)
+#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1)
+ __u32 flags;
+ __u32 archid;
+ __u64 pasid;
+};
+
+/**
+ * struct iommu_cache_invalidate_info - First level/stage invalidation
+ * information
+ * @version: API version of this structure
+ * @cache: bitfield that allows to select which caches to invalidate
+ * @granularity: defines the lowest granularity used for the invalidation:
+ * domain > PASID > addr
+ * @padding: reserved for future use (should be zero)
+ * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID
+ * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR
+ *
+ * Not all the combinations of cache/granularity are valid:
+ *
+ * +--------------+---------------+---------------+---------------+
+ * | type / | DEV_IOTLB | IOTLB | PASID |
+ * | granularity | | | cache |
+ * +==============+===============+===============+===============+
+ * | DOMAIN | N/A | Y | Y |
+ * +--------------+---------------+---------------+---------------+
+ * | PASID | Y | Y | Y |
+ * +--------------+---------------+---------------+---------------+
+ * | ADDR | Y | Y | N/A |
+ * +--------------+---------------+---------------+---------------+
+ *
+ * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than
+ * @version and @cache.
+ *
+ * If multiple cache types are invalidated simultaneously, they all
+ * must support the used granularity.
+ */
+struct iommu_cache_invalidate_info {
+#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
+ __u32 version;
+/* IOMMU paging structure cache */
+#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */
+#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */
+#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */
+#define IOMMU_CACHE_INV_TYPE_NR (3)
+ __u8 cache;
+ __u8 granularity;
+ __u8 padding[2];
+ union {
+ struct iommu_inv_pasid_info pasid_info;
+ struct iommu_inv_addr_info addr_info;
+ };
+};
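
As a sketch of how a VMM-side caller might fill this in for a page-selective, PASID-tagged IOTLB invalidation (placeholder pasid/iova values, not code from the series), following the granularity table above:

        struct iommu_cache_invalidate_info inv = {
                .version        = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
                .cache          = IOMMU_CACHE_INV_TYPE_IOTLB,
                .granularity    = IOMMU_INV_GRANU_ADDR,
                .addr_info      = {
                        .flags          = IOMMU_INV_ADDR_FLAGS_PASID,
                        .pasid          = pasid,        /* placeholder */
                        .addr           = iova,         /* placeholder */
                        .granule_size   = SZ_4K,
                        .nb_granules    = 1,
                },
        };

        iommu_cache_invalidate(domain, dev, &inv);
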
+
+/**
+ * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest
+ * SVA binding.
+ *
+ * @flags: VT-d PASID table entry attributes
+ * @pat: Page attribute table data to compute effective memory type
+ * @emt: Extended memory type
+ *
+ * Only guest vIOMMU selectable and effective options are passed down to
+ * the host IOMMU.
+ */
+struct iommu_gpasid_bind_data_vtd {
+#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */
+#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */
+#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */
+#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */
+#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */
+#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */
+ __u64 flags;
+ __u32 pat;
+ __u32 emt;
+};
+
+/**
+ * struct iommu_gpasid_bind_data - Information about device and guest PASID binding
+ * @version: Version of this data structure
+ * @format: PASID table entry format
+ * @flags: Additional information on guest bind request
+ * @gpgd: Guest page directory base of the guest mm to bind
+ * @hpasid: Process address space ID used for the guest mm in host IOMMU
+ * @gpasid: Process address space ID used for the guest mm in guest IOMMU
+ * @addr_width: Guest virtual address width
+ * @padding: Reserved for future use (should be zero)
+ * @vtd: Intel VT-d specific data
+ *
+ * The guest-to-host PASID mapping can be an identity or non-identity one, where
+ * the guest has its own PASID space. For a non-identity mapping, a guest-to-host
+ * PASID lookup is needed when the VM programs a guest PASID into an assigned
+ * device. The VMM may trap such PASID programming and then request the host
+ * IOMMU driver to convert the guest PASID to a host PASID based on this bind
+ * data.
+ */
+struct iommu_gpasid_bind_data {
+#define IOMMU_GPASID_BIND_VERSION_1 1
+ __u32 version;
+#define IOMMU_PASID_FORMAT_INTEL_VTD 1
+ __u32 format;
+#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */
+ __u64 flags;
+ __u64 gpgd;
+ __u64 hpasid;
+ __u64 gpasid;
+ __u32 addr_width;
+ __u8 padding[12];
+ /* Vendor specific data */
+ union {
+ struct iommu_gpasid_bind_data_vtd vtd;
+ };
+};
+
#endif /* _UAPI_IOMMU_H */
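
And, as a matching sketch of the guest PASID bind path through the iommu_sva_bind_gpasid() entry point added in include/linux/iommu.h above (gcr3, host_pasid, guest_pasid, domain and dev are placeholders, not code from the series):

        struct iommu_gpasid_bind_data bind = {
                .version        = IOMMU_GPASID_BIND_VERSION_1,
                .format         = IOMMU_PASID_FORMAT_INTEL_VTD,
                .flags          = IOMMU_SVA_GPASID_VAL,
                .gpgd           = gcr3,                 /* guest page directory */
                .hpasid         = host_pasid,
                .gpasid         = guest_pasid,
                .addr_width     = 48,
        };

        iommu_sva_bind_gpasid(domain, dev, &bind);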