Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig                       |  11
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h         |   2
-rw-r--r--  drivers/iommu/amd/iommu.c                   |  21
-rw-r--r--  drivers/iommu/apple-dart.c                  |  10
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c |  21
-rw-r--r--  drivers/iommu/arm/arm-smmu/Makefile         |   3
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-impl.c  |   3
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c  |   3
-rw-r--r--  drivers/iommu/dma-iommu.c                   | 200
-rw-r--r--  drivers/iommu/iommu.c                       |   3
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c                  |  32
-rw-r--r--  drivers/iommu/mtk_iommu.c                   |   4
-rw-r--r--  drivers/iommu/tegra-smmu.c                  |   5
13 files changed, 178 insertions(+), 140 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 124c41adeca1..3eb68fa1b8cc 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -308,7 +308,6 @@ config APPLE_DART
config ARM_SMMU
tristate "ARM Ltd. System MMU (SMMU) Support"
depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
- depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
@@ -356,6 +355,14 @@ config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
'arm-smmu.disable_bypass' will continue to override this
config.
+config ARM_SMMU_QCOM
+ def_tristate y
+ depends on ARM_SMMU && ARCH_QCOM
+ select QCOM_SCM
+ help
+ When running on a Qualcomm platform that has the custom variant
+ of the ARM SMMU, this needs to be built into the SMMU driver.
+
config ARM_SMMU_V3
tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
depends on ARM64
@@ -438,7 +445,7 @@ config QCOM_IOMMU
# Note: iommu drivers cannot (yet?) be built as modules
bool "Qualcomm IOMMU Support"
depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
- depends on QCOM_SCM=y
+ select QCOM_SCM
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 8dbe61e2b3c1..867535eb0ce9 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -138,6 +138,8 @@
#define EVENT_DOMID_MASK_HI 0xf0000
#define EVENT_FLAGS_MASK 0xfff
#define EVENT_FLAGS_SHIFT 0x10
+#define EVENT_FLAG_RW 0x020
+#define EVENT_FLAG_I 0x008
/* feature control bits */
#define CONTROL_IOMMU_EN 0x00ULL
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 1722bb161841..beadcffcc223 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -473,6 +473,12 @@ static void amd_iommu_report_rmp_fault(volatile u32 *event)
pci_dev_put(pdev);
}
+#define IS_IOMMU_MEM_TRANSACTION(flags) \
+ (((flags) & EVENT_FLAG_I) == 0)
+
+#define IS_WRITE_REQUEST(flags) \
+ ((flags) & EVENT_FLAG_RW)
+
static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
u64 address, int flags)
{
@@ -485,6 +491,20 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
dev_data = dev_iommu_priv_get(&pdev->dev);
if (dev_data) {
+ /*
+ * If this is a DMA fault (for which the I(nterrupt)
+ * bit will be unset), allow report_iommu_fault() to
+ * prevent logging it.
+ */
+ if (IS_IOMMU_MEM_TRANSACTION(flags)) {
+ if (!report_iommu_fault(&dev_data->domain->domain,
+ &pdev->dev, address,
+ IS_WRITE_REQUEST(flags) ?
+ IOMMU_FAULT_WRITE :
+ IOMMU_FAULT_READ))
+ goto out;
+ }
+
if (__ratelimit(&dev_data->rs)) {
pci_err(pdev, "Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%llx flags=0x%04x]\n",
domain_id, address, flags);
@@ -495,6 +515,7 @@ static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
domain_id, address, flags);
}
+out:
if (pdev)
pci_dev_put(pdev);
}
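
The hunk above routes DMA page faults (I bit clear) through report_iommu_fault(), which invokes any handler registered on the domain; a zero return takes the new 'out' label and suppresses the ratelimited IO_PAGE_FAULT log. A minimal sketch of the consumer side using the existing iommu_set_fault_handler() API; my_dev_expects_fault() and my_install_handler() are hypothetical stand-ins:

#include <linux/iommu.h>

bool my_dev_expects_fault(void *token, unsigned long iova);     /* hypothetical */

static int my_fault_handler(struct iommu_domain *domain, struct device *dev,
                            unsigned long iova, int flags, void *token)
{
        /* flags is IOMMU_FAULT_READ or IOMMU_FAULT_WRITE, as set above */
        if (my_dev_expects_fault(token, iova))
                return 0;       /* handled: amd_iommu skips its logging */
        return -ENOSYS;         /* unhandled: falls through to pci_err() */
}

/* e.g. from the client driver's attach path */
static void my_install_handler(struct iommu_domain *domain, void *priv)
{
        iommu_set_fault_handler(domain, my_fault_handler, priv);
}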
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index fdfa39ec2a4d..96d4a1f8de79 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -15,7 +15,6 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dev_printk.h>
-#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -70,6 +69,8 @@
#define DART_ERROR_ADDR_HI 0x54
#define DART_ERROR_ADDR_LO 0x50
+#define DART_STREAMS_ENABLE 0xfc
+
#define DART_TCR(sid) (0x100 + 4 * (sid))
#define DART_TCR_TRANSLATE_ENABLE BIT(7)
#define DART_TCR_BYPASS0_ENABLE BIT(8)
@@ -301,6 +302,9 @@ static int apple_dart_hw_reset(struct apple_dart *dart)
apple_dart_hw_disable_dma(&stream_map);
apple_dart_hw_clear_all_ttbrs(&stream_map);
+ /* enable all streams globally since TCR is used to control isolation */
+ writel(DART_STREAM_ALL, dart->regs + DART_STREAMS_ENABLE);
+
/* clear any pending errors before the interrupt is unmasked */
writel(readl(dart->regs + DART_ERROR), dart->regs + DART_ERROR);
@@ -578,7 +582,6 @@ static struct iommu_domain *apple_dart_domain_alloc(unsigned int type)
if (!dart_domain)
return NULL;
- iommu_get_dma_cookie(&dart_domain->domain);
mutex_init(&dart_domain->init_lock);
/* no need to allocate pgtbl_ops or do any other finalization steps */
@@ -702,13 +705,12 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
if (!group)
goto out;
- group_master_cfg = kzalloc(sizeof(*group_master_cfg), GFP_KERNEL);
+ group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg), GFP_KERNEL);
if (!group_master_cfg) {
iommu_group_put(group);
goto out;
}
- memcpy(group_master_cfg, cfg, sizeof(*group_master_cfg));
iommu_group_set_iommudata(group, group_master_cfg,
apple_dart_release_group);
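
The group-data allocation collapses a kzalloc() + memcpy() pair into kmemdup(), which allocates and copies in one call and skips the pointless pre-zeroing. A generic sketch of the equivalence, with an illustrative type standing in for the DART master config:

#include <linux/slab.h>

struct cfg { int sid; };        /* stand-in, not the driver's type */

static struct cfg *dup_cfg(const struct cfg *src)
{
        /*
         * Before: dst = kzalloc(sizeof(*dst), GFP_KERNEL); followed by
         * memcpy(dst, src, sizeof(*dst)), where the zeroing is overwritten
         * immediately. kmemdup() does both steps at once.
         */
        return kmemdup(src, sizeof(*src), GFP_KERNEL);
}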
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index a388e318f86e..f5848b351b19 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -409,10 +409,7 @@ static void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);
/* Convert the erroneous command into a CMD_SYNC */
- if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
- dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
- return;
- }
+ arm_smmu_cmdq_build_cmd(cmd, &cmd_sync);
queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}
@@ -860,7 +857,7 @@ static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
{
u64 cmd[CMDQ_ENT_DWORDS];
- if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
+ if (unlikely(arm_smmu_cmdq_build_cmd(cmd, ent))) {
dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
ent->opcode);
return -EINVAL;
@@ -885,11 +882,20 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
struct arm_smmu_cmdq_batch *cmds,
struct arm_smmu_cmdq_ent *cmd)
{
+ int index;
+
if (cmds->num == CMDQ_BATCH_ENTRIES) {
arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
cmds->num = 0;
}
- arm_smmu_cmdq_build_cmd(&cmds->cmds[cmds->num * CMDQ_ENT_DWORDS], cmd);
+
+ index = cmds->num * CMDQ_ENT_DWORDS;
+ if (unlikely(arm_smmu_cmdq_build_cmd(&cmds->cmds[index], cmd))) {
+ dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
+ cmd->opcode);
+ return;
+ }
+
cmds->num++;
}
@@ -1764,10 +1770,11 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
{
int i;
struct arm_smmu_cmdq_ent cmd;
- struct arm_smmu_cmdq_batch cmds = {};
+ struct arm_smmu_cmdq_batch cmds;
arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
+ cmds.num = 0;
for (i = 0; i < master->num_streams; i++) {
cmd.atc.sid = master->streams[i].id;
arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
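
Two fixes in the batch path: arm_smmu_cmdq_batch_add() now refuses unknown opcodes instead of enqueueing a malformed command, and arm_smmu_atc_inv_master() initialises only cmds.num rather than zero-filling the whole batch struct, whose cmds[] array spans CMDQ_BATCH_ENTRIES commands, on a hot invalidation path. The flush-when-full pattern itself, as a standalone sketch with illustrative names and assumed sizes (issue_cmdlist() stands in for arm_smmu_cmdq_issue_cmdlist()):

#include <linux/string.h>
#include <linux/types.h>

#define BATCH_ENTRIES   64      /* assumed batch size */
#define ENT_DWORDS      2       /* assumed dwords per command */

void issue_cmdlist(u64 *cmds, int n);   /* hypothetical stand-in */

struct batch {
        u64 cmds[BATCH_ENTRIES * ENT_DWORDS];
        int num;                /* the only field that needs initialising */
};

static void batch_add(struct batch *b, const u64 *cmd)
{
        if (b->num == BATCH_ENTRIES) {  /* full: issue what we have... */
                issue_cmdlist(b->cmds, b->num);
                b->num = 0;             /* ...and start a fresh batch */
        }
        memcpy(&b->cmds[b->num * ENT_DWORDS], cmd, ENT_DWORDS * sizeof(u64));
        b->num++;
}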
diff --git a/drivers/iommu/arm/arm-smmu/Makefile b/drivers/iommu/arm/arm-smmu/Makefile
index e240a7bcf310..b0cc01aa20c9 100644
--- a/drivers/iommu/arm/arm-smmu/Makefile
+++ b/drivers/iommu/arm/arm-smmu/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
-arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o arm-smmu-qcom.o
+arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
+arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
index 9f465e146799..2c25cce38060 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c
@@ -215,7 +215,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
of_device_is_compatible(np, "nvidia,tegra186-smmu"))
return nvidia_smmu_impl_init(smmu);
- smmu = qcom_smmu_impl_init(smmu);
+ if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM))
+ smmu = qcom_smmu_impl_init(smmu);
if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
smmu->impl = &mrvl_mmu500_impl;
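
IS_ENABLED(CONFIG_ARM_SMMU_QCOM) compiles to if (0) when the option is off, so the compiler discards the branch and no reference to qcom_smmu_impl_init() survives to link time even though arm-smmu-qcom.o is no longer built. A sketch of the usual header-side arrangement; this is a general kernel pattern shown as an assumption here, since a bare declaration alone also suffices once the call is dead code:

#ifdef CONFIG_ARM_SMMU_QCOM
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
#else
static inline struct arm_smmu_device *
qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{
        return smmu;    /* no-op when the Qualcomm variant is compiled out */
}
#endif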
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 55690af1b25d..ca736b065dd0 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -231,6 +231,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7180-mss-pil" },
{ .compatible = "qcom,sc7280-mdss" },
+ { .compatible = "qcom,sc7280-mss-pil" },
{ .compatible = "qcom,sc8180x-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
@@ -403,12 +404,14 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,msm8998-smmu-v2" },
+ { .compatible = "qcom,qcm2290-smmu-500" },
{ .compatible = "qcom,sc7180-smmu-500" },
{ .compatible = "qcom,sc7280-smmu-500" },
{ .compatible = "qcom,sc8180x-smmu-500" },
{ .compatible = "qcom,sdm630-smmu-v2" },
{ .compatible = "qcom,sdm845-smmu-500" },
{ .compatible = "qcom,sm6125-smmu-500" },
+ { .compatible = "qcom,sm6350-smmu-500" },
{ .compatible = "qcom,sm8150-smmu-500" },
{ .compatible = "qcom,sm8250-smmu-500" },
{ .compatible = "qcom,sm8350-smmu-500" },
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 896bea04c347..b42e38a0dbe2 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -98,9 +98,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
/**
* iommu_get_dma_cookie - Acquire DMA-API resources for a domain
* @domain: IOMMU domain to prepare for DMA-API usage
- *
- * IOMMU drivers should normally call this from their domain_alloc
- * callback when domain->type == IOMMU_DOMAIN_DMA.
*/
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
@@ -113,7 +110,6 @@ int iommu_get_dma_cookie(struct iommu_domain *domain)
return 0;
}
-EXPORT_SYMBOL(iommu_get_dma_cookie);
/**
* iommu_get_msi_cookie - Acquire just MSI remapping resources
@@ -151,8 +147,6 @@ EXPORT_SYMBOL(iommu_get_msi_cookie);
* iommu_put_dma_cookie - Release a domain's DMA mapping resources
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
* iommu_get_msi_cookie()
- *
- * IOMMU drivers should normally call this from their domain_free callback.
*/
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
@@ -172,7 +166,6 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
kfree(cookie);
domain->iova_cookie = NULL;
}
-EXPORT_SYMBOL(iommu_put_dma_cookie);
/**
* iommu_dma_get_resv_regions - Reserved region driver helper
@@ -317,6 +310,11 @@ static bool dev_is_untrusted(struct device *dev)
return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}
+static bool dev_use_swiotlb(struct device *dev)
+{
+ return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
+}
+
/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
@@ -510,23 +508,6 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}
-static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct iommu_domain *domain = iommu_get_dma_domain(dev);
- phys_addr_t phys;
-
- phys = iommu_iova_to_phys(domain, dma_addr);
- if (WARN_ON(!phys))
- return;
-
- __iommu_dma_unmap(dev, dma_addr, size);
-
- if (unlikely(is_swiotlb_buffer(dev, phys)))
- swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
-}
-
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t size, int prot, u64 dma_mask)
{
@@ -553,52 +534,6 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
return iova + iova_off;
}
-static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
- size_t org_size, dma_addr_t dma_mask, bool coherent,
- enum dma_data_direction dir, unsigned long attrs)
-{
- int prot = dma_info_to_prot(dir, coherent, attrs);
- struct iommu_domain *domain = iommu_get_dma_domain(dev);
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_domain *iovad = &cookie->iovad;
- size_t aligned_size = org_size;
- void *padding_start;
- size_t padding_size;
- dma_addr_t iova;
-
- /*
- * If both the physical buffer start address and size are
- * page aligned, we don't need to use a bounce page.
- */
- if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
- iova_offset(iovad, phys | org_size)) {
- aligned_size = iova_align(iovad, org_size);
- phys = swiotlb_tbl_map_single(dev, phys, org_size,
- aligned_size, dir, attrs);
-
- if (phys == DMA_MAPPING_ERROR)
- return DMA_MAPPING_ERROR;
-
- /* Cleanup the padding area. */
- padding_start = phys_to_virt(phys);
- padding_size = aligned_size;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE ||
- dir == DMA_BIDIRECTIONAL)) {
- padding_start += org_size;
- padding_size -= org_size;
- }
-
- memset(padding_start, 0, padding_size);
- }
-
- iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
- if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
- swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
- return iova;
-}
-
static void __iommu_dma_free_pages(struct page **pages, int count)
{
while (count--)
@@ -616,7 +551,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
if (!order_mask)
return NULL;
- pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
+ pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
if (!pages)
return NULL;
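
kvcalloc() is the overflow-checked spelling of the removed kvzalloc(count * sizeof(*pages), ...): a huge count can wrap the open-coded multiplication into a short allocation, whereas kvcalloc() returns NULL on overflow, matching what kcalloc() does on the slab side. A minimal sketch:

#include <linux/mm.h>   /* kvcalloc() */

static struct page **alloc_page_array(size_t count)
{
        /* checked multiply: NULL on overflow instead of a short buffer */
        return kvcalloc(count, sizeof(struct page *), GFP_KERNEL);
}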
@@ -794,7 +729,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
{
phys_addr_t phys;
- if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
+ if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
@@ -810,7 +745,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
{
phys_addr_t phys;
- if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
+ if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
@@ -828,17 +763,13 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg;
int i;
- if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
- return;
-
- for_each_sg(sgl, sg, nelems, i) {
- if (!dev_is_dma_coherent(dev))
+ if (dev_use_swiotlb(dev))
+ for_each_sg(sgl, sg, nelems, i)
+ iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+ sg->length, dir);
+ else if (!dev_is_dma_coherent(dev))
+ for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
-
- if (is_swiotlb_buffer(dev, sg_phys(sg)))
- swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
- sg->length, dir);
- }
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -848,17 +779,14 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg;
int i;
- if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
- return;
-
- for_each_sg(sgl, sg, nelems, i) {
- if (is_swiotlb_buffer(dev, sg_phys(sg)))
- swiotlb_sync_single_for_device(dev, sg_phys(sg),
- sg->length, dir);
-
- if (!dev_is_dma_coherent(dev))
+ if (dev_use_swiotlb(dev))
+ for_each_sg(sgl, sg, nelems, i)
+ iommu_dma_sync_single_for_device(dev,
+ sg_dma_address(sg),
+ sg->length, dir);
+ else if (!dev_is_dma_coherent(dev))
+ for_each_sg(sgl, sg, nelems, i)
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
- }
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -867,22 +795,66 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
{
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
- dma_addr_t dma_handle;
+ int prot = dma_info_to_prot(dir, coherent, attrs);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ dma_addr_t iova, dma_mask = dma_get_mask(dev);
+
+ /*
+ * If both the physical buffer start address and size are
+ * page aligned, we don't need to use a bounce page.
+ */
+ if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
+ void *padding_start;
+ size_t padding_size, aligned_size;
+
+ aligned_size = iova_align(iovad, size);
+ phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
+ iova_mask(iovad), dir, attrs);
+
+ if (phys == DMA_MAPPING_ERROR)
+ return DMA_MAPPING_ERROR;
- dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
- coherent, dir, attrs);
- if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- dma_handle != DMA_MAPPING_ERROR)
+ /* Cleanup the padding area. */
+ padding_start = phys_to_virt(phys);
+ padding_size = aligned_size;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
+ padding_start += size;
+ padding_size -= size;
+ }
+
+ memset(padding_start, 0, padding_size);
+ }
+
+ if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_device(phys, size, dir);
- return dma_handle;
+
+ iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
+ if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
+ swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+ return iova;
}
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
- __iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ phys_addr_t phys;
+
+ phys = iommu_iova_to_phys(domain, dma_handle);
+ if (WARN_ON(!phys))
+ return;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+ arch_sync_dma_for_cpu(phys, size, dir);
+
+ __iommu_dma_unmap(dev, dma_handle, size);
+
+ if (unlikely(is_swiotlb_buffer(dev, phys)))
+ swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
/*
@@ -967,7 +939,7 @@ static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *s
int i;
for_each_sg(sg, s, nents, i)
- __iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
+ iommu_dma_unmap_page(dev, sg_dma_address(s),
sg_dma_len(s), dir, attrs);
}
@@ -978,9 +950,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nents, i) {
- sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
- s->length, dma_get_mask(dev),
- dev_is_dma_coherent(dev), dir, attrs);
+ sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
+ s->offset, s->length, dir, attrs);
if (sg_dma_address(s) == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(s) = s->length;
@@ -1016,15 +987,16 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
ret = iommu_deferred_attach(dev, domain);
- goto out;
+ if (ret)
+ goto out;
}
+ if (dev_use_swiotlb(dev))
+ return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
+
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
- if (dev_is_untrusted(dev))
- return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
-
/*
* Work out how much IOVA space we need, and align the segments to
* IOVA granules for the IOMMU driver to handle. With some clever
@@ -1097,14 +1069,14 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
struct scatterlist *tmp;
int i;
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
-
- if (dev_is_untrusted(dev)) {
+ if (dev_use_swiotlb(dev)) {
iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
return;
}
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
/*
* The scatterlist segments are mapped into a single
* contiguous IOVA allocation, so this is incredibly easy.
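
The heart of the inlined map path above is the bounce decision dev_use_swiotlb(dev) && iova_offset(iovad, phys | size): ORing the physical start address with the size lets one mask test catch misalignment in either, and a sub-granule mapping would expose unrelated memory sharing the IOVA granule to an untrusted device, hence the swiotlb bounce with zeroed padding. A worked sketch, assuming a 4 KiB granule so iova_offset() reduces to masking with 0xfff:

#include <linux/types.h>

static bool needs_bounce(phys_addr_t phys, size_t size)
{
        const unsigned long granule_mask = 0xfff;       /* assumed 4 KiB */

        /*
         * phys=0x1000 size=0x1000: 0x1000 & 0xfff == 0     -> map directly
         * phys=0x1200 size=0x1000: 0x1200 & 0xfff == 0x200 -> bounce
         * phys=0x1000 size=0x0800: 0x1800 & 0xfff == 0x800 -> bounce
         */
        return ((phys | size) & granule_mask) != 0;
}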
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3303d707bab4..a80f13867bed 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1953,8 +1953,7 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
/* Assume all sizes by default; the driver may override this later */
domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
- /* Temporarily avoid -EEXIST while drivers still get their own cookies */
- if (iommu_is_dma_domain(domain) && !domain->iova_cookie && iommu_get_dma_cookie(domain)) {
+ if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
iommu_domain_free(domain);
domain = NULL;
}
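
With the transitional !domain->iova_cookie check gone, cookie allocation is solely the core's job: __iommu_domain_alloc() attaches it to every DMA domain, which is why the apple-dart hunk above could drop its own iommu_get_dma_cookie() call. A driver's domain_alloc reduces to allocating private state; a minimal sketch with an illustrative my_domain type:

#include <linux/iommu.h>
#include <linux/slab.h>

struct my_domain {
        struct iommu_domain domain;
        /* driver-private state */
};

static struct iommu_domain *my_domain_alloc(unsigned int type)
{
        struct my_domain *md;

        if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        md = kzalloc(sizeof(*md), GFP_KERNEL);
        if (!md)
                return NULL;

        /* no iommu_get_dma_cookie() here; the core attaches the cookie */
        return &md->domain;
}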
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index d38ff29a76e8..ca752bdc710f 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -33,10 +33,10 @@
#define arm_iommu_detach_device(...) do {} while (0)
#endif
-#define IPMMU_CTX_MAX 8U
+#define IPMMU_CTX_MAX 16U
#define IPMMU_CTX_INVALID -1
-#define IPMMU_UTLB_MAX 48U
+#define IPMMU_UTLB_MAX 64U
struct ipmmu_features {
bool use_ns_alias_offset;
@@ -189,8 +189,12 @@ static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
unsigned int context_id, unsigned int reg)
{
- return mmu->features->ctx_offset_base +
- context_id * mmu->features->ctx_offset_stride + reg;
+ unsigned int base = mmu->features->ctx_offset_base;
+
+ if (context_id > 7)
+ base += 0x800 - 8 * 0x40;
+
+ return base + context_id * mmu->features->ctx_offset_stride + reg;
}
static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
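
The context_id > 7 bias accounts for the hardware placing contexts 8..15 in a second register bank instead of continuing linearly. Worked numbers for the R-Car Gen3 layout (ctx_offset_base = 0, ctx_offset_stride = 0x40 in the existing features table), where the second bank starts at 0x800:

/*
 * bias = 0x800 - 8 * 0x40 = 0x600
 *
 *   ctx 7:       0 + 7 * 0x40 = 0x1c0    (first bank)
 *   ctx 8:   0x600 + 8 * 0x40 = 0x800    (second bank begins)
 *   ctx 9:   0x600 + 9 * 0x40 = 0x840
 */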
@@ -922,6 +926,20 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
.utlb_offset_base = 0,
};
+static const struct ipmmu_features ipmmu_features_r8a779a0 = {
+ .use_ns_alias_offset = false,
+ .has_cache_leaf_nodes = true,
+ .number_of_contexts = 16,
+ .num_utlbs = 64,
+ .setup_imbuscr = false,
+ .twobit_imttbcr_sl0 = true,
+ .reserved_context = true,
+ .cache_snoop = false,
+ .ctx_offset_base = 0x10000,
+ .ctx_offset_stride = 0x1040,
+ .utlb_offset_base = 0x3000,
+};
+
static const struct of_device_id ipmmu_of_ids[] = {
{
.compatible = "renesas,ipmmu-vmsa",
@@ -954,12 +972,18 @@ static const struct of_device_id ipmmu_of_ids[] = {
.compatible = "renesas,ipmmu-r8a77970",
.data = &ipmmu_features_rcar_gen3,
}, {
+ .compatible = "renesas,ipmmu-r8a77980",
+ .data = &ipmmu_features_rcar_gen3,
+ }, {
.compatible = "renesas,ipmmu-r8a77990",
.data = &ipmmu_features_rcar_gen3,
}, {
.compatible = "renesas,ipmmu-r8a77995",
.data = &ipmmu_features_rcar_gen3,
}, {
+ .compatible = "renesas,ipmmu-r8a779a0",
+ .data = &ipmmu_features_r8a779a0,
+ }, {
/* Terminator */
},
};
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index d837adfd1da5..25b834104790 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -550,7 +550,9 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
phys_addr_t pa;
pa = dom->iop->iova_to_phys(dom->iop, iova);
- if (dom->data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
+ if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
+ dom->data->enable_4GB &&
+ pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
pa &= ~BIT_ULL(32);
return pa;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 0a281833f611..e900e3c46903 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1079,7 +1079,6 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
struct tegra_mc *mc)
{
struct tegra_smmu *smmu;
- size_t size;
u32 value;
int err;
@@ -1097,9 +1096,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
*/
mc->smmu = smmu;
- size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);
-
- smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
+ smmu->asids = devm_bitmap_zalloc(dev, soc->num_asids, GFP_KERNEL);
if (!smmu->asids)
return ERR_PTR(-ENOMEM);
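
devm_bitmap_zalloc() folds the BITS_TO_LONGS() size computation into the helper and ties the bitmap to the device's lifetime, so the manual sizing goes away. A sketch of the two spellings side by side:

#include <linux/bitmap.h>
#include <linux/device.h>

static unsigned long *alloc_asid_map(struct device *dev, unsigned int nbits)
{
        /*
         * Open-coded form this patch removes:
         *   devm_kzalloc(dev, BITS_TO_LONGS(nbits) * sizeof(long),
         *                GFP_KERNEL);
         */
        return devm_bitmap_zalloc(dev, nbits, GFP_KERNEL);
}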