Diffstat (limited to 'drivers/iommu/tegra-smmu.c')
-rw-r--r--	drivers/iommu/tegra-smmu.c	| 138
1 file changed, 118 insertions(+), 20 deletions(-)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 124c8848ab7e..0becdbfea306 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -12,6 +12,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <soc/tegra/ahb.h>
@@ -19,8 +20,10 @@
struct tegra_smmu_group {
struct list_head list;
+ struct tegra_smmu *smmu;
const struct tegra_smmu_group_soc *soc;
struct iommu_group *group;
+ unsigned int swgroup;
};
struct tegra_smmu {
@@ -49,6 +52,7 @@ struct tegra_smmu_as {
struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
+ spinlock_t lock;
u32 *count;
struct page **pts;
struct page *pd;
@@ -127,6 +131,11 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
+#define SMMU_PAGE_MASK (~(SMMU_SIZE_PT-1))
+#define SMMU_OFFSET_IN_PAGE(x) ((unsigned long)(x) & ~SMMU_PAGE_MASK)
+#define SMMU_PFN_PHYS(x) ((phys_addr_t)(x) << SMMU_PTE_SHIFT)
+#define SMMU_PHYS_PFN(x) ((unsigned long)((x) >> SMMU_PTE_SHIFT))
+
#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)
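
These helpers are needed because the SMMU always operates on 4 KiB pages (SMMU_SIZE_PT), while the kernel's own PAGE_SIZE may be larger. The generic offset_in_page() and PFN_PHYS() helpers shift by PAGE_SHIFT, so they compute the wrong values on a 64 KiB-page kernel. A minimal userspace sketch of the difference, with an illustrative physical address:

#include <stdio.h>

#define SMMU_SIZE_PT		4096UL	/* the SMMU page size is fixed at 4 KiB */
#define SMMU_PTE_SHIFT		12
#define SMMU_PAGE_MASK		(~(SMMU_SIZE_PT - 1))
#define SMMU_OFFSET_IN_PAGE(x)	((unsigned long)(x) & ~SMMU_PAGE_MASK)
#define SMMU_PHYS_PFN(x)	((unsigned long)((x) >> SMMU_PTE_SHIFT))

int main(void)
{
	unsigned long paddr = 0x12345678UL;	/* illustrative value */

	/* SMMU view: pfn = 0x12345, offset = 0x678 */
	printf("pfn=%#lx offset=%#lx\n",
	       SMMU_PHYS_PFN(paddr), SMMU_OFFSET_IN_PAGE(paddr));

	/*
	 * On a PAGE_SHIFT=16 kernel, __phys_to_pfn(paddr) would give
	 * 0x1234 and offset_in_page(paddr) 0x5678, neither of which
	 * matches the SMMU's 4 KiB page layout.
	 */
	return 0;
}
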
@@ -308,6 +317,8 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
return NULL;
}
+ spin_lock_init(&as->lock);
+
/* setup aperture */
as->domain.geometry.aperture_start = 0;
as->domain.geometry.aperture_end = 0xffffffff;
@@ -569,19 +580,14 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
}
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
- dma_addr_t *dmap)
+ dma_addr_t *dmap, struct page *page)
{
unsigned int pde = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
if (!as->pts[pde]) {
- struct page *page;
dma_addr_t dma;
- page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
- if (!page)
- return NULL;
-
dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
DMA_TO_DEVICE);
if (dma_mapping_error(smmu->dev, dma)) {
@@ -644,7 +650,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
u32 *pte, dma_addr_t pte_dma, u32 val)
{
struct tegra_smmu *smmu = as->smmu;
- unsigned long offset = offset_in_page(pte);
+ unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);
*pte = val;
@@ -655,15 +661,61 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
smmu_flush(smmu);
}
-static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static struct page *as_get_pde_page(struct tegra_smmu_as *as,
+ unsigned long iova, gfp_t gfp,
+ unsigned long *flags)
+{
+ unsigned int pde = iova_pd_index(iova);
+ struct page *page = as->pts[pde];
+
+ /* first check whether an allocation needs to be done at all */
+ if (page)
+ return page;
+
+ /*
+ * To avoid exhausting the atomic memory pool, allocate the page in a
+ * sleeping context if the GFP flags permit. Hence the spinlock needs
+ * to be dropped and re-taken around the allocation.
+ */
+ if (!(gfp & __GFP_ATOMIC))
+ spin_unlock_irqrestore(&as->lock, *flags);
+
+ page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);
+
+ if (!(gfp & __GFP_ATOMIC))
+ spin_lock_irqsave(&as->lock, *flags);
+
+ /*
+ * In the case of a blocking allocation, a concurrent mapping may have
+ * won the race to allocate the PDE. If so, the page allocated here
+ * isn't needed, and a failure of this allocation isn't fatal.
+ */
+ if (as->pts[pde]) {
+ if (page)
+ __free_page(page);
+
+ page = as->pts[pde];
+ }
+
+ return page;
+}
+
+static int
+__tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
+ unsigned long *flags)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
+ struct page *page;
u32 pte_attrs;
u32 *pte;
- pte = as_get_pte(as, iova, &pte_dma);
+ page = as_get_pde_page(as, iova, gfp, flags);
+ if (!page)
+ return -ENOMEM;
+
+ pte = as_get_pte(as, iova, &pte_dma, page);
if (!pte)
return -ENOMEM;
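
The hunk above restructures mapping so that the PDE page is allocated by as_get_pde_page() before as_get_pte() needs it, which lets the potentially sleeping alloc_page() run with as->lock dropped. A minimal sketch of the drop-lock, allocate, re-lock, re-check pattern, with hypothetical names (struct table, slots); the patch itself tests !(gfp & __GFP_ATOMIC), for which gfpflags_allow_blocking() below is the equivalent idiomatic check:

#include <linux/gfp.h>
#include <linux/spinlock.h>

struct table {
	spinlock_t lock;	/* protects slots[] */
	void *slots[64];
};

/* Called with t->lock held; may drop and re-take it. */
static void *get_or_alloc_slot(struct table *t, unsigned int idx,
			       gfp_t gfp, unsigned long *flags)
{
	void *p = t->slots[idx];

	if (p)
		return p;

	/* A sleeping allocation is only legal with the lock dropped. */
	if (gfpflags_allow_blocking(gfp))
		spin_unlock_irqrestore(&t->lock, *flags);

	p = (void *)get_zeroed_page(gfp);

	if (gfpflags_allow_blocking(gfp))
		spin_lock_irqsave(&t->lock, *flags);

	/*
	 * Re-check after re-locking: a concurrent caller may have
	 * installed the slot while the lock was dropped. Free our
	 * page and use theirs; as in the patch, installing the new
	 * page into slots[] is left to the caller.
	 */
	if (t->slots[idx]) {
		if (p)
			free_page((unsigned long)p);
		p = t->slots[idx];
	}

	return p;
}
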
@@ -680,13 +732,14 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
pte_attrs |= SMMU_PTE_WRITABLE;
tegra_smmu_set_pte(as, iova, pte, pte_dma,
- __phys_to_pfn(paddr) | pte_attrs);
+ SMMU_PHYS_PFN(paddr) | pte_attrs);
return 0;
}
-static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+static size_t
+__tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
{
struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma;
@@ -702,6 +755,33 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return size;
}
+static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+ struct tegra_smmu_as *as = to_smmu_as(domain);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&as->lock, flags);
+ ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
+ spin_unlock_irqrestore(&as->lock, flags);
+
+ return ret;
+}
+
+static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size, struct iommu_iotlb_gather *gather)
+{
+ struct tegra_smmu_as *as = to_smmu_as(domain);
+ unsigned long flags;
+
+ spin_lock_irqsave(&as->lock, flags);
+ size = __tegra_smmu_unmap(domain, iova, size, gather);
+ spin_unlock_irqrestore(&as->lock, flags);
+
+ return size;
+}
+
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
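
The wrappers above establish a single lock discipline: every map/unmap takes as->lock with IRQs saved, and the gfp flags passed down decide whether __tegra_smmu_map() may momentarily drop it. The flags are passed by pointer because a blocking PDE allocation re-acquires the lock, and the outermost spin_unlock_irqrestore() must use the flags saved by that most recent acquisition. A hedged sketch of how callers reach the two paths through the core IOMMU API of this kernel era:

/* From a context that may sleep (GFP_KERNEL reaches tegra_smmu_map): */
ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);

/* From atomic context, e.g. under another spinlock (the core passes
 * GFP_ATOMIC down, so the PDE allocation must not sleep): */
ret = iommu_map_atomic(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
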
@@ -716,7 +796,7 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
pfn = *pte & as->smmu->pfn_mask;
- return PFN_PHYS(pfn);
+ return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
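
The iova_to_phys change above uses the SMMU's fixed 12-bit shift and re-attaches the offset within the 4 KiB page, both of which the old PFN_PHYS()-based return got wrong. A worked example with illustrative values:

/* With *pte holding pfn = 0xabcde and iova = 0x10002678:
 *
 *   old: PFN_PHYS(0xabcde) = 0xabcde << PAGE_SHIFT, which is only
 *        0xabcde000 when PAGE_SHIFT == 12, and the iova's in-page
 *        offset was dropped entirely
 *   new: SMMU_PFN_PHYS(0xabcde)          = 0xabcde000 always
 *      + SMMU_OFFSET_IN_PAGE(0x10002678) = 0x678
 *      = 0xabcde678
 */
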
@@ -813,22 +893,34 @@ tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
return NULL;
}
+static void tegra_smmu_group_release(void *iommu_data)
+{
+ struct tegra_smmu_group *group = iommu_data;
+ struct tegra_smmu *smmu = group->smmu;
+
+ mutex_lock(&smmu->lock);
+ list_del(&group->list);
+ mutex_unlock(&smmu->lock);
+}
+
static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
unsigned int swgroup)
{
const struct tegra_smmu_group_soc *soc;
struct tegra_smmu_group *group;
+ struct iommu_group *grp;
+ /* Find the group_soc associated with this swgroup */
soc = tegra_smmu_find_group(smmu, swgroup);
- if (!soc)
- return NULL;
mutex_lock(&smmu->lock);
+ /* Find an existing iommu_group associated with the swgroup or group_soc */
list_for_each_entry(group, &smmu->groups, list)
- if (group->soc == soc) {
+ if ((group->swgroup == swgroup) || (soc && group->soc == soc)) {
+ grp = iommu_group_ref_get(group->group);
mutex_unlock(&smmu->lock);
- return group->group;
+ return grp;
}
group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
@@ -838,6 +930,8 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
}
INIT_LIST_HEAD(&group->list);
+ group->swgroup = swgroup;
+ group->smmu = smmu;
group->soc = soc;
group->group = iommu_group_alloc();
@@ -847,6 +941,9 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
return NULL;
}
+ iommu_group_set_iommudata(group->group, group, tegra_smmu_group_release);
+ if (soc)
+ iommu_group_set_name(group->group, soc->name);
list_add_tail(&group->list, &smmu->groups);
mutex_unlock(&smmu->lock);
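
Together these hunks fix the group lifetime: lookups now take a reference via iommu_group_ref_get() instead of returning a borrowed pointer, and the release callback registered with iommu_group_set_iommudata() unlinks the tegra_smmu_group from smmu->groups only when the last reference goes away. A hedged usage sketch:

struct iommu_group *grp = tegra_smmu_group_get(smmu, swgroup);

/* ... use the group ... */

/* The final put invokes tegra_smmu_group_release(), which takes
 * smmu->lock and list_del()s the entry. */
iommu_group_put(grp);
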
@@ -1019,10 +1116,11 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu->dev = dev;
smmu->mc = mc;
- smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
+ smmu->pfn_mask =
+ BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
mc->soc->num_address_bits, smmu->pfn_mask);
- smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
+ smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
smmu->tlb_mask);
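
The arithmetic behind the two mask fixes, assuming (as on a Tegra210-class SoC) num_address_bits = 34 and num_tlb_lines = 48, on a kernel built with 64 KiB pages (PAGE_SHIFT = 16):

/* pfn_mask, old: BIT_MASK(34 - 16) - 1 = 0x3ffff   (18 bits; wrong
 *                whenever PAGE_SHIFT != 12)
 * pfn_mask, new: BIT_MASK(34 - 12) - 1 = 0x3fffff  (22 bits, matching
 *                the SMMU's fixed 4 KiB page size)
 *
 * tlb_mask, old: (48 << 1) - 1      = 0x5f  (sets a bit outside the
 *                active-lines field)
 * tlb_mask, new: (1 << fls(48)) - 1 = 0x3f  (fls(48) = 6, since
 *                48 = 0b110000)
 *
 * The two tlb_mask formulas agree only when num_tlb_lines is a
 * power of two.
 */
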