Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r--  drivers/iommu/dma-iommu.c  23
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 0cc702a70a96..c363294b3bb9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -19,6 +19,7 @@
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
@@ -44,7 +45,6 @@ struct iommu_dma_cookie {
dma_addr_t msi_iova;
};
struct list_head msi_page_list;
- spinlock_t msi_lock;
/* Domain for flush queue callback; NULL if flush queue not in use */
struct iommu_domain *fq_domain;
@@ -63,7 +63,6 @@ static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
if (cookie) {
- spin_lock_init(&cookie->msi_lock);
INIT_LIST_HEAD(&cookie->msi_page_list);
cookie->type = type;
}
@@ -399,7 +398,7 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
}
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
- size_t size, dma_addr_t dma_limit, struct device *dev)
+ size_t size, u64 dma_limit, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
@@ -424,7 +423,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
if (domain->geometry.force_aperture)
- dma_limit = min(dma_limit, domain->geometry.aperture_end);
+ dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
/* Try to get PCI devices a SAC address */
if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
@@ -477,7 +476,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot, dma_addr_t dma_mask)
+ size_t size, int prot, u64 dma_mask)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -1176,7 +1175,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (msi_page->phys == msi_addr)
return msi_page;
- msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
+ msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
if (!msi_page)
return NULL;
@@ -1206,7 +1205,7 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_dma_cookie *cookie;
struct iommu_dma_msi_page *msi_page;
- unsigned long flags;
+ static DEFINE_MUTEX(msi_prepare_lock); /* see below */
if (!domain || !domain->iova_cookie) {
desc->iommu_cookie = NULL;
@@ -1216,13 +1215,13 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
cookie = domain->iova_cookie;
/*
- * We disable IRQs to rule out a possible inversion against
- * irq_desc_lock if, say, someone tries to retarget the affinity
- * of an MSI from within an IPI handler.
+ * In fact the whole prepare operation should already be serialised by
+ * irq_domain_mutex further up the callchain, but that's pretty subtle
+ * on its own, so consider this locking as failsafe documentation...
*/
- spin_lock_irqsave(&cookie->msi_lock, flags);
+ mutex_lock(&msi_prepare_lock);
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
- spin_unlock_irqrestore(&cookie->msi_lock, flags);
+ mutex_unlock(&msi_prepare_lock);
msi_desc_set_iommu_cookie(desc, msi_page);
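
As context for the hunks above: a function-local static DEFINE_MUTEX, as introduced in iommu_dma_prepare_msi(), serialises every caller of that one function without needing lock storage in each cookie, and because a mutex may sleep the MSI page allocation can move from GFP_ATOMIC to GFP_KERNEL. Below is a minimal, hedged sketch of the same pattern; demo_prepare() and demo_find_or_alloc() are made-up names for illustration only, not code from this patch.

/* Sketch only: function-scoped static mutex serialising a prepare path. */
#include <linux/mutex.h>
#include <linux/slab.h>

static void *demo_find_or_alloc(void)
{
	/* May sleep: GFP_KERNEL is fine because no spinlock is held and
	 * the caller is not in atomic/IRQ context. */
	return kzalloc(64, GFP_KERNEL);
}

static void *demo_prepare(void)
{
	/* One mutex shared by all callers of this function, with no
	 * per-object storage needed (cf. the removed cookie->msi_lock). */
	static DEFINE_MUTEX(demo_prepare_lock);
	void *obj;

	mutex_lock(&demo_prepare_lock);
	obj = demo_find_or_alloc();
	mutex_unlock(&demo_prepare_lock);

	return obj;
}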