author     Sakari Ailus <sakari.ailus@linux.intel.com>        2015-07-13 13:31:28 +0200
committer  David Woodhouse <David.Woodhouse@intel.com>        2015-07-28 16:47:58 +0200
commit     ae1ff3d623905947158fd3394854c23026337810
tree       f4dbf8564370339f17021f74bfe37043e23f7872    /drivers/iommu/iova.c
parent     iommu/iova: Avoid over-allocating when size-aligned
iommu: iova: Move iova cache management to the iova library
This is necessary to separate intel-iommu from the iova library.

Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/iommu/iova.c')
-rw-r--r--    drivers/iommu/iova.c    83
1 file changed, 49 insertions(+), 34 deletions(-)
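
The patch below replaces the intel-iommu-only cache init/teardown with a reference-counted kmem_cache exported through iova_cache_get() and iova_cache_put(). A minimal sketch of how a client of the iova library might pair those calls with its own domain is shown here; it is not part of the patch, and the names example_iovad/example_setup/example_teardown as well as the domain parameters are illustrative assumptions.

/*
 * Usage sketch (not from the patch): take a cache reference before
 * setting up a domain, drop it after tearing the domain down.
 */
#include <linux/dma-mapping.h>
#include <linux/iova.h>
#include <linux/mm.h>
#include <linux/sizes.h>

static struct iova_domain example_iovad;	/* illustrative domain */

static int example_setup(void)
{
	int ret;

	/* First user creates the shared "iommu_iova" kmem_cache. */
	ret = iova_cache_get();
	if (ret)
		return ret;	/* -ENOMEM if the cache could not be created */

	/* 4 KiB granule, PFNs from 1, 32-bit ceiling: illustrative values. */
	init_iova_domain(&example_iovad, SZ_4K, 1,
			 DMA_BIT_MASK(32) >> PAGE_SHIFT);
	return 0;
}

static void example_teardown(void)
{
	put_iova_domain(&example_iovad);
	/* Last user destroys the shared kmem_cache. */
	iova_cache_put();
}

Because the cache is reference counted under iova_cache_mutex, independent users such as intel-iommu and other drivers can initialize and exit in any order without one of them owning the cache lifetime.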
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 29f2efcf668e..ed95f7a0fad3 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -20,40 +20,6 @@
#include <linux/iova.h>
#include <linux/slab.h>
-static struct kmem_cache *iommu_iova_cache;
-
-int iommu_iova_cache_init(void)
-{
- int ret = 0;
-
- iommu_iova_cache = kmem_cache_create("iommu_iova",
- sizeof(struct iova),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_iova_cache) {
- pr_err("Couldn't create iova cache\n");
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
-void iommu_iova_cache_destroy(void)
-{
- kmem_cache_destroy(iommu_iova_cache);
-}
-
-struct iova *alloc_iova_mem(void)
-{
- return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
-}
-
-void free_iova_mem(struct iova *iova)
-{
- kmem_cache_free(iommu_iova_cache, iova);
-}
-
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn, unsigned long pfn_32bit)
@@ -237,6 +203,55 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
rb_insert_color(&iova->node, root);
}
+static struct kmem_cache *iova_cache;
+static unsigned int iova_cache_users;
+static DEFINE_MUTEX(iova_cache_mutex);
+
+struct iova *alloc_iova_mem(void)
+{
+ return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(alloc_iova_mem);
+
+void free_iova_mem(struct iova *iova)
+{
+ kmem_cache_free(iova_cache, iova);
+}
+EXPORT_SYMBOL(free_iova_mem);
+
+int iova_cache_get(void)
+{
+ mutex_lock(&iova_cache_mutex);
+ if (!iova_cache_users) {
+ iova_cache = kmem_cache_create(
+ "iommu_iova", sizeof(struct iova), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!iova_cache) {
+ mutex_unlock(&iova_cache_mutex);
+ printk(KERN_ERR "Couldn't create iova cache\n");
+ return -ENOMEM;
+ }
+ }
+
+ iova_cache_users++;
+ mutex_unlock(&iova_cache_mutex);
+
+ return 0;
+}
+
+void iova_cache_put(void)
+{
+ mutex_lock(&iova_cache_mutex);
+ if (WARN_ON(!iova_cache_users)) {
+ mutex_unlock(&iova_cache_mutex);
+ return;
+ }
+ iova_cache_users--;
+ if (!iova_cache_users)
+ kmem_cache_destroy(iova_cache);
+ mutex_unlock(&iova_cache_mutex);
+}
+
/**
* alloc_iova - allocates an iova
* @iovad: - iova domain in question