author    Jean-Philippe Brucker <jean-philippe@linaro.org>    2020-04-23 14:53:28 +0200
committer Joerg Roedel <jroedel@suse.de>    2020-05-29 14:52:53 +0200
commit    fb01562e5a8a731bb1807eba0a9fadb355ca2277 (patch)
tree      44ea2f6f48fcd596858564915070adb29154c30f /drivers/misc/uacce
parent    iommu/vt-d: Use pci_ats_supported() (diff)
uacce: Remove mm_exit() op
The mm_exit() op will be removed from the SVA API. When a process dies and its mm goes away, the IOMMU driver won't notify device drivers anymore. Drivers should expect to handle a lot more aborted DMA. On the upside, it does greatly simplify the queue management.

The uacce_mm struct, which tracks all queues bound to an mm, was only used by the mm_exit() callback. Remove it.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Acked-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Acked-by: Lu Baolu <baolu.lu@linux.intel.com>
Acked-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Link: https://lore.kernel.org/r/20200423125329.782066-2-jean-philippe@linaro.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/misc/uacce')
-rw-r--r--    drivers/misc/uacce/uacce.c    | 172
1 file changed, 42 insertions(+), 130 deletions(-)
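For orientation before reading the diff: with uacce_mm gone, each queue binds its own SVA handle and links directly into a per-device list. A pared-down sketch of the resulting layout follows (field names are taken from the diff below; the matching header change in include/linux/uacce.h is outside this diffstat, and fields not touched by this patch are elided):

    #include <linux/iommu.h>
    #include <linux/list.h>
    #include <linux/mutex.h>

    struct uacce_queue {
            struct uacce_device *uacce;
            struct iommu_sva *handle;    /* per-queue SVA bond, previously held by uacce_mm */
            int pasid;                   /* PASID passed to ops->get_queue() */
            struct list_head list;       /* links into uacce_device::queues */
            /* ... wait queue, state, mmapped regions ... */
    };

    struct uacce_device {
            struct list_head queues;     /* every open queue; replaces mm_list */
            struct mutex queues_lock;    /* replaces both mm_lock and uacce_mm->lock */
            /* ... */
    };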
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index d39307f060bd..107028e77ca3 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -90,109 +90,39 @@ static long uacce_fops_compat_ioctl(struct file *filep,
}
#endif
-static int uacce_sva_exit(struct device *dev, struct iommu_sva *handle,
- void *data)
+static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
{
- struct uacce_mm *uacce_mm = data;
- struct uacce_queue *q;
-
- /*
- * No new queue can be added concurrently because no caller can have a
- * reference to this mm. But there may be concurrent calls to
- * uacce_mm_put(), so we need the lock.
- */
- mutex_lock(&uacce_mm->lock);
- list_for_each_entry(q, &uacce_mm->queues, list)
- uacce_put_queue(q);
- uacce_mm->mm = NULL;
- mutex_unlock(&uacce_mm->lock);
+ int pasid;
+ struct iommu_sva *handle;
- return 0;
-}
-
-static struct iommu_sva_ops uacce_sva_ops = {
- .mm_exit = uacce_sva_exit,
-};
-
-static struct uacce_mm *uacce_mm_get(struct uacce_device *uacce,
- struct uacce_queue *q,
- struct mm_struct *mm)
-{
- struct uacce_mm *uacce_mm = NULL;
- struct iommu_sva *handle = NULL;
- int ret;
-
- lockdep_assert_held(&uacce->mm_lock);
-
- list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
- if (uacce_mm->mm == mm) {
- mutex_lock(&uacce_mm->lock);
- list_add(&q->list, &uacce_mm->queues);
- mutex_unlock(&uacce_mm->lock);
- return uacce_mm;
- }
- }
-
- uacce_mm = kzalloc(sizeof(*uacce_mm), GFP_KERNEL);
- if (!uacce_mm)
- return NULL;
+ if (!(uacce->flags & UACCE_DEV_SVA))
+ return 0;
- if (uacce->flags & UACCE_DEV_SVA) {
- /*
- * Safe to pass an incomplete uacce_mm, since mm_exit cannot
- * fire while we hold a reference to the mm.
- */
- handle = iommu_sva_bind_device(uacce->parent, mm, uacce_mm);
- if (IS_ERR(handle))
- goto err_free;
+ handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
- ret = iommu_sva_set_ops(handle, &uacce_sva_ops);
- if (ret)
- goto err_unbind;
-
- uacce_mm->pasid = iommu_sva_get_pasid(handle);
- if (uacce_mm->pasid == IOMMU_PASID_INVALID)
- goto err_unbind;
+ pasid = iommu_sva_get_pasid(handle);
+ if (pasid == IOMMU_PASID_INVALID) {
+ iommu_sva_unbind_device(handle);
+ return -ENODEV;
}
- uacce_mm->mm = mm;
- uacce_mm->handle = handle;
- INIT_LIST_HEAD(&uacce_mm->queues);
- mutex_init(&uacce_mm->lock);
- list_add(&q->list, &uacce_mm->queues);
- list_add(&uacce_mm->list, &uacce->mm_list);
-
- return uacce_mm;
-
-err_unbind:
- if (handle)
- iommu_sva_unbind_device(handle);
-err_free:
- kfree(uacce_mm);
- return NULL;
+ q->handle = handle;
+ q->pasid = pasid;
+ return 0;
}
-static void uacce_mm_put(struct uacce_queue *q)
+static void uacce_unbind_queue(struct uacce_queue *q)
{
- struct uacce_mm *uacce_mm = q->uacce_mm;
-
- lockdep_assert_held(&q->uacce->mm_lock);
-
- mutex_lock(&uacce_mm->lock);
- list_del(&q->list);
- mutex_unlock(&uacce_mm->lock);
-
- if (list_empty(&uacce_mm->queues)) {
- if (uacce_mm->handle)
- iommu_sva_unbind_device(uacce_mm->handle);
- list_del(&uacce_mm->list);
- kfree(uacce_mm);
- }
+ if (!q->handle)
+ return;
+ iommu_sva_unbind_device(q->handle);
+ q->handle = NULL;
}
static int uacce_fops_open(struct inode *inode, struct file *filep)
{
- struct uacce_mm *uacce_mm = NULL;
struct uacce_device *uacce;
struct uacce_queue *q;
int ret = 0;
@@ -205,21 +135,16 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
if (!q)
return -ENOMEM;
- mutex_lock(&uacce->mm_lock);
- uacce_mm = uacce_mm_get(uacce, q, current->mm);
- mutex_unlock(&uacce->mm_lock);
- if (!uacce_mm) {
- ret = -ENOMEM;
+ ret = uacce_bind_queue(uacce, q);
+ if (ret)
goto out_with_mem;
- }
q->uacce = uacce;
- q->uacce_mm = uacce_mm;
if (uacce->ops->get_queue) {
- ret = uacce->ops->get_queue(uacce, uacce_mm->pasid, q);
+ ret = uacce->ops->get_queue(uacce, q->pasid, q);
if (ret < 0)
- goto out_with_mm;
+ goto out_with_bond;
}
init_waitqueue_head(&q->wait);
@@ -227,12 +152,14 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
uacce->inode = inode;
q->state = UACCE_Q_INIT;
+ mutex_lock(&uacce->queues_lock);
+ list_add(&q->list, &uacce->queues);
+ mutex_unlock(&uacce->queues_lock);
+
return 0;
-out_with_mm:
- mutex_lock(&uacce->mm_lock);
- uacce_mm_put(q);
- mutex_unlock(&uacce->mm_lock);
+out_with_bond:
+ uacce_unbind_queue(q);
out_with_mem:
kfree(q);
return ret;
@@ -241,14 +168,12 @@ out_with_mem:
static int uacce_fops_release(struct inode *inode, struct file *filep)
{
struct uacce_queue *q = filep->private_data;
- struct uacce_device *uacce = q->uacce;
+ mutex_lock(&q->uacce->queues_lock);
+ list_del(&q->list);
+ mutex_unlock(&q->uacce->queues_lock);
uacce_put_queue(q);
-
- mutex_lock(&uacce->mm_lock);
- uacce_mm_put(q);
- mutex_unlock(&uacce->mm_lock);
-
+ uacce_unbind_queue(q);
kfree(q);
return 0;
@@ -513,8 +438,8 @@ struct uacce_device *uacce_alloc(struct device *parent,
if (ret < 0)
goto err_with_uacce;
- INIT_LIST_HEAD(&uacce->mm_list);
- mutex_init(&uacce->mm_lock);
+ INIT_LIST_HEAD(&uacce->queues);
+ mutex_init(&uacce->queues_lock);
device_initialize(&uacce->dev);
uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
uacce->dev.class = uacce_class;
@@ -561,8 +486,7 @@ EXPORT_SYMBOL_GPL(uacce_register);
*/
void uacce_remove(struct uacce_device *uacce)
{
- struct uacce_mm *uacce_mm;
- struct uacce_queue *q;
+ struct uacce_queue *q, *next_q;
if (!uacce)
return;
@@ -574,24 +498,12 @@ void uacce_remove(struct uacce_device *uacce)
unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
/* ensure no open queue remains */
- mutex_lock(&uacce->mm_lock);
- list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
- /*
- * We don't take the uacce_mm->lock here. Since we hold the
- * device's mm_lock, no queue can be added to or removed from
- * this uacce_mm. We may run concurrently with mm_exit, but
- * uacce_put_queue() is serialized and iommu_sva_unbind_device()
- * waits for the lock that mm_exit is holding.
- */
- list_for_each_entry(q, &uacce_mm->queues, list)
- uacce_put_queue(q);
-
- if (uacce->flags & UACCE_DEV_SVA) {
- iommu_sva_unbind_device(uacce_mm->handle);
- uacce_mm->handle = NULL;
- }
+ mutex_lock(&uacce->queues_lock);
+ list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+ uacce_put_queue(q);
+ uacce_unbind_queue(q);
}
- mutex_unlock(&uacce->mm_lock);
+ mutex_unlock(&uacce->queues_lock);
/* disable sva now since no opened queues */
if (uacce->flags & UACCE_DEV_SVA)