path: root/drivers/gpu/drm/nouveau/nouveau_gem.c
author	Ben Skeggs <bskeggs@redhat.com>	2017-10-31 18:56:20 +0100
committer	Ben Skeggs <bskeggs@redhat.com>	2017-11-02 04:32:33 +0100
commit	00d041d087c1715039bf7a610140ffe1af347b55 (patch)
tree	9c389b7198096b98949153192c9149854a519998 /drivers/gpu/drm/nouveau/nouveau_gem.c
parent	drm/nouveau: implement per-client delayed workqueue with fence support (diff)
drm/nouveau: queue delayed unmapping of VMAs on client workqueue
VMAs will soon no longer take references on the VMM they belong to, which means more care is required when handling delayed unmapping. Queueing it on the client workqueue ensures all pending VMA unmaps will have completed before the VMM is destroyed.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_gem.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_gem.c	36
1 file changed, 30 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index a600c4386c72..2cf94e059170 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -90,19 +90,33 @@ out:
return ret;
}
+struct nouveau_gem_object_unmap {
+ struct nouveau_cli_work work;
+ struct nouveau_vma *vma;
+};
+
static void
-nouveau_gem_object_delete(void *data)
+nouveau_gem_object_delete(struct nouveau_vma *vma)
{
- struct nouveau_vma *vma = data;
nouveau_vma_del(&vma);
}
static void
+nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
+{
+ struct nouveau_gem_object_unmap *work =
+ container_of(w, typeof(*work), work);
+ nouveau_gem_object_delete(work->vma);
+ kfree(work);
+}
+
+static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
struct reservation_object *resv = nvbo->bo.resv;
struct reservation_object_list *fobj;
+ struct nouveau_gem_object_unmap *work;
struct dma_fence *fence = NULL;
fobj = reservation_object_get_list(resv);
@@ -117,10 +131,20 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
else
fence = reservation_object_get_excl(nvbo->bo.resv);
- if (fence && mapped)
- nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
- else
- nouveau_vma_del(&vma);
+ if (!fence || !mapped) {
+ nouveau_gem_object_delete(vma);
+ return;
+ }
+
+ if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
+ WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
+ nouveau_gem_object_delete(vma);
+ return;
+ }
+
+ work->work.func = nouveau_gem_object_delete_work;
+ work->vma = vma;
+ nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}
void
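
As a reference for the mechanism this patch relies on, the sketch below shows the fence-deferred unmap pattern in isolation: the caller embeds a struct nouveau_cli_work in its own work item, points the .func member at a callback, and hands the item together with a fence to nouveau_cli_work_queue(), which runs the callback on the client's workqueue once the fence has signalled. The struct embedding, the .func member and the nouveau_cli_work_queue() call are taken from the diff above; the helper names vma_unmap_work, vma_unmap_work_func and queue_vma_unmap are illustrative only.

/*
 * Minimal sketch of the fence-deferred unmap pattern introduced above.
 * Only struct nouveau_cli_work, its .func member and
 * nouveau_cli_work_queue() come from the patch; the helper names here
 * are hypothetical and exist purely for illustration.
 */
struct vma_unmap_work {
	struct nouveau_cli_work work;	/* embedded, so container_of() works */
	struct nouveau_vma *vma;
};

static void
vma_unmap_work_func(struct nouveau_cli_work *w)
{
	/* Runs on the client workqueue after the fence has signalled. */
	struct vma_unmap_work *work = container_of(w, typeof(*work), work);

	nouveau_vma_del(&work->vma);
	kfree(work);
}

static void
queue_vma_unmap(struct nouveau_cli *cli, struct nouveau_vma *vma,
		struct dma_fence *fence)
{
	struct vma_unmap_work *work = kmalloc(sizeof(*work), GFP_KERNEL);

	if (!work) {
		/* No memory: fall back to a bounded synchronous fence wait. */
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_vma_del(&vma);
		return;
	}

	work->work.func = vma_unmap_work_func;
	work->vma = vma;
	nouveau_cli_work_queue(cli, fence, &work->work);
}

Queueing the unmap on the client workqueue this way ensures every pending VMA unmap has completed before the VMM is destroyed, which is the ordering guarantee the commit message describes.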