author | Matthew Brost <matthew.brost@intel.com> | 2024-04-25 06:55:10 +0200 |
---|---|---|
committer | Matthew Brost <matthew.brost@intel.com> | 2024-04-26 21:10:05 +0200 |
commit | fda75ef80bddf2f08b0e597d59da69a3d8eb5be2 (patch) | |
tree | 2e95b075be85924dcfd4b0ae4ee02d3f88efea0e | /drivers/gpu/drm/xe/xe_vm.c |
parent | drm/xe: Add vm_bind_ioctl_ops_fini helper (diff) | |
drm/xe: Move ufence check to op_lock_and_prep
Rather than checking for an unsignaled ufence at unbind time, check for
this during the op_lock_and_prep function. This helps with the
transition to 1 job per VM bind IOCTL.
v2:
- Rebase
v3:
- Fix typo in commit message (Oak)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-11-matthew.brost@intel.com
Diffstat (limited to 'drivers/gpu/drm/xe/xe_vm.c')
-rw-r--r-- | drivers/gpu/drm/xe/xe_vm.c | 33 |
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 2f19372aaad5..40c1258c3282 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1653,16 +1653,6 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 
 	trace_xe_vma_unbind(vma);
 
-	if (vma->ufence) {
-		struct xe_user_fence * const f = vma->ufence;
-
-		if (!xe_sync_ufence_get_status(f))
-			return ERR_PTR(-EBUSY);
-
-		vma->ufence = NULL;
-		xe_sync_ufence_put(f);
-	}
-
 	if (number_tiles > 1) {
 		fences = kmalloc_array(number_tiles, sizeof(*fences),
 				       GFP_KERNEL);
@@ -2717,6 +2707,21 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
 	return err;
 }
 
+static int check_ufence(struct xe_vma *vma)
+{
+	if (vma->ufence) {
+		struct xe_user_fence * const f = vma->ufence;
+
+		if (!xe_sync_ufence_get_status(f))
+			return -EBUSY;
+
+		vma->ufence = NULL;
+		xe_sync_ufence_put(f);
+	}
+
+	return 0;
+}
+
 static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
 			    struct xe_vma_op *op)
 {
@@ -2729,6 +2734,10 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
 				    op->map.immediate);
 		break;
 	case DRM_GPUVA_OP_REMAP:
+		err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
+		if (err)
+			break;
+
 		err = vma_lock_and_validate(exec,
 					    gpuva_to_vma(op->base.remap.unmap->va),
 					    false);
@@ -2738,6 +2747,10 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
 			err = vma_lock_and_validate(exec, op->remap.next, true);
 		break;
 	case DRM_GPUVA_OP_UNMAP:
+		err = check_ufence(gpuva_to_vma(op->base.unmap.va));
+		if (err)
+			break;
+
 		err = vma_lock_and_validate(exec,
 					    gpuva_to_vma(op->base.unmap.va),
 					    false);
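The user-space sketch below is a minimal illustration of the control-flow change in this patch: the unsignaled-ufence check now runs in the lock-and-prep stage, so an unbind against a VMA with a pending user fence fails early with -EBUSY instead of failing later inside xe_vm_unbind_vma(). It uses invented stub types (stub_vma, stub_user_fence, signalled) and is not the actual xe driver code or API.

```c
/* Stand-alone sketch of the "check ufence during lock-and-prep" pattern.
 * All types and helpers are stubs for illustration only. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct stub_user_fence { bool signalled; };
struct stub_vma { struct stub_user_fence *ufence; };

/* Mirrors the check_ufence() helper added by the patch, on stub types. */
static int check_ufence(struct stub_vma *vma)
{
	if (vma->ufence) {
		if (!vma->ufence->signalled)
			return -EBUSY;
		vma->ufence = NULL;	/* fence consumed, drop it */
	}
	return 0;
}

/* Stand-in for op_lock_and_prep(): reject the op before any unbind work. */
static int op_lock_and_prep(struct stub_vma *vma)
{
	int err = check_ufence(vma);

	if (err)
		return err;	/* -EBUSY surfaces here, before execution */
	/* ... locking/validation would follow ... */
	return 0;
}

int main(void)
{
	struct stub_user_fence pending = { .signalled = false };
	struct stub_vma vma = { .ufence = &pending };

	printf("unsignalled ufence: %d\n", op_lock_and_prep(&vma)); /* -16 (-EBUSY) */
	pending.signalled = true;
	printf("signalled ufence:   %d\n", op_lock_and_prep(&vma)); /* 0 */
	return 0;
}
```

In the sketch, as in the patch, the busy case is reported from the preparation step rather than from the unbind path itself, which is what allows the later move to a single job per VM bind IOCTL.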