author    | Christian König <christian.koenig@amd.com> | 2018-09-11 11:50:57 +0200
committer | Alex Deucher <alexander.deucher@amd.com> | 2018-09-19 19:38:41 +0200
commit    | 403009bfba45163887398652762ed1fc6645181c (patch)
tree      | 55d50f036ed77a19acffb3f736190878c6130280 /drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
parent    | drm/amdgpu: always recover VRAM during GPU recovery (diff)
download  | linux-403009bfba45163887398652762ed1fc6645181c.tar.xz, linux-403009bfba45163887398652762ed1fc6645181c.zip
drm/amdgpu: fix shadow BO restoring
Don't grab the reservation lock any more and simplify the handling quite
a bit.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
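The simplification is visible in the per-BO handling (see the full diff below): instead of reserving each buffer and translating its placement with amdgpu_mem_type_to_domain(), the loop now walks the shadow BOs directly and decides from the TTM placement alone whether a restore is needed. A condensed excerpt of the new amdgpu_device_recover_vram() loop as introduced by the patch (error paths and fence pipelining trimmed, not standalone code):

```c
list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
	/* No reservation lock needed: an evicted BO is simply skipped */
	if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
	    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
		continue;

	/* Queue the GTT -> VRAM copy; 'next' receives its fence */
	r = amdgpu_bo_restore_shadow(shadow, &next);
	if (r)
		break;
	/* ... pipelined fence waits, see the hunks below ... */
}
```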
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 109
1 file changed, 28 insertions, 81 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 0267fae316d7..bd79d0a31942 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2951,54 +2951,6 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_device_recover_vram_from_shadow - restore shadowed VRAM buffers
- *
- * @adev: amdgpu_device pointer
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: amdgpu_bo buffer whose shadow is being restored
- * @fence: dma_fence associated with the operation
- *
- * Restores the VRAM buffer contents from the shadow in GTT. Used to
- * restore things like GPUVM page tables after a GPU reset where
- * the contents of VRAM might be lost.
- * Returns 0 on success, negative error code on failure.
- */
-static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
-                                                  struct amdgpu_ring *ring,
-                                                  struct amdgpu_bo *bo,
-                                                  struct dma_fence **fence)
-{
-        uint32_t domain;
-        int r;
-
-        if (!bo->shadow)
-                return 0;
-
-        r = amdgpu_bo_reserve(bo, true);
-        if (r)
-                return r;
-        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-        /* if bo has been evicted, then no need to recover */
-        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
-                r = amdgpu_bo_validate(bo->shadow);
-                if (r) {
-                        DRM_ERROR("bo validate failed!\n");
-                        goto err;
-                }
-
-                r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
-                                                 NULL, fence, true);
-                if (r) {
-                        DRM_ERROR("recover page table failed!\n");
-                        goto err;
-                }
-        }
-err:
-        amdgpu_bo_unreserve(bo);
-        return r;
-}
-
-/**
  * amdgpu_device_recover_vram - Recover some VRAM contents
  *
  * @adev: amdgpu_device pointer
@@ -3006,16 +2958,15 @@ err:
  * Restores the contents of VRAM buffers from the shadows in GTT. Used to
  * restore things like GPUVM page tables after a GPU reset where
  * the contents of VRAM might be lost.
- * Returns 0 on success, 1 on failure.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
  */
 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 {
-        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-        struct amdgpu_bo *bo, *tmp;
         struct dma_fence *fence = NULL, *next = NULL;
-        long r = 1;
-        int i = 0;
-        long tmo;
+        struct amdgpu_bo *shadow;
+        long r = 1, tmo;
 
         if (amdgpu_sriov_runtime(adev))
                 tmo = msecs_to_jiffies(8000);
@@ -3024,44 +2975,40 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 
         DRM_INFO("recover vram bo from shadow start\n");
         mutex_lock(&adev->shadow_list_lock);
-        list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
-                next = NULL;
-                amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
+        list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
+
+                /* No need to recover an evicted BO */
+                if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
+                    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
+                        continue;
+
+                r = amdgpu_bo_restore_shadow(shadow, &next);
+                if (r)
+                        break;
+
                 if (fence) {
                         r = dma_fence_wait_timeout(fence, false, tmo);
-                        if (r == 0)
-                                pr_err("wait fence %p[%d] timeout\n", fence, i);
-                        else if (r < 0)
-                                pr_err("wait fence %p[%d] interrupted\n", fence, i);
-                        if (r < 1) {
-                                dma_fence_put(fence);
-                                fence = next;
+                        dma_fence_put(fence);
+                        fence = next;
+                        if (r <= 0)
                                 break;
-                        }
-                        i++;
+                } else {
+                        fence = next;
                 }
-
-                dma_fence_put(fence);
-                fence = next;
         }
         mutex_unlock(&adev->shadow_list_lock);
 
-        if (fence) {
-                r = dma_fence_wait_timeout(fence, false, tmo);
-                if (r == 0)
-                        pr_err("wait fence %p[%d] timeout\n", fence, i);
-                else if (r < 0)
-                        pr_err("wait fence %p[%d] interrupted\n", fence, i);
-
-        }
+        if (fence)
+                tmo = dma_fence_wait_timeout(fence, false, tmo);
         dma_fence_put(fence);
 
-        if (r > 0)
-                DRM_INFO("recover vram bo from shadow done\n");
-        else
+        if (r <= 0 || tmo <= 0) {
                 DRM_ERROR("recover vram bo from shadow failed\n");
+                return -EIO;
+        }
 
-        return (r > 0) ? 0 : 1;
+        DRM_INFO("recover vram bo from shadow done\n");
+        return 0;
 }
 
 /**
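A note on the reworked error handling: dma_fence_wait_timeout() returns the remaining timeout in jiffies when the fence signals, 0 on timeout, and a negative errno if interrupted, so the function only reports success when both the in-loop result r and the final tmo stay positive. The waits are also pipelined: the copy for the current BO is queued before the previous BO's fence is waited on. Below is a minimal sketch of that pattern under stated assumptions; submit_copy() and struct copy_job are hypothetical stand-ins for the amdgpu-specific pieces, not part of the patch.

```c
#include <linux/dma-fence.h>
#include <linux/errno.h>

/* Hypothetical stand-ins for illustration only; not part of the patch. */
struct copy_job { void *src, *dst; };
int submit_copy(struct copy_job *job, struct dma_fence **fence);

static int pipelined_restore(struct copy_job *jobs, int count, long tmo)
{
	struct dma_fence *fence = NULL, *next = NULL;
	long r = 1;
	int i;

	for (i = 0; i < count; i++) {
		/* Queue job i before waiting for job i-1, so the copy
		 * engine stays busy while the CPU waits. */
		if (submit_copy(&jobs[i], &next))
			break;

		if (fence) {
			r = dma_fence_wait_timeout(fence, false, tmo);
			dma_fence_put(fence);
			fence = next;
			if (r <= 0)	/* 0: timed out, <0: interrupted */
				break;
		} else {
			fence = next;
		}
	}

	/* Wait for the last queued copy, then drop its reference. */
	if (fence)
		tmo = dma_fence_wait_timeout(fence, false, tmo);
	dma_fence_put(fence);

	return (r <= 0 || tmo <= 0) ? -EIO : 0;
}
```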