diff options
author | Shiwu Zhang <shiwu.zhang@amd.com> | 2022-11-18 07:21:15 +0100 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2023-06-09 15:50:01 +0200 |
commit | fee500fa7cb7e11a4d2d66e75e65e67c156e27c6 (patch) | |
tree | b827d8b23ed8ea988092d373eacea4d8854c2597 | |
parent | drm/amdgpu: Skip TMR allocation if not required (diff) | |
download | linux-fee500fa7cb7e11a4d2d66e75e65e67c156e27c6.tar.xz linux-fee500fa7cb7e11a4d2d66e75e65e67c156e27c6.zip |
drm/amdgpu: Fix the KCQ hang when binding back
Just like the KIQ, the KCQ needs to clear the doorbell-related registers as well
to avoid hangs when the driver is loaded again after unloading.
Signed-off-by: Shiwu Zhang <shiwu.zhang@amd.com>
Reviewed-by: Le Ma <le.ma@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 28 |
1 file changed, 25 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index aaa67592bbb5..ef552c9b19b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -1670,7 +1670,7 @@ static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
 	return 0;
 }
 
-static int gfx_v9_4_3_xcc_kiq_fini_register(struct amdgpu_ring *ring,
+static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
 					    int xcc_id)
 {
 	struct amdgpu_device *adev = ring->adev;
@@ -1688,7 +1688,7 @@ static int gfx_v9_4_3_xcc_kiq_fini_register(struct amdgpu_ring *ring,
 	}
 
 	if (j == AMDGPU_MAX_USEC_TIMEOUT) {
-		DRM_DEBUG("KIQ dequeue request failed.\n");
+		DRM_DEBUG("%s dequeue request failed.\n", ring->name);
 
 		/* Manual disable if dequeue request times out */
 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
@@ -1793,6 +1793,27 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
 	return 0;
 }
 
+static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
+{
+	struct amdgpu_ring *ring;
+	int j;
+
+	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
+		ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings];
+
+		if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+			mutex_lock(&adev->srbm_mutex);
+			soc15_grbm_select(adev, ring->me,
+					  ring->pipe,
+					  ring->queue, 0, GET_INST(GC, xcc_id));
+			gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
+			soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
+			mutex_unlock(&adev->srbm_mutex);
+		}
+	}
+
+	return 0;
+}
+
 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
 {
 	struct amdgpu_ring *ring;
@@ -1923,12 +1944,13 @@ static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
 				  adev->gfx.kiq[xcc_id].ring.pipe,
 				  adev->gfx.kiq[xcc_id].ring.queue, 0,
 				  GET_INST(GC, xcc_id));
-		gfx_v9_4_3_xcc_kiq_fini_register(&adev->gfx.kiq[xcc_id].ring,
+		gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
 						 xcc_id);
 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
 		mutex_unlock(&adev->srbm_mutex);
 	}
 
+	gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
 	gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id);
 
 	/* Skip suspend with A+A reset */