author     Philip Yang <Philip.Yang@amd.com>         2020-09-22 19:09:33 +0200
committer  Alex Deucher <alexander.deucher@amd.com>  2021-03-24 03:57:55 +0100
commit     b672cb1eee59efe6ca5bb2a2ce90060a22860558
tree       cb758511767be914445c7cb3964607114056f310 /drivers/gpu/drm/amd/amdgpu/vega20_ih.c
parent     drm/amdgpu: Use free system memory size for kfd memory accounting
drm/amdgpu: enable retry fault wptr overflow
If XNACK is on, VM retry fault interrupts are sent to IH ring1, which
fills up quickly. IH then cannot receive other interrupts, and this
causes a deadlock when a buffer is being migrated with SDMA and the
driver waits for the SDMA to finish while handling the retry fault.
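
In other words, the retry-fault handler ends up waiting for an SDMA
completion that has to be signalled through the same interrupt
controller that is already wedged on a full ring1. Below is a
deliberately simplified, single-ring model of that situation; it is
plain standalone C for illustration, not driver code, and collapses
ring0/ring1 into one ring.

/* Standalone model: a bounded IH ring that stalls when full. The fault
 * handler waits for an "SDMA done" event that can only arrive through
 * the same ring, so a flood of retry faults deadlocks it.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 4

struct ih_ring {
	int entries[RING_SIZE];
	unsigned int rptr, wptr;
	bool overflow_enabled;	/* drop the oldest entry instead of stalling */
};

/* Returns false when a full ring refuses the interrupt (the writer stalls). */
static bool ring_push(struct ih_ring *r, int iv)
{
	if (r->wptr - r->rptr == RING_SIZE) {
		if (!r->overflow_enabled)
			return false;
		r->rptr++;	/* the oldest entry is overwritten and lost */
	}
	r->entries[r->wptr++ % RING_SIZE] = iv;
	return true;
}

int main(void)
{
	struct ih_ring ring = { .overflow_enabled = false };
	int i;

	/* XNACK retry faults arrive faster than the handler drains them... */
	for (i = 0; i < RING_SIZE; i++)
		ring_push(&ring, 1 /* retry fault */);

	/* ...so the SDMA-done interrupt the handler is waiting on cannot be
	 * delivered while overflow is disabled: nobody makes progress.
	 */
	if (!ring_push(&ring, 2 /* sdma done */))
		printf("ring full, sdma-done interrupt blocked -> deadlock\n");

	ring.overflow_enabled = true;	/* the fix: drop stale retry faults */
	if (ring_push(&ring, 2))
		printf("overflow enabled: sdma-done can still be delivered\n");
	return 0;
}
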
Remove VMC from the IH storm client list and enable ring1 write pointer
overflow, so IH drops retry fault interrupts but can still receive
other interrupts while the driver is handling a retry fault.
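
At the register level this is a read-modify-write of IH_RB_CNTL: the
explicit clear of WPTR_OVERFLOW_ENABLE for ring1 goes away (which is
why simply dropping that line is enough to enable overflow, assuming
the shared ring setup leaves the bit set by default) and only
RB_FULL_DRAIN_ENABLE is requested, as the first hunk below shows. A
minimal standalone sketch of that bitfield idiom follows; the field
positions are invented for the demo, the real IH_RB_CNTL layout comes
from the generated register headers.

/* Standalone sketch of a REG_SET_FIELD-style read-modify-write. */
#include <stdint.h>
#include <stdio.h>

#define WPTR_OVERFLOW_ENABLE_SHIFT 16u	/* hypothetical bit position */
#define WPTR_OVERFLOW_ENABLE_MASK  (1u << WPTR_OVERFLOW_ENABLE_SHIFT)
#define RB_FULL_DRAIN_ENABLE_SHIFT 17u	/* hypothetical bit position */
#define RB_FULL_DRAIN_ENABLE_MASK  (1u << RB_FULL_DRAIN_ENABLE_SHIFT)

static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	/* Assume the common ring setup leaves write-pointer overflow on. */
	uint32_t rb_cntl = WPTR_OVERFLOW_ENABLE_MASK;

	/* The old code cleared the overflow bit again for ring1; the new
	 * code keeps it set and only asks the ring to drain when full.
	 */
	rb_cntl = set_field(rb_cntl, RB_FULL_DRAIN_ENABLE_MASK,
			    RB_FULL_DRAIN_ENABLE_SHIFT, 1);

	printf("IH_RB_CNTL (ring1) = 0x%08x\n", rb_cntl);
	return 0;
}
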
IH does not write the ring1 write pointer back to memory, and the ring1
write pointer recorded by the self-irq is no longer updated, so always
read the latest ring1 write pointer from the register.
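
For ring1 this means the write pointer must come from MMIO rather than
a writeback copy, and an overflow is handled by skipping past the
overwritten entries. The standalone sketch below models that fallback;
the flag position, ring mask, and skip distance are illustrative
stand-ins, not the real register definitions.

/* Standalone sketch, not driver code: read the write pointer from the
 * register and, if the hardware reports an overflow, resume parsing
 * after the clobbered region.
 */
#include <stdint.h>
#include <stdio.h>

#define RB_OVERFLOW_BIT (1u << 31)	/* illustrative overflow flag */
#define PTR_MASK        0xffffu		/* illustrative ring mask */
#define OVERFLOW_SKIP   32u		/* resume past overwritten entries */

/* Stand-in for reading the IH_RB_WPTR register over MMIO. */
static uint32_t read_wptr_register(void)
{
	return RB_OVERFLOW_BIT | 0x0040u;	/* pretend an overflow happened */
}

int main(void)
{
	uint32_t rptr = 0x0030u;
	uint32_t wptr = read_wptr_register();

	if (wptr & RB_OVERFLOW_BIT) {
		wptr &= ~RB_OVERFLOW_BIT;
		/* Oldest entries were overwritten; restart a safe distance
		 * past the current write pointer so parsing can catch up.
		 */
		rptr = (wptr + OVERFLOW_SKIP) & PTR_MASK;
		printf("overflow: rptr moved to 0x%04x\n", rptr);
	}
	printf("wptr = 0x%04x, rptr = 0x%04x\n", wptr & PTR_MASK, rptr);
	return 0;
}
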
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vega20_ih.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 32
1 file changed, 11 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 0049f048a305..101416c646c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -223,10 +223,8 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev,
 	tmp = vega20_ih_rb_cntl(ih, tmp);
 	if (ih == &adev->irq.ih)
 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
-	if (ih == &adev->irq.ih1) {
-		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+	if (ih == &adev->irq.ih1)
 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
-	}
 	if (amdgpu_sriov_vf(adev)) {
 		if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
 			dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
@@ -300,7 +298,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
 	u32 ih_chicken;
 	int ret;
 	int i;
-	u32 tmp;
 
 	/* disable irqs */
 	ret = vega20_ih_toggle_interrupts(adev, false);
@@ -341,15 +338,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
 		}
 	}
 
-	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
-	tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
-			    CLIENT18_IS_STORM_CLIENT, 1);
-	WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
-
-	tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
-	tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
-	WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
-
 	pci_set_master(adev->pdev);
 
 	/* enable interrupts */
@@ -395,11 +383,17 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
 	u32 wptr, tmp;
 	struct amdgpu_ih_regs *ih_regs;
 
-	wptr = le32_to_cpu(*ih->wptr_cpu);
-	ih_regs = &ih->ih_regs;
+	if (ih == &adev->irq.ih) {
+		/* Only ring0 supports writeback. On other rings fall back
+		 * to register-based code with overflow checking below.
+		 */
+		wptr = le32_to_cpu(*ih->wptr_cpu);
 
-	if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
-		goto out;
+		if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+			goto out;
+	}
+
+	ih_regs = &ih->ih_regs;
 
 	/* Double check that the overflow wasn't already cleared. */
 	wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
@@ -491,15 +485,11 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev,
 			      struct amdgpu_irq_src *source,
 			      struct amdgpu_iv_entry *entry)
 {
-	uint32_t wptr = cpu_to_le32(entry->src_data[0]);
-
 	switch (entry->ring_id) {
 	case 1:
-		*adev->irq.ih1.wptr_cpu = wptr;
 		schedule_work(&adev->irq.ih1_work);
 		break;
 	case 2:
-		*adev->irq.ih2.wptr_cpu = wptr;
 		schedule_work(&adev->irq.ih2_work);
 		break;
 	default: break;