Diffstat (limited to 'drivers/gpu/drm')
110 files changed, 4477 insertions, 936 deletions
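The central refactor in this set moves the KIQ register helpers from amdgpu_virt.c (amdgpu_virt_kiq_rreg/amdgpu_virt_kiq_wreg) into amdgpu_gfx.c as amdgpu_kiq_rreg/amdgpu_kiq_wreg, relocates the writeback slot from adev->virt.reg_val_offs to adev->gfx.kiq.reg_val_offs, and adds an AMDGPU_REGS_KIQ access flag with RREG32_KIQ/WREG32_KIQ wrappers so a caller can force an access through the KIQ ring even outside SR-IOV. A minimal caller-side sketch, mirroring the gfx_v9_0_get_clockgating_state() hunk further down (illustrative only; assumes adev and flags are in scope):

	u32 data;

	/* force the read through the KIQ ring regardless of SR-IOV state */
	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;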
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 7ae3b22c5628..c2bbcdd9c875 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -120,6 +120,7 @@ amdgpu-y += \ amdgpu_rlc.o \ gfx_v8_0.o \ gfx_v9_0.o \ + gfx_v9_4.o \ gfx_v10_0.o # add async DMA block diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index b1bb10625cd9..da3bcff61b97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1009,10 +1009,14 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define AMDGPU_REGS_IDX (1<<0) #define AMDGPU_REGS_NO_KIQ (1<<1) +#define AMDGPU_REGS_KIQ (1<<2) #define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) #define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) +#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ) +#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ) + #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg)) #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 82155ac3288a..12247a32f9ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -527,7 +527,7 @@ static int acp_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = state == AMD_PG_STATE_GATE ? true : false; + bool enable = (state == AMD_PG_STATE_GATE); if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index b2487f4f271b..fa8ac9d19a7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2129,6 +2129,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem return -ENOMEM; mutex_init(&(*mem)->lock); + INIT_LIST_HEAD(&(*mem)->bo_va_list); (*mem)->bo = amdgpu_bo_ref(gws_bo); (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; (*mem)->process_info = process_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 64e2babbc36e..94a6c42f29ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -42,19 +42,12 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { [AMDGPU_HW_IP_VCN_JPEG] = 1, }; -static int amdgpu_ctx_total_num_entities(void) -{ - unsigned i, num_entities = 0; - - for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) - num_entities += amdgpu_ctx_num_entities[i]; - - return num_entities; -} - static int amdgpu_ctx_priority_permit(struct drm_file *filp, enum drm_sched_priority priority) { + if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) + return -EINVAL; + /* NORMAL and below are accessible by everyone */ if (priority <= DRM_SCHED_PRIORITY_NORMAL) return 0; @@ -68,64 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp, return -EACCES; } -static int amdgpu_ctx_init(struct amdgpu_device *adev, - enum drm_sched_priority priority, - struct drm_file *filp, - struct amdgpu_ctx *ctx) +static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); - unsigned i, j; + struct amdgpu_device *adev = ctx->adev; + 
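/* Reader's note (not in the original patch): amdgpu_ctx_init_entity() is
 * new here; entities are now created lazily on first use from
 * amdgpu_ctx_get_entity() instead of all at context creation, and each
 * amdgpu_ctx_entity carries its fence ring as a flexible array member,
 * so the single allocation below, sized with
 * offsetof(typeof(*entity), fences[amdgpu_sched_jobs]), covers the
 * entity plus its amdgpu_sched_jobs fence slots. */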
struct amdgpu_ctx_entity *entity; + struct drm_gpu_scheduler **scheds = NULL, *sched = NULL; + unsigned num_scheds = 0; + enum drm_sched_priority priority; int r; - if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) - return -EINVAL; - - r = amdgpu_ctx_priority_permit(filp, priority); - if (r) - return r; + entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]), + GFP_KERNEL); + if (!entity) + return -ENOMEM; - memset(ctx, 0, sizeof(*ctx)); - ctx->adev = adev; - - ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities, - sizeof(struct dma_fence*), GFP_KERNEL); - if (!ctx->fences) - return -ENOMEM; - - ctx->entities[0] = kcalloc(num_entities, - sizeof(struct amdgpu_ctx_entity), - GFP_KERNEL); - if (!ctx->entities[0]) { - r = -ENOMEM; - goto error_free_fences; - } - - for (i = 0; i < num_entities; ++i) { - struct amdgpu_ctx_entity *entity = &ctx->entities[0][i]; - - entity->sequence = 1; - entity->fences = &ctx->fences[amdgpu_sched_jobs * i]; - } - for (i = 1; i < AMDGPU_HW_IP_NUM; ++i) - ctx->entities[i] = ctx->entities[i - 1] + - amdgpu_ctx_num_entities[i - 1]; - - kref_init(&ctx->refcount); - spin_lock_init(&ctx->ring_lock); - mutex_init(&ctx->lock); - - ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); - ctx->reset_counter_query = ctx->reset_counter; - ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); - ctx->init_priority = priority; - ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; - - for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { - struct drm_gpu_scheduler **scheds; - struct drm_gpu_scheduler *sched; - unsigned num_scheds = 0; - - switch (i) { + entity->sequence = 1; + priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? + ctx->init_priority : ctx->override_priority; + switch (hw_ip) { case AMDGPU_HW_IP_GFX: sched = &adev->gfx.gfx_ring[0].sched; scheds = &sched; @@ -166,53 +119,90 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, scheds = adev->jpeg.jpeg_sched; num_scheds = adev->jpeg.num_jpeg_sched; break; - } - - for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) - r = drm_sched_entity_init(&ctx->entities[i][j].entity, - priority, scheds, - num_scheds, &ctx->guilty); - if (r) - goto error_cleanup_entities; } + r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds, + &ctx->guilty); + if (r) + goto error_free_entity; + + ctx->entities[hw_ip][ring] = entity; return 0; -error_cleanup_entities: - for (i = 0; i < num_entities; ++i) - drm_sched_entity_destroy(&ctx->entities[0][i].entity); - kfree(ctx->entities[0]); +error_free_entity: + kfree(entity); -error_free_fences: - kfree(ctx->fences); - ctx->fences = NULL; return r; } +static int amdgpu_ctx_init(struct amdgpu_device *adev, + enum drm_sched_priority priority, + struct drm_file *filp, + struct amdgpu_ctx *ctx) +{ + int r; + + r = amdgpu_ctx_priority_permit(filp, priority); + if (r) + return r; + + memset(ctx, 0, sizeof(*ctx)); + + ctx->adev = adev; + + kref_init(&ctx->refcount); + spin_lock_init(&ctx->ring_lock); + mutex_init(&ctx->lock); + + ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); + ctx->reset_counter_query = ctx->reset_counter; + ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); + ctx->init_priority = priority; + ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; + + return 0; + +} + +static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity) +{ + + int i; + + if (!entity) + return; + + for (i = 0; i < amdgpu_sched_jobs; ++i) + dma_fence_put(entity->fences[i]); + + kfree(entity); +} + static void 
amdgpu_ctx_fini(struct kref *ref) { struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount); - unsigned num_entities = amdgpu_ctx_total_num_entities(); struct amdgpu_device *adev = ctx->adev; unsigned i, j; if (!adev) return; - for (i = 0; i < num_entities; ++i) - for (j = 0; j < amdgpu_sched_jobs; ++j) - dma_fence_put(ctx->entities[0][i].fences[j]); - kfree(ctx->fences); - kfree(ctx->entities[0]); + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) { + amdgpu_ctx_fini_entity(ctx->entities[i][j]); + ctx->entities[i][j] = NULL; + } + } mutex_destroy(&ctx->lock); - kfree(ctx); } int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, u32 ring, struct drm_sched_entity **entity) { + int r; + if (hw_ip >= AMDGPU_HW_IP_NUM) { DRM_ERROR("unknown HW IP type: %d\n", hw_ip); return -EINVAL; @@ -229,7 +219,13 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, return -EINVAL; } - *entity = &ctx->entities[hw_ip][ring].entity; + if (ctx->entities[hw_ip][ring] == NULL) { + r = amdgpu_ctx_init_entity(ctx, hw_ip, ring); + if (r) + return r; + } + + *entity = &ctx->entities[hw_ip][ring]->entity; return 0; } @@ -269,14 +265,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev, static void amdgpu_ctx_do_release(struct kref *ref) { struct amdgpu_ctx *ctx; - unsigned num_entities; - u32 i; + u32 i, j; ctx = container_of(ref, struct amdgpu_ctx, refcount); + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + if (!ctx->entities[i][j]) + continue; - num_entities = amdgpu_ctx_total_num_entities(); - for (i = 0; i < num_entities; i++) - drm_sched_entity_destroy(&ctx->entities[0][i].entity); + drm_sched_entity_destroy(&ctx->entities[i][j]->entity); + } + } amdgpu_ctx_fini(ref); } @@ -506,19 +505,23 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, enum drm_sched_priority priority) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); enum drm_sched_priority ctx_prio; - unsigned i; + unsigned i, j; ctx->override_priority = priority; ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? 
ctx->init_priority : ctx->override_priority; + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + struct drm_sched_entity *entity; - for (i = 0; i < num_entities; i++) { - struct drm_sched_entity *entity = &ctx->entities[0][i].entity; + if (!ctx->entities[i][j]) + continue; - drm_sched_entity_set_priority(entity, ctx_prio); + entity = &ctx->entities[i][j]->entity; + drm_sched_entity_set_priority(entity, ctx_prio); + } } } @@ -554,20 +557,24 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); struct amdgpu_ctx *ctx; struct idr *idp; - uint32_t id, i; + uint32_t id, i, j; idp = &mgr->ctx_handles; mutex_lock(&mgr->lock); idr_for_each_entry(idp, ctx, id) { - for (i = 0; i < num_entities; i++) { - struct drm_sched_entity *entity; + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + struct drm_sched_entity *entity; + + if (!ctx->entities[i][j]) + continue; - entity = &ctx->entities[0][i].entity; - timeout = drm_sched_entity_flush(entity, timeout); + entity = &ctx->entities[i][j]->entity; + timeout = drm_sched_entity_flush(entity, timeout); + } } } mutex_unlock(&mgr->lock); @@ -576,10 +583,9 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout) void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) { - unsigned num_entities = amdgpu_ctx_total_num_entities(); struct amdgpu_ctx *ctx; struct idr *idp; - uint32_t id, i; + uint32_t id, i, j; idp = &mgr->ctx_handles; @@ -589,8 +595,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) continue; } - for (i = 0; i < num_entities; i++) - drm_sched_entity_fini(&ctx->entities[0][i].entity); + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { + struct drm_sched_entity *entity; + + if (!ctx->entities[i][j]) + continue; + + entity = &ctx->entities[i][j]->entity; + drm_sched_entity_fini(entity); + } + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index 4ad90a44dc3c..de490f183af2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -29,10 +29,12 @@ struct drm_device; struct drm_file; struct amdgpu_fpriv; +#define AMDGPU_MAX_ENTITY_NUM 4 + struct amdgpu_ctx_entity { uint64_t sequence; - struct dma_fence **fences; struct drm_sched_entity entity; + struct dma_fence *fences[]; }; struct amdgpu_ctx { @@ -42,8 +44,7 @@ struct amdgpu_ctx { unsigned reset_counter_query; uint32_t vram_lost_counter; spinlock_t ring_lock; - struct dma_fence **fences; - struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM]; + struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM]; bool preamble_presented; enum drm_sched_priority init_priority; enum drm_sched_priority override_priority; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 53d882000101..39cd545976b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -216,8 +216,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, { uint32_t ret; - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) - return amdgpu_virt_kiq_rreg(adev, reg); + if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) + return 
amdgpu_kiq_rreg(adev, reg); if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); @@ -294,8 +294,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, adev->last_mm_index = v; } - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) - return amdgpu_virt_kiq_wreg(adev, reg, v); + if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) + return amdgpu_kiq_wreg(adev, reg, v); if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); @@ -985,7 +985,7 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev) static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) { struct sysinfo si; - bool is_os_64 = (sizeof(void *) == 8) ? true : false; + bool is_os_64 = (sizeof(void *) == 8); uint64_t total_memory; uint64_t dram_size_seven_GB = 0x1B8000000; uint64_t dram_size_three_GB = 0xB8000000; @@ -3760,6 +3760,10 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_RAVEN: case CHIP_ARCTURUS: + case CHIP_RENOIR: + case CHIP_NAVI10: + case CHIP_NAVI14: + case CHIP_NAVI12: break; default: goto disabled; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h index 61a26c15c8dd..057f6ea645d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h @@ -52,6 +52,9 @@ struct amdgpu_df_funcs { uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val); void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val, uint32_t ficadl_val, uint32_t ficadh_val); + uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev, + uint32_t df_inst); + uint32_t (*get_df_inst_id)(struct amdgpu_device *adev); }; struct amdgpu_df { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index b88b8b82bb64..0f960b498792 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -296,7 +296,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, spin_lock_init(&kiq->ring_lock); - r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs); + r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs); if (r) return r; @@ -321,7 +321,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) { - amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs); + amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs); amdgpu_ring_fini(ring); } @@ -658,3 +658,95 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; } + +uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + BUG_ON(!ring->funcs->emit_rreg); + + spin_lock_irqsave(&kiq->ring_lock, flags); + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_emit_rreg(ring, reg); + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. 
this amdgpu_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in amdgpu_kiq_rreg, which causes + * gpu_recover() to hang there. + * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + goto failed_kiq_read; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq_read; + + return adev->wb.wb[kiq->reg_val_offs]; + +failed_kiq_read: + pr_err("failed to read reg:%x\n", reg); + return ~0; +} + +void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + BUG_ON(!ring->funcs->emit_wreg); + + spin_lock_irqsave(&kiq->ring_lock, flags); + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_emit_wreg(ring, reg, v); + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this amdgpu_kiq_wreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in amdgpu_kiq_wreg, which causes + * gpu_recover() to hang there. + * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + goto failed_kiq_write; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq_write; + + return; + +failed_kiq_write: + pr_err("failed to write reg:%x\n", reg); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index af4bd279f42f..ca17ffb01301 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -94,6 +94,7 @@ struct amdgpu_kiq { struct amdgpu_ring ring; struct amdgpu_irq_src irq; const struct kiq_pm4_funcs *pmf; + uint32_t reg_val_offs; }; /* @@ -375,4 +376,6 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); +void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 86267baca07c..d3c27a3c43f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -60,11 +60,6 @@ */ #define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL -/* - * Default stolen memory size, 1024 * 768 * 4 - */ -#define AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE 0x300000ULL - struct firmware; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 3265487b859f..611021514c52 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -172,6 +172,8 @@ struct psp_dtm_context { #define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942 #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000 #define GDDR6_MEM_TRAINING_OFFSET 0x8000 +/* Define 
the VRAM size that will be encroached by BIST training.*/ +#define GDDR6_MEM_TRAINING_ENCROACHED_SIZE 0x2000000 enum psp_memory_training_init_flag { PSP_MEM_TRAIN_NOT_SUPPORT = 0x0, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 766be7f18282..cef94e2169fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -742,6 +742,20 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, return 0; } +uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr) +{ + uint32_t df_inst_id; + + if ((!adev->df.funcs) || + (!adev->df.funcs->get_df_inst_id) || + (!adev->df.funcs->get_dram_base_addr)) + return addr; + + df_inst_id = adev->df.funcs->get_df_inst_id(adev); + + return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id); +} + /* wrapper of psp_ras_trigger_error */ int amdgpu_ras_error_inject(struct amdgpu_device *adev, struct ras_inject_if *info) @@ -759,6 +773,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, if (!obj) return -EINVAL; + /* Calculate XGMI relative offset */ + if (adev->gmc.xgmi.num_physical_nodes > 1) { + block_info.address = get_xgmi_relative_phy_addr(adev, + block_info.address); + } + switch (info->head.block) { case AMDGPU_RAS_BLOCK__GFX: if (adev->gfx.funcs->ras_error_inject) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index ceb0dbf685f1..59ddba137946 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -652,7 +652,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, if ((addr + (uint64_t)size) > (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) { - DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n", + DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n", addr, lo, hi); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index c4984c5fb2db..d6deb0eb1e15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -65,33 +65,33 @@ /* 1 second timeout */ #define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000) -#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \ - ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \ - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \ +#define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, sram_sel) \ + ({ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \ + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \ UVD_DPG_LMA_CTL__MASK_EN_MASK | \ - ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \ + ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \ - RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); \ + RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); \ }) -#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \ +#define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, sram_sel) \ do { \ - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \ - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \ - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \ + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); \ + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \ + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \ UVD_DPG_LMA_CTL__READ_WRITE_MASK | \ - ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \ + ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + 
reg) \ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \ } while (0) -#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, reg) \ +#define SOC15_DPG_MODE_OFFSET_2_0(ip, inst_idx, reg) \ ({ \ uint32_t internal_reg_offset, addr; \ bool video_range, aon_range; \ \ - addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ + addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \ addr <<= 2; \ video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && \ ((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600))))); \ @@ -111,7 +111,7 @@ #define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) \ ({ \ - WREG32_SOC15(VCN, inst, mmUVD_DPG_LMA_CTL, \ + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \ (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 103033f96f13..adc813cde8e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -45,98 +45,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) adev->pg_flags = 0; } -uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) -{ - signed long r, cnt = 0; - unsigned long flags; - uint32_t seq; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *ring = &kiq->ring; - - BUG_ON(!ring->funcs->emit_rreg); - - spin_lock_irqsave(&kiq->ring_lock, flags); - amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_rreg(ring, reg); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - - /* don't wait anymore for gpu reset case because this way may - * block gpu_recover() routine forever, e.g. this virt_kiq_rreg - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will - * never return if we keep waiting in virt_kiq_rreg, which cause - * gpu_recover() hang there. - * - * also don't wait anymore for IRQ context - * */ - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) - goto failed_kiq_read; - - might_sleep(); - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - } - - if (cnt > MAX_KIQ_REG_TRY) - goto failed_kiq_read; - - return adev->wb.wb[adev->virt.reg_val_offs]; - -failed_kiq_read: - pr_err("failed to read reg:%x\n", reg); - return ~0; -} - -void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) -{ - signed long r, cnt = 0; - unsigned long flags; - uint32_t seq; - struct amdgpu_kiq *kiq = &adev->gfx.kiq; - struct amdgpu_ring *ring = &kiq->ring; - - BUG_ON(!ring->funcs->emit_wreg); - - spin_lock_irqsave(&kiq->ring_lock, flags); - amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_wreg(ring, reg, v); - amdgpu_fence_emit_polling(ring, &seq); - amdgpu_ring_commit(ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - - /* don't wait anymore for gpu reset case because this way may - * block gpu_recover() routine forever, e.g. this virt_kiq_rreg - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will - * never return if we keep waiting in virt_kiq_rreg, which cause - * gpu_recover() hang there. 
- * - * also don't wait anymore for IRQ context - * */ - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) - goto failed_kiq_write; - - might_sleep(); - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { - - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); - } - - if (cnt > MAX_KIQ_REG_TRY) - goto failed_kiq_write; - - return; - -failed_kiq_write: - pr_err("failed to write reg:%x\n", reg); -} - void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 4d1ac7612967..daaf909d009a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -287,8 +287,6 @@ static inline bool is_virtual_machine(void) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); -uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); -void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t rreg1, uint32_t ref, uint32_t mask); diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c index d9cc746af5e6..847ca9b3ce4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c @@ -74,9 +74,9 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev, case CHIP_VEGA20: case CHIP_RAVEN: athub_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); athub_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c index ceb9aa4df0e7..921a69abda55 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c @@ -77,9 +77,9 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev, case CHIP_NAVI14: case CHIP_NAVI12: athub_v2_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); athub_v2_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index f51326598a8c..5a1bd8ed1a6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -27,6 +27,9 @@ #include "df/df_3_6_offset.h" #include "df/df_3_6_sh_mask.h" +#define DF_3_6_SMN_REG_INST_DIST 0x8 +#define DF_3_6_INST_CNT 8 + static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 32, 0, 0, 0, 2, 4, 8}; @@ -683,6 +686,58 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, } } +static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev, + uint32_t df_inst) +{ + uint32_t base_addr_reg_val = 0; + uint64_t base_addr = 0; + + base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 + + df_inst * DF_3_6_SMN_REG_INST_DIST); + + if (REG_GET_FIELD(base_addr_reg_val, + DF_CS_UMC_AON0_DramBaseAddress0, + AddrRngVal) == 0) { + DRM_WARN("address range not valid"); + return 0; + } + + base_addr = REG_GET_FIELD(base_addr_reg_val, + DF_CS_UMC_AON0_DramBaseAddress0, + DramBaseAddr); + + return base_addr << 28; +} + +static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev) +{ + uint32_t xgmi_node_id = 0; + uint32_t df_inst_id = 0; + + /* Walk through DF dst nodes to find current XGMI node */ + for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) { + + xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 + + df_inst_id * DF_3_6_SMN_REG_INST_DIST); + xgmi_node_id = REG_GET_FIELD(xgmi_node_id, + DF_CS_UMC_AON0_DramLimitAddress0, + DstFabricID); + + /* TODO: establish reason dest fabric id is offset by 7 */ + xgmi_node_id = xgmi_node_id >> 7; + + if (adev->gmc.xgmi.physical_node_id == xgmi_node_id) + break; + } + + if (df_inst_id == DF_3_6_INST_CNT) { + DRM_WARN("can't match df dst id with gpu node"); + return 0; + } + + return df_inst_id; +} + const struct amdgpu_df_funcs df_v3_6_funcs = { .sw_init = df_v3_6_sw_init, .sw_fini = df_v3_6_sw_fini, @@ -696,5 +751,7 @@ const struct amdgpu_df_funcs df_v3_6_funcs = { .pmc_stop = df_v3_6_pmc_stop, .pmc_get_count = df_v3_6_pmc_get_count, .get_fica = df_v3_6_get_fica, - .set_fica = df_v3_6_set_fica + .set_fica = df_v3_6_set_fica, + .get_dram_base_addr = df_v3_6_get_dram_base_addr, + .get_df_inst_id = df_v3_6_get_df_inst_id }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 874f641de281..1785fdad6ecb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -368,7 +368,7 @@ static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = { .map_queues_size = 7, .unmap_queues_size = 6, .query_status_size = 7, - .invalidate_tlbs_size = 12, + .invalidate_tlbs_size = 2, }; static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) @@ -4229,7 +4229,7 @@ static int gfx_v10_0_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_PG_STATE_GATE) ? true : false; + bool enable = (state == AMD_PG_STATE_GATE); switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: @@ -4255,7 +4255,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle, case CHIP_NAVI14: case CHIP_NAVI12: gfx_v10_0_update_gfx_clock_gating(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -4737,6 +4737,7 @@ static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) { struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); amdgpu_ring_write(ring, 0 | /* src: register*/ @@ -4745,9 +4746,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); } static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 46f0533ba43f..fa245973de12 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6449,6 +6449,7 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) { struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); amdgpu_ring_write(ring, 0 | /* src: register*/ @@ -6457,9 +6458,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); } static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 46ab46757b25..90f64b8bc358 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -48,6 +48,8 @@ #include "amdgpu_ras.h" +#include "gfx_v9_4.h" + #define GFX9_NUM_GFX_RINGS 1 #define GFX9_MEC_HPD_SIZE 4096 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L @@ -736,6 +738,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring); static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); +static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev); static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, void *inject_if); @@ -859,7 +862,7 @@ static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = { .map_queues_size = 7, .unmap_queues_size = 6, .query_status_size = 7, - .invalidate_tlbs_size = 12, + .invalidate_tlbs_size = 2, }; static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) @@ -1159,18 +1162,54 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) } } +struct amdgpu_gfxoff_quirk { + u16 chip_vendor; + u16 chip_device; + u16 subsys_vendor; + u16 subsys_device; + u8 revision; +}; + +static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { + /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */ + { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, + { 0, 0, 0, 0, 0 }, +}; + +static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev) +{ + const 
struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list; + + while (p && p->chip_device != 0) { + if (pdev->vendor == p->chip_vendor && + pdev->device == p->chip_device && + pdev->subsystem_vendor == p->subsys_vendor && + pdev->subsystem_device == p->subsys_device && + pdev->revision == p->revision) { + return true; + } + ++p; + } + return false; +} + static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) { + if (gfx_v9_0_should_disable_gfxoff(adev->pdev)) + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: break; case CHIP_RAVEN: - if (!(adev->rev_id >= 0x8 || - adev->pdev->device == 0x15d8) && - (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */ - !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */ + if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) && + ((adev->gfx.rlc_fw_version != 106 && + adev->gfx.rlc_fw_version < 531) || + (adev->gfx.rlc_fw_version == 53815) || + (adev->gfx.rlc_feature_version < 1) || + !adev->gfx.rlc.is_rlc_v2_1)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; if (adev->pm.pp_feature & PP_GFXOFF_MASK) @@ -1949,6 +1988,17 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { .query_ras_error_count = &gfx_v9_0_query_ras_error_count }; +static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v9_0_select_se_sh, + .read_wave_data = &gfx_v9_0_read_wave_data, + .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs, + .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, + .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, + .ras_error_inject = &gfx_v9_4_ras_error_inject, + .query_ras_error_count = &gfx_v9_4_query_ras_error_count +}; + static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) { u32 gb_addr_config; @@ -2000,6 +2050,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_ARCTURUS: + adev->gfx.funcs = &gfx_v9_4_gfx_funcs; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -2390,6 +2441,22 @@ static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev) } } +static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev) +{ + uint32_t tmp; + + switch (adev->asic_type) { + case CHIP_ARCTURUS: + tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG); + tmp = REG_SET_FIELD(tmp, SQ_CONFIG, + DISABLE_BARRIER_WAITCNT, 1); + WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp); + break; + default: + break; + }; +} + static void gfx_v9_0_constants_init(struct amdgpu_device *adev) { u32 tmp; @@ -2435,6 +2502,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) gfx_v9_0_init_compute_vmid(adev); gfx_v9_0_init_gds_vmid(adev); + gfx_v9_0_init_sq_config(adev); } static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) @@ -4029,7 +4097,7 @@ static const struct soc15_reg_entry sgpr2_init_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 }, }; -static const struct soc15_reg_entry sec_ded_counter_registers[] = { +static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1}, { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1}, { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1}, @@ -4118,7 +4186,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; 
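/* Reader's note (not in the original patch): gfx_v9_0_do_edc_gpr_workarounds()
 * dispatches compute IBs that write every VGPR and SGPR bank as a hardware
 * workaround, which bumps the EDC error counters; clearing those counters
 * is now routed per ASIC (gfx_v9_0_clear_ras_edc_counter() for Vega20,
 * gfx_v9_4_clear_ras_edc_counter() for Arcturus) in place of the
 * open-coded counter read-back removed in this hunk. */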
struct amdgpu_ib ib; struct dma_fence *f = NULL; - int r, i, j, k; + int r, i; unsigned total_size, vgpr_offset, sgpr_offset; u64 gpu_addr; @@ -4264,18 +4332,17 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) goto fail; } - /* read back registers to clear the counters */ - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { - for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) { - for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { - gfx_v9_0_select_se_sh(adev, j, 0x0, k); - RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i])); - } - } + switch (adev->asic_type) + { + case CHIP_VEGA20: + gfx_v9_0_clear_ras_edc_counter(adev); + break; + case CHIP_ARCTURUS: + gfx_v9_4_clear_ras_edc_counter(adev); + break; + default: + break; } - WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); - mutex_unlock(&adev->grbm_idx_mutex); fail: amdgpu_ib_free(adev, &ib, NULL); @@ -4638,7 +4705,7 @@ static int gfx_v9_0_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_PG_STATE_GATE) ? true : false; + bool enable = (state == AMD_PG_STATE_GATE); switch (adev->asic_type) { case CHIP_RAVEN: @@ -4700,7 +4767,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle, case CHIP_ARCTURUS: case CHIP_RENOIR: gfx_v9_0_update_gfx_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; @@ -4717,12 +4784,12 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) *flags = 0; /* AMD_CG_SUPPORT_GFX_MGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE)); if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) *flags |= AMD_CG_SUPPORT_GFX_MGCG; /* AMD_CG_SUPPORT_GFX_CGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL)); if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CGCG; @@ -4731,18 +4798,18 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) *flags |= AMD_CG_SUPPORT_GFX_CGLS; /* AMD_CG_SUPPORT_GFX_RLC_LS */ - data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL)); if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; /* AMD_CG_SUPPORT_GFX_CP_LS */ - data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL)); if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; if (adev->asic_type != CHIP_ARCTURUS) { /* AMD_CG_SUPPORT_GFX_3D_CGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D)); if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; @@ -5213,6 +5280,7 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) { struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); amdgpu_ring_write(ring, 0 | /* src: register*/ @@ -5221,9 +5289,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) amdgpu_ring_write(ring, 
reg); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + - adev->virt.reg_val_offs * 4)); + kiq->reg_val_offs * 4)); } static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, @@ -5545,7 +5613,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, } -static const struct soc15_ras_field_entry gc_ras_fields_vg20[] = { +static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = { { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) @@ -5993,7 +6061,7 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, int ret; struct ta_ras_trigger_error_input block_info = { 0 }; - if (adev->asic_type != CHIP_VEGA20) + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return -EINVAL; if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks)) @@ -6118,7 +6186,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0); - for (i = 0; i < 16; i++) { + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i); data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT); @@ -6137,7 +6205,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, } } - for (i = 0; i < 7; i++) { + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i); data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT); @@ -6158,7 +6226,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, } } - for (i = 0; i < 4; i++) { + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i); data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT); @@ -6170,7 +6238,7 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, } } - for (i = 0; i < 32; i++) { + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i); data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT); @@ -6197,36 +6265,36 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, return 0; } -static int __get_ras_error_count(const struct soc15_reg_entry *reg, +static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg, uint32_t se_id, uint32_t inst_id, uint32_t value, uint32_t *sec_count, uint32_t *ded_count) { uint32_t i; uint32_t sec_cnt, ded_cnt; - for (i = 0; i < ARRAY_SIZE(gc_ras_fields_vg20); i++) { - if(gc_ras_fields_vg20[i].reg_offset != reg->reg_offset || - gc_ras_fields_vg20[i].seg != reg->seg || - gc_ras_fields_vg20[i].inst != reg->inst) + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) { + if(gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset || + gfx_v9_0_ras_fields[i].seg != reg->seg || + gfx_v9_0_ras_fields[i].inst != reg->inst) continue; sec_cnt = (value & - gc_ras_fields_vg20[i].sec_count_mask) >> - gc_ras_fields_vg20[i].sec_count_shift; + gfx_v9_0_ras_fields[i].sec_count_mask) >> + gfx_v9_0_ras_fields[i].sec_count_shift; if (sec_cnt) { DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", - gc_ras_fields_vg20[i].name, + gfx_v9_0_ras_fields[i].name, se_id, inst_id, sec_cnt); *sec_count += sec_cnt; } ded_cnt = (value & - gc_ras_fields_vg20[i].ded_count_mask) >> - 
gc_ras_fields_vg20[i].ded_count_shift; + gfx_v9_0_ras_fields[i].ded_count_mask) >> + gfx_v9_0_ras_fields[i].ded_count_shift; if (ded_cnt) { DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", - gc_ras_fields_vg20[i].name, + gfx_v9_0_ras_fields[i].name, se_id, inst_id, ded_cnt); *ded_count += ded_cnt; @@ -6236,6 +6304,58 @@ static int __get_ras_error_count(const struct soc15_reg_entry *reg, return 0; } +static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev) +{ + int i, j, k; + + /* read back registers to clear the counters */ + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { + gfx_v9_0_select_se_sh(adev, j, 0x0, k); + RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); + } + } + } + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); + mutex_unlock(&adev->grbm_idx_mutex); + + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0); + + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT); + } + + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT); + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i); + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT); + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i); + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT); + } + + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); +} + static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { @@ -6244,7 +6364,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, uint32_t i, j, k; uint32_t reg_value; - if (adev->asic_type != CHIP_VEGA20) + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return -EINVAL; err_data->ue_count = 0; @@ -6252,14 +6372,14 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { - for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) { - for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { gfx_v9_0_select_se_sh(adev, j, 0, k); reg_value = - RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i])); + RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); if (reg_value) - __get_ras_error_count(&sec_ded_counter_registers[i], + gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i], j, k, reg_value, &sec_count, &ded_count); } diff --git 
a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c new file mode 100644 index 000000000000..f099f13d7f1e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -0,0 +1,978 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/kernel.h> + +#include "amdgpu.h" +#include "amdgpu_gfx.h" +#include "soc15.h" +#include "soc15d.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_pm.h" + +#include "gc/gc_9_4_1_offset.h" +#include "gc/gc_9_4_1_sh_mask.h" +#include "soc15_common.h" + +#include "gfx_v9_4.h" +#include "amdgpu_ras.h" + +static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = { + /* CPC */ + { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1 }, + /* DC */ + { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1 }, + /* CPF */ + { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1 }, + /* GDS */ + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1 }, + /* SPI */ + { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 }, + /* SQ */ + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 }, + /* SQC */ + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 }, + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 }, + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6 }, + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 0, 4, 6 }, + /* TA */ + { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16 }, + /* TCA */ + { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2 }, + /* TCC */ + { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16 }, + /* TCI */ + { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72 }, + /* TCP */ + { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16 }, + { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16 }, + { SOC15_REG_ENTRY(GC, 0, 
mmTCP_EDC_CNT), 0, 4, 16 }, + /* TD */ + { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16 }, + /* GCEA */ + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32 }, + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32 }, + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 1, 32 }, + /* RLC */ + { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 0, 1, 1 }, + { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 0, 1, 1 }, +}; + +static void gfx_v9_4_select_se_sh(struct amdgpu_device *adev, u32 se_num, + u32 sh_num, u32 instance) +{ + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, + INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, + instance); + + if (se_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, + 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, + 1); + else + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); + + WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data); +} + +static const struct soc15_ras_field_entry gfx_v9_4_ras_fields[] = { + /* CPC */ + { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) }, + { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) }, + { "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) }, + { "CPC_DC_CSINVOC_RAM_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) }, + { "CPC_DC_RESTORE_RAM_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) }, + { "CPC_DC_CSINVOC_RAM1_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) }, + { "CPC_DC_RESTORE_RAM1_ME1", + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) }, + + /* CPF */ + { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) }, + { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) }, + { "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) }, + + /* GDS */ + { "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), + SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC), + SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) }, + { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) }, + { "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) }, + { "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + 
SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) }, + { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE0_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE1_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE2_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) }, + { "GDS_ME1_PIPE3_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) }, + + /* SPI */ + { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) }, + { "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) }, + { "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) }, + { "SPI_WB_GRANT_61", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_DED_COUNT) }, + { "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) }, + + /* SQ */ + { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) }, + { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) }, + { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) }, + { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) }, + { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) }, + { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) }, + { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) }, + + /* SQC */ + { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) }, + { 
"SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) }, + { "SQC_INST_BANKA_UTCL1_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKA_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKA_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) }, + { "SQC_DATA_BANKA_HIT_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKA_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + DATA_BANKA_MISS_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) }, + { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) }, + { "SQC_INST_BANKB_UTCL1_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKB_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + INST_BANKB_MISS_FIFO_DED_COUNT) }, + { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, 
INST_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_HIT_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKB_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, + DATA_BANKB_MISS_FIFO_DED_COUNT) }, + { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) }, + + /* TA */ + { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) }, + { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_DED_COUNT) }, + { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) }, + { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) }, + { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) }, + + /* TCA */ + { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) }, + { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) }, + + /* TCC */ + { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) }, + { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) }, + { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) }, + { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) }, + { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) }, + { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) }, + { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) }, + { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT), + 
SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) }, + { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) }, + { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) }, + { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) }, + { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) }, + { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) }, + { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) }, + { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) }, + + /* TCI */ + { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) }, + + /* TCP */ + { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) }, + { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) }, + { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) }, + { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) }, + { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0, 0 }, + { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) }, + { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) }, + + /* TD */ + { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) }, + { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) }, + { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) }, + + /* EA */ + { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) }, + { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + 
SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) }, + { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) }, + { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) }, + { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) }, + { "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) }, + { "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) }, + { "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0, 0 }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_DATAMEM_DED_COUNT) }, + { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) }, + { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 }, + { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) }, + { "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) }, + { "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) }, + { "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) }, + { 
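Each row of this table pairs one counter register with a SEC mask (single-error corrected, accumulated as a correctable error) and a DED mask (double-error detected, accumulated as uncorrectable). A hedged sketch of how one row is decoded from a raw counter value, mirroring what gfx_v9_4_ras_error_count() does further down; the struct here is a simplified stand-in for soc15_ras_field_entry:

#include <linux/types.h>

/* Simplified stand-in for one soc15_ras_field_entry row. */
struct edc_field {
	u32 sec_mask, sec_shift;	/* correctable (SEC/SED) bits */
	u32 ded_mask, ded_shift;	/* uncorrectable (DED) bits */
};

static void decode_edc_value(const struct edc_field *f, u32 value,
			     u32 *sec, u32 *ded)
{
	*sec = (value & f->sec_mask) >> f->sec_shift;
	*ded = (value & f->ded_mask) >> f->ded_shift;
}

Rows whose SEC and DED halves live in different registers (the paired EA_*_PAGEMEM and EA_IO* entries above) simply zero the unused mask, and a zero mask decodes to zero by construction.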
"EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) }, + { "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) }, + { "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) }, + { "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) }, + { "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) }, + { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, MAM_AFMEM_SEC_COUNT), 0, 0 }, + { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) }, + + /* RLC */ + { "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) }, + { "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) }, + { "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) }, + { "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) }, + { "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) }, + { "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) }, + { "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) }, + { "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, 
RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) }, + { "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT), + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) }, +}; + +static const char * const vml2_mems[] = { + "UTC_VML2_BANK_CACHE_0_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_0_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_0_4K_MEM0", + "UTC_VML2_BANK_CACHE_0_4K_MEM1", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_1_4K_MEM0", + "UTC_VML2_BANK_CACHE_1_4K_MEM1", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_2_4K_MEM0", + "UTC_VML2_BANK_CACHE_2_4K_MEM1", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_3_4K_MEM0", + "UTC_VML2_BANK_CACHE_3_4K_MEM1", + "UTC_VML2_IFIFO_GROUP0", + "UTC_VML2_IFIFO_GROUP1", + "UTC_VML2_IFIFO_GROUP2", + "UTC_VML2_IFIFO_GROUP3", + "UTC_VML2_IFIFO_GROUP4", + "UTC_VML2_IFIFO_GROUP5", + "UTC_VML2_IFIFO_GROUP6", + "UTC_VML2_IFIFO_GROUP7", + "UTC_VML2_IFIFO_GROUP8", + "UTC_VML2_IFIFO_GROUP9", + "UTC_VML2_IFIFO_GROUP10", + "UTC_VML2_IFIFO_GROUP11", + "UTC_VML2_IFIFO_GROUP12", + "UTC_VML2_IFIFO_GROUP13", + "UTC_VML2_IFIFO_GROUP14", + "UTC_VML2_IFIFO_GROUP15", + "UTC_VML2_IFIFO_GROUP16", + "UTC_VML2_IFIFO_GROUP17", + "UTC_VML2_IFIFO_GROUP18", + "UTC_VML2_IFIFO_GROUP19", + "UTC_VML2_IFIFO_GROUP20", + "UTC_VML2_IFIFO_GROUP21", + "UTC_VML2_IFIFO_GROUP22", + "UTC_VML2_IFIFO_GROUP23", + "UTC_VML2_IFIFO_GROUP24", +}; + +static const char * const vml2_walker_mems[] = { + "UTC_VML2_CACHE_PDE0_MEM0", + "UTC_VML2_CACHE_PDE0_MEM1", + "UTC_VML2_CACHE_PDE1_MEM0", + "UTC_VML2_CACHE_PDE1_MEM1", + "UTC_VML2_CACHE_PDE2_MEM0", + "UTC_VML2_CACHE_PDE2_MEM1", + "UTC_VML2_RDIF_ARADDRS", + "UTC_VML2_RDIF_LOG_FIFO", + "UTC_VML2_QUEUE_REQ", + "UTC_VML2_QUEUE_RET", +}; + +static const char * const utcl2_router_mems[] = { + "UTCL2_ROUTER_GROUP0_VML2_REQ_FIFO0", + "UTCL2_ROUTER_GROUP1_VML2_REQ_FIFO1", + "UTCL2_ROUTER_GROUP2_VML2_REQ_FIFO2", + "UTCL2_ROUTER_GROUP3_VML2_REQ_FIFO3", + "UTCL2_ROUTER_GROUP4_VML2_REQ_FIFO4", + "UTCL2_ROUTER_GROUP5_VML2_REQ_FIFO5", + "UTCL2_ROUTER_GROUP6_VML2_REQ_FIFO6", + "UTCL2_ROUTER_GROUP7_VML2_REQ_FIFO7", + "UTCL2_ROUTER_GROUP8_VML2_REQ_FIFO8", + "UTCL2_ROUTER_GROUP9_VML2_REQ_FIFO9", + "UTCL2_ROUTER_GROUP10_VML2_REQ_FIFO10", + "UTCL2_ROUTER_GROUP11_VML2_REQ_FIFO11", + "UTCL2_ROUTER_GROUP12_VML2_REQ_FIFO12", + "UTCL2_ROUTER_GROUP13_VML2_REQ_FIFO13", + "UTCL2_ROUTER_GROUP14_VML2_REQ_FIFO14", + "UTCL2_ROUTER_GROUP15_VML2_REQ_FIFO15", + "UTCL2_ROUTER_GROUP16_VML2_REQ_FIFO16", + "UTCL2_ROUTER_GROUP17_VML2_REQ_FIFO17", + "UTCL2_ROUTER_GROUP18_VML2_REQ_FIFO18", + "UTCL2_ROUTER_GROUP19_VML2_REQ_FIFO19", + "UTCL2_ROUTER_GROUP20_VML2_REQ_FIFO20", + "UTCL2_ROUTER_GROUP21_VML2_REQ_FIFO21", + "UTCL2_ROUTER_GROUP22_VML2_REQ_FIFO22", + "UTCL2_ROUTER_GROUP23_VML2_REQ_FIFO23", + "UTCL2_ROUTER_GROUP24_VML2_REQ_FIFO24", +}; + +static const char * const atc_l2_cache_2m_mems[] = { + 
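These string tables are pure bookkeeping for log output: each names one memory instance that sits behind a shared INDEX/CNTL register pair, and gfx_v9_4_query_utc_edc_status() below walks them by index. A minimal sketch of that access pattern, with hypothetical wreg()/rreg() callbacks standing in for the WREG32_SOC15()/RREG32_SOC15() accessors:

#include <linux/types.h>

/* Select one memory instance through the INDEX register, then read its
 * packed SEC/DED counters back from the companion CNTL register. */
static u32 read_indexed_ecc_cntl(void (*wreg)(u32 reg, u32 val),
				 u32 (*rreg)(u32 reg),
				 u32 index_reg, u32 cntl_reg, u32 instance)
{
	wreg(index_reg, instance);
	return rreg(cntl_reg);
}

The driver brackets each walk by writing 255 to the INDEX register and zeroing the CNTL register, presumably to park the selector and clear stale counts.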
"UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM", +}; + +static const char * const atc_l2_cache_4k_mems[] = { + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7", +}; + +static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, + struct ras_err_data *err_data) +{ + uint32_t i, data; + uint32_t sec_count, ded_count; + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0); + + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); + + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL); + + sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL); + + sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL, + SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_walker_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL, + DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_walker_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) { + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL); + + sec_count = 
REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + utcl2_router_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + utcl2_router_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL); + + sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL, + SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_2m_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL, + DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + atc_l2_cache_2m_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL); + + sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL, + SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_4k_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL, + DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + atc_l2_cache_4k_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); + + return 0; +} + +static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg, + uint32_t se_id, uint32_t inst_id, + uint32_t value, uint32_t *sec_count, + uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_ras_fields); i++) { + if (gfx_v9_4_ras_fields[i].reg_offset != reg->reg_offset || + gfx_v9_4_ras_fields[i].seg != reg->seg || + gfx_v9_4_ras_fields[i].inst != reg->inst) + continue; + + sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >> + gfx_v9_4_ras_fields[i].sec_count_shift; + if (sec_cnt) { + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", + gfx_v9_4_ras_fields[i].name, se_id, inst_id, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >> + gfx_v9_4_ras_fields[i].ded_count_shift; + if (ded_cnt) { + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", + gfx_v9_4_ras_fields[i].name, se_id, inst_id, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + +int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i, j, k; + uint32_t reg_value; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return -EINVAL; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + mutex_lock(&adev->grbm_idx_mutex); + + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < 
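Condensed, the iteration in gfx_v9_4_query_ras_error_count() selects every (SE, instance) pair for each counter register while holding grbm_idx_mutex, reads the counter, and finally restores broadcast selection. A self-contained model of that walk, where select() and read_cnt() are hypothetical stand-ins for gfx_v9_4_select_se_sh() and the RREG32 read:

#include <linux/types.h>

/* Iteration bounds for one EDC counter register, as carried by the
 * entries of gfx_v9_4_edc_counter_regs[]. */
struct cnt_reg {
	u32 se_num, instance;
};

static u32 walk_edc_counters(const struct cnt_reg *regs, u32 nregs,
			     void (*select)(u32 se, u32 inst),
			     u32 (*read_cnt)(const struct cnt_reg *r))
{
	u32 i, se, inst, total = 0;

	for (i = 0; i < nregs; i++)
		for (se = 0; se < regs[i].se_num; se++)
			for (inst = 0; inst < regs[i].instance; inst++) {
				select(se, inst);
				total += read_cnt(&regs[i]);
			}

	select(0xffffffff, 0xffffffff);	/* restore broadcast */
	return total;
}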
gfx_v9_4_edc_counter_regs[i].instance; + k++) { + gfx_v9_4_select_se_sh(adev, j, 0, k); + reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( + gfx_v9_4_edc_counter_regs[i])); + if (reg_value) + gfx_v9_4_ras_error_count( + &gfx_v9_4_edc_counter_regs[i], + j, k, reg_value, &sec_count, + &ded_count); + } + } + } + + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; + + gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + gfx_v9_4_query_utc_edc_status(adev, err_data); + + return 0; +} + +void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev) +{ + int i, j, k; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) { + for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) { + for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance; + k++) { + gfx_v9_4_select_se_sh(adev, j, 0x0, k); + RREG32(SOC15_REG_ENTRY_OFFSET( + gfx_v9_4_edc_counter_regs[i])); + } + } + } + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); + mutex_unlock(&adev->grbm_idx_mutex); + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0); + + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL, 0); + + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL); + } + + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL); + } + + for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) { + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i); + RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL); + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i); + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL); + } + + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i); + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL); + } + + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); +} + +int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if) +{ + struct ras_inject_if *info = (struct ras_inject_if *)inject_if; + int ret; + struct ta_ras_trigger_error_input block_info = { 0 }; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return -EINVAL; + + block_info.block_id = amdgpu_ras_block_to_ta(info->head.block); + block_info.sub_block_index = info->head.sub_block_index; + block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type); + block_info.address = info->address; + block_info.value = info->value; + + mutex_lock(&adev->grbm_idx_mutex); + ret = psp_ras_trigger_error(&adev->psp, &block_info); + mutex_unlock(&adev->grbm_idx_mutex); + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h new file mode 100644 index
000000000000..2e3f6f755ad4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h @@ -0,0 +1,35 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __GFX_V9_4_H__ +#define __GFX_V9_4_H__ + +void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev); + +int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status); + +int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, + void *inject_if); + +#endif /* __GFX_V9_4_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index bbede09983e1..9775eca6fe43 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -262,7 +262,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, { bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub); struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; - u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type); + u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type); + u32 tmp; /* Use register 17 for GART */ const unsigned eng = 17; unsigned int i; @@ -289,7 +290,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); /* * Issue a dummy read to wait for the ACK register to be cleared @@ -418,7 +419,8 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (amdgpu_emu_mode == 0 && ring->sched.ready) { spin_lock(&adev->gfx.kiq.ring_lock); - amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size); + /* 2 dwords flush + 8 dwords fence */ + amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); amdgpu_fence_emit_polling(ring, &seq); @@ -441,10 +443,10 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (all_hub) { for (i = 0; i < adev->num_vmhubs; i++) gmc_v10_0_flush_gpu_tlb(adev, vmid, - i, 0); + i, flush_type); } else { gmc_v10_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, 0); + AMDGPU_GFXHUB_0, flush_type); } break; } @@ -640,12 +642,7 @@ static int gmc_v10_0_late_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - /* - * Can't free the stolen VGA memory when it might be used for memory - * training again. 
- */ - if (!adev->fw_vram_usage.mem_train_support) - amdgpu_bo_late_init(adev); + amdgpu_bo_late_init(adev); r = amdgpu_gmc_allocate_vm_inv_eng(adev); if (r) @@ -829,19 +826,6 @@ static int gmc_v10_0_sw_init(void *handle) adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev); - /* - * In dual GPUs scenario, stolen_size is assigned to zero on the - * secondary GPU, since there is no pre-OS console using that memory. - * Then the bottom region of VRAM was allocated as GTT, unfortunately a - * small region of bottom VRAM was encroached by UMC firmware during - * GDDR6 BIST training, this cause page fault. - * The page fault can be fixed by forcing stolen_size to 3MB, then the - * bottom region of VRAM was allocated as stolen memory, GTT corruption - * avoid. - */ - adev->gmc.stolen_size = max(adev->gmc.stolen_size, - AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE); - /* Memory manager */ r = amdgpu_bo_init(adev); if (r) @@ -881,13 +865,6 @@ static void gmc_v10_0_gart_fini(struct amdgpu_device *adev) static int gmc_v10_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - void *stolen_vga_buf; - - /* - * Free the stolen memory if it wasn't already freed in late_init - * because of memory training. - */ - amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf); amdgpu_vm_manager_fini(adev); gmc_v10_0_gart_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 19d5b133e1d7..9da9596a3638 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -381,7 +381,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); #ifdef CONFIG_X86_64 - if (adev->flags & AMD_IS_APU) { + if (adev->flags & AMD_IS_APU && + adev->gmc.real_vram_size > adev->gmc.aper_size) { adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; adev->gmc.aper_size = adev->gmc.real_vram_size; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 40a496804356..90216abf14a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -476,13 +476,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, { bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); const unsigned eng = 17; - u32 j, tmp; + u32 j, inv_req, tmp; struct amdgpu_vmhub *hub; BUG_ON(vmhub >= adev->num_vmhubs); hub = &adev->vmhub[vmhub]; - tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); + inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); /* This is necessary for a HW workaround under SRIOV as well * as GFXOFF under bare metal @@ -493,7 +493,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, uint32_t req = hub->vm_inv_eng0_req + eng; uint32_t ack = hub->vm_inv_eng0_ack + eng; - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp, + amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, 1 << vmid); return; } @@ -521,7 +521,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); /* * Issue a dummy read to wait for the ACK register to be cleared @@ -578,7 +578,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (ring->sched.ready) { spin_lock(&adev->gfx.kiq.ring_lock); - 
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size); + /* 2 dwords flush + 8 dwords fence */ + amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub); amdgpu_fence_emit_polling(ring, &seq); @@ -601,10 +602,10 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, if (all_hub) { for (i = 0; i < adev->num_vmhubs; i++) gmc_v9_0_flush_gpu_tlb(adev, vmid, - i, 0); + i, flush_type); } else { gmc_v9_0_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, 0); + AMDGPU_GFXHUB_0, flush_type); } break; } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index a78292d84854..ff2e6e1ccde7 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -690,7 +690,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { if (jpeg_v2_0_is_idle(handle)) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 2c58939e6ad0..c6d046df4b70 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -469,7 +469,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); int i; for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index adfd8a6171eb..49a3a56ec017 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -523,9 +523,9 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, case CHIP_RAVEN: case CHIP_RENOIR: mmhub_v1_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v1_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index a7cb185d639a..bde189680521 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -427,9 +427,9 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, case CHIP_NAVI14: case CHIP_NAVI12: mmhub_v2_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v2_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 5c42387c9274..a5281df8d84f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -625,9 +625,9 @@ int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev, switch (adev->asic_type) { case CHIP_ARCTURUS: mmhub_v9_4_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v9_4_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
true : false, + state == AMD_CG_STATE_GATE); break; default: break; @@ -663,6 +663,7 @@ void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags) } static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { + /* MMHUB Range 0 */ { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), @@ -751,6 +752,24 @@ static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { 0, 0, SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), }, + { "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 1 */ { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), @@ -838,16 +857,686 @@ static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 2 */ + { "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0,
mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 3 */ + { "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0,
mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, 
mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 4 */ + { "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA4_IOWR_CMDMEM", 
SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 5 */ + { "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2,
GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 6 */ + { "MMEA6_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA6_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA6_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA6_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA6_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + 
SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA6_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA6_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA6_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA6_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA6_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA6_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_DED_COUNT), + }, + + /* MMHUB Range 7 */ + { "MMEA7_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), + }, + { "MMEA7_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), + }, + { "MMEA7_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), + }, + { "MMEA7_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT,
RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_DED_COUNT), + }, + { "MMEA7_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_DED_COUNT), + }, + { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), + }, + { "MMEA7_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), + }, + { "MMEA7_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), + }, + { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0, + }, + { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), + }, + { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), + }, + { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IORD_CMDMEM_DED_COUNT), + }, + { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), + }, + { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), + }, + { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), + }, + { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), + 0, 0, + SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), + }, + { "MMEA7_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_DED_COUNT), + }, + { "MMEA7_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_DED_COUNT), + }, + { "MMEA7_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_SED_COUNT), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_DED_COUNT), + }, + { "MMEA7_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_SED_COUNT), + 
SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_DED_COUNT), } }; static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = { - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0}, - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0}, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 0, 0, 0 }, + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 }, }; static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg, diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index f737ce459c28..cf557a428298 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -426,7 +426,7 @@ static int navi10_ih_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; navi10_ih_update_clockgating_state(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 2e0f8933410e..2d1bebdf1603 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -950,13 +950,13 @@ static int nv_common_set_clockgating_state(void *handle, case CHIP_NAVI14: case CHIP_NAVI12: adev->nbio.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); nv_update_hdp_mem_power_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); nv_update_hdp_clock_gating(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 685dd9754c67..0829188c1a5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -22,6 +22,7 @@ #include <linux/firmware.h> #include <linux/module.h> +#include <linux/vmalloc.h> #include "amdgpu.h" #include "amdgpu_psp.h" @@ -971,10 +972,13 @@ Err_out: */ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) { - int ret; - uint32_t p2c_header[4]; struct psp_memory_training_context *ctx = &psp->mem_train_ctx; uint32_t *pcache = (uint32_t*)ctx->sys_cache; + struct amdgpu_device *adev = psp->adev; + uint32_t p2c_header[4]; + uint32_t sz; + void *buf; + int ret; if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) { DRM_DEBUG("Memory training is not supported.\n"); @@ -989,7 +993,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) return 0; } - amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); + amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n", pcache[0], pcache[1], pcache[2], pcache[3], p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]); @@ -1026,11 +1030,38 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) DRM_DEBUG("Memory training ops:%x.\n", ops); if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { + /* + * Long traing will encroach certain mount of bottom VRAM, + * saving the content of this bottom VRAM to system memory + * before training, and restoring it after training to avoid + * VRAM corruption. + */ + sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE; + + if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) { + DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n", + adev->gmc.visible_vram_size, + adev->mman.aper_base_kaddr); + return -EINVAL; + } + + buf = vmalloc(sz); + if (!buf) { + DRM_ERROR("failed to allocate system memory.\n"); + return -ENOMEM; + } + + memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz); ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); if (ret) { DRM_ERROR("Send long training msg failed.\n"); + vfree(buf); return ret; } + + memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); + adev->nbio.funcs->hdp_flush(adev, NULL); + vfree(buf); } if (ops & PSP_MEM_TRAIN_SAVE) { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 27c7001be1ee..e55884d204bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2176,9 +2176,9 @@ static int sdma_v4_0_set_clockgating_state(void *handle, case CHIP_ARCTURUS: case CHIP_RENOIR: sdma_v4_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); sdma_v4_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 4c6bf1f8a528..67b9830b7c7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1525,9 +1525,9 @@ static int sdma_v5_0_set_clockgating_state(void *handle, case CHIP_NAVI14: case CHIP_NAVI12: sdma_v5_0_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); sdma_v5_0_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 9aac9f9c50bb..42d5601b6bf3 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -648,7 +648,7 @@ static int si_dma_set_clockgating_state(void *handle, bool enable; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - enable = (state == AMD_CG_STATE_GATE) ? true : false; + enable = (state == AMD_CG_STATE_GATE); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { for (i = 0; i < adev->sdma.num_instances; i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 317803f6a561..15f3424a1ff7 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -537,6 +537,10 @@ soc15_asic_reset_method(struct amdgpu_device *adev) static int soc15_asic_reset(struct amdgpu_device *adev) { + /* original raven doesn't have full asic reset */ + if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8) + return 0; + switch (soc15_asic_reset_method(adev)) { case AMD_RESET_METHOD_BACO: if (!adev->in_suspend) @@ -1467,38 +1471,38 @@ static int soc15_common_set_clockgating_state(void *handle, case CHIP_VEGA12: case CHIP_VEGA20: adev->nbio.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_hdp_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_rom_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->df.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; case CHIP_RAVEN: case CHIP_RENOIR: adev->nbio.funcs->update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_hdp_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_drm_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); soc15_update_rom_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); break; case CHIP_ARCTURUS: soc15_update_hdp_light_sleep(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 01e62fb8e6e0..0fa8aae2d78e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -763,7 +763,7 @@ static int uvd_v5_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 217084d56ab8..e0aadcaf6c8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -1421,7 +1421,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 475ae68f38f5..217db187207c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -739,7 +739,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); int i; if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 683701cf7270..3fd102efb7af 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -887,7 +887,7 @@ static int vce_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); int i; if ((adev->asic_type == CHIP_POLARIS10) || diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index e654938f6cca..1a24fadd30e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1346,7 +1346,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index f4db8af6536b..4f7216788f11 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -1213,7 +1213,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (enable) { /* wait for STATUS to clear */ @@ -1624,7 +1624,7 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) +int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; uint32_t tmp = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h index ef749b02ded9..6c9de1882428 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h @@ -37,6 +37,7 @@ extern void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr); extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); +extern int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring); extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring); extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index c8b63d57a541..70fae7977f8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -435,88 +435,88 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { if (!indirect) { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); } else { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); } offset = 0; } else { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); offset = size; WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), + UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); } if (!indirect) WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); + UVD, 0, 
mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); else WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); /* cache window 1: stack */ if (!indirect) { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } else { WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); /* cache window 2: context */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); + UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); /* non-cache window */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect); + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect); + UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect); /* VCN global tiling registers */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, 
SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); } /** @@ -670,19 +670,19 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev, UVD_CGC_CTRL__VCPU_MODE_MASK | UVD_CGC_CTRL__MMSCH_MODE_MASK); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); + UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); /* turn off clock gating */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect); + UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect); /* turn on SUVD clock gating */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); + UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); /* turn on sw mode in UVD_SUVD_CGC_CTRL */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); + UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); } /** @@ -772,11 +772,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; tmp |= UVD_VCPU_CNTL__BLK_RST_MASK; WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); + UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); /* disable master interupt */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect); + UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect); /* setup mmUVD_LMI_CTRL */ tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | @@ -788,28 +788,28 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | 0x00100000L); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect); + UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_MPC_CNTL), + UVD, 0, mmUVD_MPC_CNTL), 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_MPC_SET_MUXA0), + UVD, 0, mmUVD_MPC_SET_MUXA0), ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_MPC_SET_MUXB0), + UVD, 0, mmUVD_MPC_SET_MUXB0), ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_MPC_SET_MUX), + UVD, 0, mmUVD_MPC_SET_MUX), ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect); @@ -817,26 +817,26 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect); + UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, 
indirect); WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); + UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); /* enable LMI MC and UMC channels */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect); + UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect); /* unblock VCPU register access */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect); + UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect); tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); + UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); /* enable master interrupt */ WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( - UVD, inst_idx, mmUVD_MASTINT_EN), + UVD, 0, mmUVD_MASTINT_EN), UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); if (indirect) @@ -891,8 +891,10 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) - return vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram); + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram); + continue; + } /* disable register anti-hang mechanism */ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0, @@ -903,6 +905,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp); } + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + return 0; + /*SW clock gating */ vcn_v2_5_disable_clock_gating(adev); @@ -1294,10 +1299,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { r = vcn_v2_5_stop_dpg_mode(adev, i); - goto power_off; + continue; } /* wait for vcn idle */ @@ -1349,7 +1353,6 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); } -power_off: if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, false); @@ -1488,7 +1491,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = { .emit_ib = vcn_v2_0_dec_ring_emit_ib, .emit_fence = vcn_v2_0_dec_ring_emit_fence, .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_dec_ring_test_ring, + .test_ring = vcn_v2_0_dec_ring_test_ring, .test_ib = amdgpu_vcn_dec_ring_test_ib, .insert_nop = vcn_v2_0_dec_ring_insert_nop, .insert_start = vcn_v2_0_dec_ring_insert_start, @@ -1663,7 +1666,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + bool enable = (state == AMD_CG_STATE_GATE); if (amdgpu_sriov_vf(adev)) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index d9e331084ea0..407c6093c2ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -717,7 +717,7 @@ static int vega10_ih_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; vega10_ih_update_clockgating_state(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 2870553a2ce0..80d22bf702e8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1237,16 +1237,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, list_add(&q->list, &qpd->queues_list); qpd->queue_count++; + + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) + dqm->sdma_queue_count++; + else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) + dqm->xgmi_sdma_queue_count++; + if (q->properties.is_active) { dqm->queue_count++; retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); } - if (q->properties.type == KFD_QUEUE_TYPE_SDMA) - dqm->sdma_queue_count++; - else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) - dqm->xgmi_sdma_queue_count++; /* * Unconditionally increment this counter, regardless of the queue's * type or whether the queue is active. diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9402374d2466..279541517a99 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -940,14 +940,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } - dc_hardware_init(adev->dm.dc); - r = dm_dmub_hw_init(adev); if (r) { DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); goto error; } + dc_hardware_init(adev->dm.dc); + adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { DRM_ERROR( @@ -7759,24 +7759,27 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, struct drm_crtc_state *new_crtc_state, *old_crtc_state; struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state; struct dc_stream_status *status = NULL; - - struct dc_surface_update *updates; enum surface_update_type update_type = UPDATE_TYPE_FAST; + struct surface_info_bundle { + struct dc_surface_update surface_updates[MAX_SURFACES]; + struct dc_plane_info plane_infos[MAX_SURFACES]; + struct dc_scaling_info scaling_infos[MAX_SURFACES]; + struct dc_flip_addrs flip_addrs[MAX_SURFACES]; + struct dc_stream_update stream_update; + } *bundle; - updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL); + bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); - if (!updates) { - DRM_ERROR("Failed to allocate plane updates\n"); + if (!bundle) { + DRM_ERROR("Failed to allocate update bundle\n"); /* Set type to FULL to avoid crashing in DC*/ update_type = UPDATE_TYPE_FULL; goto cleanup; } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - struct dc_scaling_info scaling_info; - struct dc_stream_update stream_update; - memset(&stream_update, 0, sizeof(stream_update)); + memset(bundle, 0, sizeof(struct surface_info_bundle)); new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); @@ -7793,8 +7796,9 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) { const struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(new_plane_state->fb); - struct dc_plane_info plane_info; - struct dc_flip_addrs flip_addr; + struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane]; + struct dc_flip_addrs *flip_addr = 
&bundle->flip_addrs[num_plane]; + struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane]; uint64_t tiling_flags; new_plane_crtc = new_plane_state->crtc; @@ -7812,49 +7816,48 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, if (crtc != new_plane_crtc) continue; - updates[num_plane].surface = new_dm_plane_state->dc_state; + bundle->surface_updates[num_plane].surface = + new_dm_plane_state->dc_state; if (new_crtc_state->mode_changed) { - stream_update.dst = new_dm_crtc_state->stream->dst; - stream_update.src = new_dm_crtc_state->stream->src; + bundle->stream_update.dst = new_dm_crtc_state->stream->dst; + bundle->stream_update.src = new_dm_crtc_state->stream->src; } if (new_crtc_state->color_mgmt_changed) { - updates[num_plane].gamma = + bundle->surface_updates[num_plane].gamma = new_dm_plane_state->dc_state->gamma_correction; - updates[num_plane].in_transfer_func = + bundle->surface_updates[num_plane].in_transfer_func = new_dm_plane_state->dc_state->in_transfer_func; - stream_update.gamut_remap = + bundle->stream_update.gamut_remap = &new_dm_crtc_state->stream->gamut_remap_matrix; - stream_update.output_csc_transform = + bundle->stream_update.output_csc_transform = &new_dm_crtc_state->stream->csc_color_matrix; - stream_update.out_transfer_func = + bundle->stream_update.out_transfer_func = new_dm_crtc_state->stream->out_transfer_func; } ret = fill_dc_scaling_info(new_plane_state, - &scaling_info); + scaling_info); if (ret) goto cleanup; - updates[num_plane].scaling_info = &scaling_info; + bundle->surface_updates[num_plane].scaling_info = scaling_info; if (amdgpu_fb) { ret = get_fb_info(amdgpu_fb, &tiling_flags); if (ret) goto cleanup; - memset(&flip_addr, 0, sizeof(flip_addr)); - ret = fill_dc_plane_info_and_addr( dm->adev, new_plane_state, tiling_flags, - &plane_info, - &flip_addr.address); + plane_info, + &flip_addr->address); if (ret) goto cleanup; - updates[num_plane].plane_info = &plane_info; - updates[num_plane].flip_addr = &flip_addr; + bundle->surface_updates[num_plane].plane_info = plane_info; + bundle->surface_updates[num_plane].flip_addr = flip_addr; } num_plane++; @@ -7875,14 +7878,15 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, status = dc_stream_get_status_from_state(old_dm_state->context, new_dm_crtc_state->stream); - stream_update.stream = new_dm_crtc_state->stream; + bundle->stream_update.stream = new_dm_crtc_state->stream; /* * TODO: DC modifies the surface during this call so we need * to lock here - find a way to do this without locking. */ mutex_lock(&dm->dc_lock); - update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, - &stream_update, status); + update_type = dc_check_update_surfaces_for_stream( + dc, bundle->surface_updates, num_plane, + &bundle->stream_update, status); mutex_unlock(&dm->dc_lock); if (update_type > UPDATE_TYPE_MED) { @@ -7892,7 +7896,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, } cleanup: - kfree(updates); + kfree(bundle); *out_type = update_type; return ret; @@ -8163,6 +8167,16 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; #endif + /* + * Perform validation of MST topology in the state: + * We need to perform MST atomic check before calling + * dc_validate_global_state(), or there is a chance + * to get stuck in an infinite loop and hang eventually. 
+ */ + ret = drm_dp_mst_atomic_check(state); + if (ret) + goto fail; + if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) { ret = -EINVAL; goto fail; @@ -8191,10 +8205,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, dc_retain_state(old_dm_state->context); } } - /* Perform validation of MST topology in the state*/ - ret = drm_dp_mst_atomic_check(state); - if (ret) - goto fail; /* Store the overall update type for use later in atomic check. */ for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index ae329335dfcc..0acd3409dd6c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -135,6 +135,20 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, mutex_unlock(&hdcp_w->mutex); } +static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, + unsigned int link_index, + struct amdgpu_dm_connector *aconnector) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + mutex_lock(&hdcp_w->mutex); + hdcp_w->aconnector = aconnector; + + mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); + + process_output(hdcp_w); + mutex_unlock(&hdcp_w->mutex); +} void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -303,6 +317,11 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) memset(link, 0, sizeof(*link)); display->index = aconnector->base.index; + + if (config->dpms_off) { + hdcp_remove_display(hdcp_work, link_index, aconnector); + return; + } display->state = MOD_HDCP_DISPLAY_ACTIVE; if (aconnector->dc_sink != NULL) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 069b7a6f5597..318b474ff20e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -216,7 +216,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); } - ret = drm_dp_update_payload_part1(mst_mgr); + /* It's OK for this to fail */ + drm_dp_update_payload_part1(mst_mgr); /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. 
The sequence is slot 1-63 allocated sequence for each @@ -225,9 +226,6 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( get_payload_table(aconnector, proposed_table); - if (ret) - return false; - return true; } @@ -285,7 +283,6 @@ bool dm_helpers_dp_mst_send_payload_allocation( struct amdgpu_dm_connector *aconnector; struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_port *mst_port; - int ret; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; @@ -299,10 +296,8 @@ bool dm_helpers_dp_mst_send_payload_allocation( if (!mst_mgr->mst_state) return false; - ret = drm_dp_update_payload_part2(mst_mgr); - - if (ret) - return false; + /* It's OK for this to fail */ + drm_dp_update_payload_part2(mst_mgr); if (!enable) drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 96b391e4b3e7..5672f7765919 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -632,7 +632,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, if (drm_dp_atomic_find_vcpi_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn,\ + vars[next_index].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return; if (!drm_dp_mst_atomic_check(state)) { diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 2cb7a4288cb7..629a07a2719b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -89,6 +89,10 @@ static enum bp_result encoder_control_digx_v1_5( struct bios_parser *bp, struct bp_encoder_control *cntl); +static enum bp_result encoder_control_fallback( + struct bios_parser *bp, + struct bp_encoder_control *cntl); + static void init_dig_encoder_control(struct bios_parser *bp) { uint32_t version = @@ -100,7 +104,7 @@ static void init_dig_encoder_control(struct bios_parser *bp) break; default: dm_output_to_console("Don't have dig_encoder_control for v%d\n", version); - bp->cmd_tbl.dig_encoder_control = NULL; + bp->cmd_tbl.dig_encoder_control = encoder_control_fallback; break; } } @@ -184,6 +188,18 @@ static enum bp_result encoder_control_digx_v1_5( return result; } +static enum bp_result encoder_control_fallback( + struct bios_parser *bp, + struct bp_encoder_control *cntl) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return encoder_control_digx_v1_5(bp, cntl); + } + + return BP_RESULT_FAILURE; +} + /***************************************************************************** ****************************************************************************** ** @@ -196,6 +212,10 @@ static enum bp_result transmitter_control_v1_6( struct bios_parser *bp, struct bp_transmitter_control *cntl); +static enum bp_result transmitter_control_fallback( + struct bios_parser *bp, + struct bp_transmitter_control *cntl); + static void init_transmitter_control(struct bios_parser *bp) { uint8_t frev; @@ -209,7 +229,7 @@ static void init_transmitter_control(struct bios_parser *bp) break; default: dm_output_to_console("Don't have transmitter_control for v%d\n", crev); - bp->cmd_tbl.transmitter_control = NULL; + bp->cmd_tbl.transmitter_control = transmitter_control_fallback; break; } } @@ -273,6 +293,18 @@ static enum bp_result transmitter_control_v1_6( return result; } +static 
enum bp_result transmitter_control_fallback( + struct bios_parser *bp, + struct bp_transmitter_control *cntl) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return transmitter_control_v1_6(bp, cntl); + } + + return BP_RESULT_FAILURE; +} + /****************************************************************************** ****************************************************************************** ** @@ -285,6 +317,10 @@ static enum bp_result set_pixel_clock_v7( struct bios_parser *bp, struct bp_pixel_clock_parameters *bp_params); +static enum bp_result set_pixel_clock_fallback( + struct bios_parser *bp, + struct bp_pixel_clock_parameters *bp_params); + static void init_set_pixel_clock(struct bios_parser *bp) { switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) { @@ -294,7 +330,7 @@ static void init_set_pixel_clock(struct bios_parser *bp) default: dm_output_to_console("Don't have set_pixel_clock for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)); - bp->cmd_tbl.set_pixel_clock = NULL; + bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback; break; } } @@ -400,6 +436,18 @@ static enum bp_result set_pixel_clock_v7( return result; } +static enum bp_result set_pixel_clock_fallback( + struct bios_parser *bp, + struct bp_pixel_clock_parameters *bp_params) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return set_pixel_clock_v7(bp, bp_params); + } + + return BP_RESULT_FAILURE; +} + /****************************************************************************** ****************************************************************************** ** @@ -632,6 +680,11 @@ static enum bp_result enable_disp_power_gating_v2_1( enum controller_id crtc_id, enum bp_pipe_control_action action); +static enum bp_result enable_disp_power_gating_fallback( + struct bios_parser *bp, + enum controller_id crtc_id, + enum bp_pipe_control_action action); + static void init_enable_disp_power_gating( struct bios_parser *bp) { @@ -643,7 +696,7 @@ static void init_enable_disp_power_gating( default: dm_output_to_console("Don't enable_disp_power_gating enable_crtc for v%d\n", BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating)); - bp->cmd_tbl.enable_disp_power_gating = NULL; + bp->cmd_tbl.enable_disp_power_gating = enable_disp_power_gating_fallback; break; } } @@ -658,6 +711,10 @@ static void enable_disp_power_gating_dmcub( power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING; power_gating.power_gating.pwr = *pwr; + /* ATOM_ENABLE is old API in DMUB */ + if (power_gating.power_gating.pwr.enable == ATOM_ENABLE) + power_gating.power_gating.pwr.enable = ATOM_INIT; + dc_dmub_srv_cmd_queue(dmcub, &power_gating.header); dc_dmub_srv_cmd_execute(dmcub); dc_dmub_srv_wait_idle(dmcub); @@ -695,6 +752,19 @@ static enum bp_result enable_disp_power_gating_v2_1( return result; } +static enum bp_result enable_disp_power_gating_fallback( + struct bios_parser *bp, + enum controller_id crtc_id, + enum bp_pipe_control_action action) +{ + if (bp->base.ctx->dc->ctx->dmub_srv && + bp->base.ctx->dc->debug.dmub_command_table) { + return enable_disp_power_gating_v2_1(bp, crtc_id, action); + } + + return BP_RESULT_FAILURE; +} + /****************************************************************************** ******************************************************************************* ** diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index a27d84ca15a5..1a37550731de 100644 
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -1435,6 +1435,7 @@ void dcn_bw_update_from_pplib(struct dc *dc) struct dc_context *ctx = dc->ctx; struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; bool res; + unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx; /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ res = dm_pp_get_clock_levels_by_type_with_voltage( @@ -1446,17 +1447,28 @@ void dcn_bw_update_from_pplib(struct dc *dc) res = verify_clock_values(&fclks); if (res) { - ASSERT(fclks.num_levels >= 3); - dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels * - (fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; + ASSERT(fclks.num_levels); + + vmin0p65_idx = 0; + vmid0p72_idx = fclks.num_levels - + (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1)); + vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1); + vmax0p9_idx = fclks.num_levels - 1; + + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = + 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = + dc->dcn_soc->number_of_channels * + (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; } else BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 6c797fac189d..04441dbcba76 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2462,12 +2462,7 @@ void dc_set_power_state( enum dc_acpi_cm_power_state power_state) { struct kref refcount; - struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib), - GFP_KERNEL); - - ASSERT(dml); - if (!dml) - return; + struct display_mode_lib *dml; switch (power_state) { case DC_ACPI_CM_POWER_STATE_D0: @@ -2490,6 +2485,12 @@ void dc_set_power_state( * clean state, and dc hw programming optimizations will not * cause any trouble. 
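[Annotation] The dcn_calcs.c hunk drops the ASSERT(fclks.num_levels >= 3) and instead clamps the four voltage-corner indices so that one or two reported fclk levels no longer index out of bounds. The index math on its own, in a unit-testable form (assuming only num_levels >= 1, as the remaining ASSERT does):

#include <assert.h>

/* Pick indices for the four voltage corners from however many
 * fclk levels the power code actually reported (>= 1). */
static void pick_corner_indices(unsigned num_levels,
				unsigned *vmin, unsigned *vmid,
				unsigned *vnom, unsigned *vmax)
{
	assert(num_levels >= 1);

	*vmin = 0;
	*vmid = num_levels - (num_levels > 2 ? 3 : (num_levels > 1 ? 2 : 1));
	*vnom = num_levels - (num_levels > 1 ? 2 : 1);
	*vmax = num_levels - 1;
}

int main(void)
{
	unsigned a, b, c, d;

	pick_corner_indices(1, &a, &b, &c, &d);   /* all collapse to level 0 */
	assert(a == 0 && b == 0 && c == 0 && d == 0);

	pick_corner_indices(4, &a, &b, &c, &d);   /* 0, 1, 2, 3 */
	assert(a == 0 && b == 1 && c == 2 && d == 3);
	return 0;
}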
*/ + dml = kzalloc(sizeof(struct display_mode_lib), + GFP_KERNEL); + + ASSERT(dml); + if (!dml) + return; /* Preserve refcount */ refcount = dc->current_state->refcount; @@ -2503,10 +2504,10 @@ dc->current_state->refcount = refcount; dc->current_state->bw_ctx.dml = *dml; + kfree(dml); + break; } - - kfree(dml); } void dc_resume(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 260c0b62d37d..a09119c10d7c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -851,18 +851,12 @@ static bool dc_link_detect_helper(struct dc_link *link, if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps))) same_dpcd = false; } - /* Active dongle plug in without display or downstream unplug*/ + /* Active dongle downstream unplug */ if (link->type == dc_connection_active_dongle && link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { - if (prev_sink != NULL) { + if (prev_sink != NULL) /* Downstream unplug */ dc_sink_release(prev_sink); - } else { - /* Empty dongle plug in */ - dp_verify_link_cap_with_retries(link, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); - } return true; } @@ -969,8 +963,7 @@ static bool dc_link_detect_helper(struct dc_link *link, same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && - sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX && - reason != DETECT_REASON_HPDRX) { + sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { /* * TODO debug why Dell 2413 doesn't like * two link trainings @@ -2882,7 +2875,16 @@ enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link) // Clear all of MST payload then reallocate for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + + /* driver enables split pipe for external monitors, * so we have to check whether pipe_ctx is a split pipe. * If it is a split pipe, the driver uses the top pipe to + * reallocate. + */ + if (!pipe_ctx || pipe_ctx->top_pipe) + continue; + + if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->stream->dpms_off == false && pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { deallocate_mst_payload(pipe_ctx); @@ -2891,7 +2893,11 @@ enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link) for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + + if (!pipe_ctx || pipe_ctx->top_pipe) + continue; + + if (pipe_ctx->stream && pipe_ctx->stream->link == link && pipe_ctx->stream->dpms_off == false && pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { /* enable/disable PHY will clear connection between BE and FE diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 6ab298c65247..cb731c1d30b1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -983,7 +983,7 @@ static enum link_training_result perform_clock_recovery_sequence( offset); /* 2.
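[Annotation] In dc_link_reallocate_mst_payload() above, only the top pipe of a split-pipe pair owns the MST payload, so both loops now skip secondary pipes before looking at the stream. The guard, distilled to a hypothetical pipe_stub type:

#include <stddef.h>
#include <stdbool.h>

struct pipe_stub {
	struct pipe_stub *top_pipe;   /* non-NULL => secondary half of a split */
	bool has_mst_stream;
};

/* True when this pipe should take part in payload reallocation. */
static bool owns_mst_payload(const struct pipe_stub *pipe)
{
	if (!pipe || pipe->top_pipe)
		return false;   /* skip secondary halves of a split pipe */
	return pipe->has_mst_stream;
}

int main(void)
{
	struct pipe_stub top = { .top_pipe = NULL, .has_mst_stream = true };
	struct pipe_stub bottom = { .top_pipe = &top, .has_mst_stream = true };

	return (owns_mst_payload(&top) && !owns_mst_payload(&bottom)) ? 0 : 1;
}

Without the guard, a split configuration would deallocate and re-enable the same payload twice, once per pipe half.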
update DPCD of the receiver*/ - if (!retries_cr) + if (!retry_count) /* EPR #361076 - write as a 5-byte burst, * but only for the 1-st iteration.*/ dpcd_set_lt_pattern_and_lane_settings( @@ -3680,7 +3680,7 @@ static void set_crtc_test_pattern(struct dc_link *link, struct pipe_ctx *odm_pipe; enum controller_dp_color_space controller_color_space; int opp_cnt = 1; - uint8_t count = 0; + int count; switch (test_pattern_color_space) { case DP_TEST_PATTERN_COLOR_SPACE_RGB: @@ -3725,11 +3725,11 @@ static void set_crtc_test_pattern(struct dc_link *link, width, height); /* wait for dpg to blank pixel data with test pattern */ - for (count = 0; count < 1000; count++) + for (count = 0; count < 1000; count++) { if (opp->funcs->dpg_is_blanked(opp)) break; - else - udelay(100); + udelay(100); + } } } break; @@ -3925,8 +3925,38 @@ bool dc_link_dp_set_test_pattern( sizeof(training_pattern)); } } else { - /* CRTC Patterns */ + enum dc_color_space color_space = COLOR_SPACE_UNKNOWN; + + switch (test_pattern_color_space) { + case DP_TEST_PATTERN_COLOR_SPACE_RGB: + color_space = COLOR_SPACE_SRGB; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_SRGB_LIMITED; + break; + + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: + color_space = COLOR_SPACE_YCBCR601; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_YCBCR601_LIMITED; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: + color_space = COLOR_SPACE_YCBCR709; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_YCBCR709_LIMITED; + break; + default: + break; + } + /* update MSA to requested color space */ + pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, + &pipe_ctx->stream->timing, + color_space, + pipe_ctx->stream->use_vsc_sdp_for_colorimetry, + link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + + /* CRTC Patterns */ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); + /* Set Test Pattern state */ link->test_pattern_enabled = true; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3fa85a54360f..8ff25b5dd2f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.68" +#define DC_VER "3.2.69" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -425,6 +425,7 @@ struct dc_debug_options { bool validate_dml_output; bool enable_dmcub_surface_flip; bool usbc_combo_phy_reset_wa; + bool disable_dsc; }; struct dc_debug_data { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 1cd4d8fc361f..066188ba7949 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -100,20 +100,6 @@ static uint32_t get_hw_buffer_available_size( dce_i2c_hw->buffer_used_bytes; } -static uint32_t get_speed( - const struct dce_i2c_hw *dce_i2c_hw) -{ - uint32_t pre_scale = 0; - - REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale); - - /* [anaumov] it seems following is unnecessary */ - /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/ - return pre_scale ? 
- dce_i2c_hw->reference_frequency / pre_scale : - dce_i2c_hw->default_speed; -} - static void process_channel_reply( struct dce_i2c_hw *dce_i2c_hw, struct i2c_payload *reply) @@ -278,16 +264,25 @@ static void set_speed( struct dce_i2c_hw *dce_i2c_hw, uint32_t speed) { + uint32_t xtal_ref_div = 0; + uint32_t prescale = 0; + + REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); + + if (xtal_ref_div == 0) + xtal_ref_div = 2; + + prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed; if (speed) { if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) REG_UPDATE_N(SPEED, 3, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1); else REG_UPDATE_N(SPEED, 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); } } @@ -344,9 +339,7 @@ static void release_engine( bool safe_to_reset; /* Restore original HW engine speed */ - - set_speed(dce_i2c_hw, dce_i2c_hw->original_speed); - + set_speed(dce_i2c_hw, dce_i2c_hw->default_speed); /* Reset HW engine */ { @@ -378,7 +371,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( { uint32_t counter = 0; enum gpio_result result; - uint32_t current_speed; struct dce_i2c_hw *dce_i2c_hw = NULL; if (!ddc) @@ -416,11 +408,6 @@ struct dce_i2c_hw *acquire_i2c_hw_engine( dce_i2c_hw->ddc = ddc; - current_speed = get_speed(dce_i2c_hw); - - if (current_speed) - dce_i2c_hw->original_speed = current_speed; - if (!setup_engine(dce_i2c_hw)) { release_engine(dce_i2c_hw); return NULL; @@ -478,13 +465,9 @@ static void submit_channel_request_hw( static uint32_t get_transaction_timeout_hw( const struct dce_i2c_hw *dce_i2c_hw, - uint32_t length) + uint32_t length, + uint32_t speed) { - - uint32_t speed = get_speed(dce_i2c_hw); - - - uint32_t period_timeout; uint32_t num_of_clock_stretches; @@ -504,7 +487,8 @@ static uint32_t get_transaction_timeout_hw( bool dce_i2c_hw_engine_submit_payload( struct dce_i2c_hw *dce_i2c_hw, struct i2c_payload *payload, - bool middle_of_transaction) + bool middle_of_transaction, + uint32_t speed) { struct i2c_request_transaction_data request; @@ -542,7 +526,7 @@ bool dce_i2c_hw_engine_submit_payload( /* obtain timeout value before submitting request */ transaction_timeout = get_transaction_timeout_hw( - dce_i2c_hw, payload->length + 1); + dce_i2c_hw, payload->length + 1, speed); submit_channel_request_hw( dce_i2c_hw, &request); @@ -588,13 +572,11 @@ bool dce_i2c_submit_command_hw( struct i2c_payload *payload = cmd->payloads + index_of_payload; if (!dce_i2c_hw_engine_submit_payload( - dce_i2c_hw, payload, mot)) { + dce_i2c_hw, payload, mot, cmd->speed)) { result = false; break; } - - ++index_of_payload; } @@ -625,7 +607,6 @@ void dce_i2c_hw_construct( dce_i2c_hw->buffer_used_bytes = 0; dce_i2c_hw->transaction_count = 0; dce_i2c_hw->engine_keep_power_up_count = 1; - dce_i2c_hw->original_speed = DEFAULT_I2C_HW_SPEED; dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED; dce_i2c_hw->send_reset_length = 0; dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE; @@ -640,9 +621,6 @@ void dce100_i2c_hw_construct( const struct dce_i2c_shift *shifts, const struct dce_i2c_mask *masks) { - - uint32_t xtal_ref_div = 0; - dce_i2c_hw_construct(dce_i2c_hw, ctx, engine_id, @@ -650,21 +628,6 @@ 
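[Annotation] set_speed() above now derives the SCL prescale on every call from the stored reference clock, XTAL_REF_DIV, and the requested speed, instead of caching an "original speed" read back from the hardware. Assuming, as the removed dce100 code did, that the stored reference clock was pre-halved and that an unprogrammed XTAL_REF_DIV means 2, the arithmetic is:

#include <assert.h>
#include <stdint.h>

/* reference_khz is the pre-halved reference clock the driver stores;
 * multiplying by 2 and dividing by xtal_ref_div recovers the source
 * clock before scaling it down to the requested I2C speed (kHz). */
static uint32_t i2c_prescale(uint32_t reference_khz,
			     uint32_t xtal_ref_div,
			     uint32_t speed_khz)
{
	if (xtal_ref_div == 0)
		xtal_ref_div = 2;   /* hardware default when unprogrammed */

	return ((reference_khz * 2) / xtal_ref_div) / speed_khz;
}

int main(void)
{
	/* e.g. 48 MHz stored as 48000 kHz, div 2, 100 kHz I2C -> 480 */
	assert(i2c_prescale(48000, 2, 100) == 480);
	assert(i2c_prescale(48000, 0, 100) == 480);   /* div-0 guard */
	return 0;
}

Computing the divider at use time also removes the stale-state bug where the cached speed survived an engine reset.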
void dce100_i2c_hw_construct( shifts, masks); dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100; - - REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); - - if (xtal_ref_div == 0) - xtal_ref_div = 2; - - /*Calculating Reference Clock by divding original frequency by - * XTAL_REF_DIV. - * At upper level, uint32_t reference_frequency = - * dal_dce_i2c_get_reference_clock(as) >> 1 - * which already divided by 2. So we need x2 to get original - * reference clock from ppll_info - */ - dce_i2c_hw->reference_frequency = - (dce_i2c_hw->reference_frequency * 2) / xtal_ref_div; } void dce112_i2c_hw_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h index d4b2037f7d74..fb055e6883c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h @@ -256,7 +256,6 @@ struct i2c_request_transaction_data { struct dce_i2c_hw { struct ddc *ddc; - uint32_t original_speed; uint32_t engine_keep_power_up_count; uint32_t transaction_count; uint32_t buffer_used_bytes; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index f2127afb37b2..1008ac8a0f2a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2911,6 +2911,33 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) hubbub->funcs->update_dchub(hubbub, dh_data); } +static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) +{ + struct pipe_ctx *test_pipe; + const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2; + int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b; + + /** + * Disable the cursor if there's another pipe above this with a + * plane that contains this pipe's viewport to prevent double cursor + * and incorrect scaling artifacts. 
+ */ + for (test_pipe = pipe_ctx->top_pipe; test_pipe; + test_pipe = test_pipe->top_pipe) { + if (!test_pipe->plane_state->visible) + continue; + + r2 = &test_pipe->plane_res.scl_data.recout; + r2_r = r2->x + r2->width; + r2_b = r2->y + r2->height; + + if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b) + return true; + } + + return false; +} + void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) { struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; @@ -2956,6 +2983,9 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) pos_cpy.enable = false; + if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx)) + pos_cpy.enable = false; + // Swap axis and mirror horizontally if (param.rotation == ROTATION_ANGLE_90) { uint32_t temp_x = pos_cpy.x; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index da63fc53cc4a..cf09b9335728 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -261,28 +261,28 @@ static void hubp21_apply_PLAT_54186_wa( address->video_progressive.luma_addr.high_part == 0xf4) return; - if ((rotation_angle == 0 || rotation_angle == 180) + if ((rotation_angle == ROTATION_ANGLE_0 || rotation_angle == ROTATION_ANGLE_180) && viewport_c_height <= 512) return; - if ((rotation_angle == 90 || rotation_angle == 270) + if ((rotation_angle == ROTATION_ANGLE_90 || rotation_angle == ROTATION_ANGLE_270) && viewport_c_width <= 512) return; switch (rotation_angle) { - case 0: /* 0 degree rotation */ + case ROTATION_ANGLE_0: /* 0 degree rotation */ row_height = 128; patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1; patched_viewport_width = viewport_c_width; hubp21->PLAT_54186_wa_chroma_addr_offset = 0; break; - case 2: /* 180 degree rotation */ + case ROTATION_ANGLE_180: /* 180 degree rotation */ row_height = 128; patched_viewport_height = viewport_c_height + row_height; patched_viewport_width = viewport_c_width; hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe; break; - case 1: /* 90 degree rotation */ + case ROTATION_ANGLE_90: /* 90 degree rotation */ row_height = 256; if (h_mirror_en) { patched_viewport_height = viewport_c_height; @@ -294,7 +294,7 @@ static void hubp21_apply_PLAT_54186_wa( hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size; } break; - case 3: /* 270 degree rotation */ + case ROTATION_ANGLE_270: /* 270 degree rotation */ row_height = 256; if (h_mirror_en) { patched_viewport_height = viewport_c_height; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 1d741bca2211..0d506d30d6b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -830,7 +830,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, - .max_downscale_src_width = 3840, + .max_downscale_src_width = 4096, .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = true, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index e7a8ac7a1f22..45f028986a8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ 
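[Annotation] dcn10_can_pipe_disable_cursor() above walks the top_pipe chain and hides the cursor when a higher, visible plane's recout fully contains this pipe's recout, preventing a doubled or wrongly scaled cursor. The containment test by itself, on plain rectangles rather than the dc structs:

#include <assert.h>
#include <stdbool.h>

struct rect_stub { int x, y, width, height; };

/* True when r1 lies entirely inside r2. */
static bool rect_inside(const struct rect_stub *r1, const struct rect_stub *r2)
{
	return r1->x >= r2->x && r1->y >= r2->y &&
	       r1->x + r1->width  <= r2->x + r2->width &&
	       r1->y + r1->height <= r2->y + r2->height;
}

int main(void)
{
	struct rect_stub inner = { 100, 100, 200, 200 };
	struct rect_stub outer = {   0,   0, 640, 480 };

	assert(rect_inside(&inner, &outer));
	assert(!rect_inside(&outer, &inner));
	return 0;
}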
b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -38,6 +38,7 @@ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN20_MAX_420_IMAGE_WIDTH 4096 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, @@ -3894,13 +3895,19 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { - locals->ODMCombineEnablePerState[i][k] = false; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - } else { - locals->ODMCombineEnablePerState[i][k] = true; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + + locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + if (mode_lib->vba.ODMCapability) { + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 22f3b5a4b3b9..485a9c62ec58 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -39,6 +39,7 @@ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff #define DCN20_MAX_DSC_IMAGE_WIDTH 5184 +#define DCN20_MAX_420_IMAGE_WIDTH 4096 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, @@ -3935,15 +3936,22 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || - (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown - && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) { - locals->ODMCombineEnablePerState[i][k] = false; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - } else { - locals->ODMCombineEnablePerState[i][k] = true; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + + locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + if (mode_lib->vba.ODMCapability) { + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > 
MaxMaxDispclkRoundedDown) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index af35b3bea909..e6617c958bb8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -65,6 +65,7 @@ typedef struct { #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff #define DCN21_MAX_DSC_IMAGE_WIDTH 5184 +#define DCN21_MAX_420_IMAGE_WIDTH 4096 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib); static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( @@ -3971,15 +3972,22 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || - (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown - && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) { - locals->ODMCombineEnablePerState[i][k] = false; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; - } else { - locals->ODMCombineEnablePerState[i][k] = true; - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + + locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + if (mode_lib->vba.ODMCapability) { + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] && locals->ODMCombineEnablePerState[i][k] == 
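[Annotation] The DML hunks for dcn20, dcn20v2, and dcn21 converge on one decision shape: default to no ODM combine, then enable it when a limit trips; required DISPCLK in every variant, DSC width in the 20v2/21 variants, and the new 4096-wide 4:2:0 limit in all three. As a single predicate (names shortened, limit values taken from the hunks):

#include <assert.h>
#include <stdbool.h>

#define MAX_DSC_IMAGE_WIDTH 5184
#define MAX_420_IMAGE_WIDTH 4096

static bool need_odm_combine(bool odm_capable,
			     double required_dispclk, double max_dispclk,
			     bool dsc_enabled, int h_active, bool is_420)
{
	if (!odm_capable)
		return false;
	if (required_dispclk > max_dispclk)
		return true;   /* a single pipe cannot clock this mode */
	if (dsc_enabled && h_active > MAX_DSC_IMAGE_WIDTH)
		return true;   /* one DSC engine is too narrow */
	if (is_420 && h_active > MAX_420_IMAGE_WIDTH)
		return true;   /* 4:2:0 per-pipe width limit */
	return false;
}

int main(void)
{
	assert(!need_odm_combine(true, 500.0, 1200.0, false, 3840, false));
	assert(need_odm_combine(true, 500.0, 1200.0, false, 5120, true));
	return 0;
}

Restructuring the original single if/else into cascaded conditions is what makes adding the 4:2:0 case a one-branch change per variant.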
dm_odm_combine_mode_disabled) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index e7a44df676ca..2875efd85467 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -763,8 +763,8 @@ struct vba_vars_st { double SwathWidthC[DC__NUM_DPP__MAX]; unsigned int BytePerPixelY[DC__NUM_DPP__MAX]; unsigned int BytePerPixelC[DC__NUM_DPP__MAX]; - long dummyinteger1; - long dummyinteger2; + unsigned int dummyinteger1; + unsigned int dummyinteger2; double FinalDRAMClockChangeLatency; double Tdmdl_vm[DC__NUM_DPP__MAX]; double Tdmdl[DC__NUM_DPP__MAX]; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 8b78fcbfe746..87d682d25278 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -224,7 +224,8 @@ static void get_dsc_enc_caps( memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); if (dsc) { - dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); + if (!dsc->ctx->dc->debug.disable_dsc) + dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); if (dsc->ctx->dc->debug.native422_support) dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; } diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h index 8e23a7017588..f8917594036a 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h @@ -231,6 +231,8 @@ struct dmub_srv_base_funcs { struct dmub_srv_hw_funcs { /* private: internal use only */ + void (*init)(struct dmub_srv *dmub); + void (*reset)(struct dmub_srv *dmub); void (*reset_release)(struct dmub_srv *dmub); @@ -417,6 +419,21 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params); /** + * dmub_srv_hw_reset() - puts the DMUB hardware in reset state if initialized + * @dmub: the dmub service + * + * Before destroying the DMUB service or releasing the backing framebuffer + * memory we'll need to put the DMCUB into reset first. + * + * A subsequent call to dmub_srv_hw_init() will re-enable the DMCUB. + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - unspecified error + */ +enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub); + +/** * dmub_srv_cmd_queue() - queues a command to the DMUB * @dmub: the dmub service * @cmd: the command to queue diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index cd51c6138894..b2ca8e0dbac9 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -54,6 +54,19 @@ const struct dmub_srv_common_regs dmub_srv_dcn20_regs = { /* Shared functions. 
*/ +static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub, + uint64_t *fb_base, + uint64_t *fb_offset) +{ + uint32_t tmp; + + REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp); + *fb_base = (uint64_t)tmp << 24; + + REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp); + *fb_offset = (uint64_t)tmp << 24; +} + static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in, uint64_t fb_base, uint64_t fb_offset, @@ -67,6 +80,8 @@ void dmub_dcn20_reset(struct dmub_srv *dmub) REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); + REG_WRITE(DMCUB_INBOX1_RPTR, 0); + REG_WRITE(DMCUB_INBOX1_WPTR, 0); } void dmub_dcn20_reset_release(struct dmub_srv *dmub) @@ -82,7 +97,9 @@ void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, const struct dmub_window *cw1) { union dmub_addr offset; - uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; + uint64_t fb_base, fb_offset; + + dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset); REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3, @@ -118,7 +135,9 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub, const struct dmub_window *cw6) { union dmub_addr offset; - uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; + uint64_t fb_base, fb_offset; + + dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset); dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset); @@ -173,8 +192,6 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub, REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000); REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); - REG_WRITE(DMCUB_INBOX1_RPTR, 0); - REG_WRITE(DMCUB_INBOX1_WPTR, 0); } uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h index 53bfd4da69ad..04b0fa13153d 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h @@ -92,7 +92,9 @@ struct dmub_srv; DMUB_SR(DMCUB_SCRATCH14) \ DMUB_SR(DMCUB_SCRATCH15) \ DMUB_SR(CC_DC_PIPE_DIS) \ - DMUB_SR(MMHUBBUB_SOFT_RESET) + DMUB_SR(MMHUBBUB_SOFT_RESET) \ + DMUB_SR(DCN_VM_FB_LOCATION_BASE) \ + DMUB_SR(DCN_VM_FB_OFFSET) #define DMUB_COMMON_FIELDS() \ DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \ @@ -121,7 +123,9 @@ struct dmub_srv; DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \ DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \ - DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) + DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \ + DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \ + DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) struct dmub_srv_common_reg_offset { #define DMUB_SR(reg) uint32_t reg; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index dee676335d73..85a518bf8a76 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -312,6 +312,9 @@ enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init) if (!dmub->sw_init) return DMUB_STATUS_INVALID; + if (!dmub->hw_init) + return DMUB_STATUS_OK; + if (dmub->hw_funcs.is_hw_init) *is_hw_init = dmub->hw_funcs.is_hw_init(dmub); @@ -415,6 +418,22 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, return DMUB_STATUS_OK; } +enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) +{ + if 
(!dmub->sw_init) + return DMUB_STATUS_INVALID; + + if (dmub->hw_init == false) + return DMUB_STATUS_OK; + + if (dmub->hw_funcs.reset) + dmub->hw_funcs.reset(dmub); + + dmub->hw_init = false; + + return DMUB_STATUS_OK; +} + enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, const struct dmub_cmd_header *cmd) { diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 1b278c42809a..cac09d500fda 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1673,129 +1673,6 @@ static bool map_regamma_hw_to_x_user( #define _EXTRA_POINTS 3 -bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, - const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, - const struct freesync_hdr_tf_params *fs_params) -{ - struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; - struct dividers dividers; - - struct pwl_float_data *rgb_user = NULL; - struct pwl_float_data_ex *rgb_regamma = NULL; - struct gamma_pixel *axis_x = NULL; - struct pixel_gamma_point *coeff = NULL; - enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; - bool ret = false; - - if (output_tf->type == TF_TYPE_BYPASS) - return false; - - /* we can use hardcoded curve for plain SRGB TF */ - if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && - output_tf->tf == TRANSFER_FUNCTION_SRGB) { - if (ramp == NULL) - return true; - if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) || - (!mapUserRamp && ramp->type == GAMMA_RGB_256)) - return true; - } - - output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - - if (ramp && ramp->type != GAMMA_CS_TFM_1D && - (mapUserRamp || ramp->type != GAMMA_RGB_256)) { - rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, - sizeof(*rgb_user), - GFP_KERNEL); - if (!rgb_user) - goto rgb_user_alloc_fail; - - axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x), - GFP_KERNEL); - if (!axis_x) - goto axis_x_alloc_fail; - - dividers.divider1 = dc_fixpt_from_fraction(3, 2); - dividers.divider2 = dc_fixpt_from_int(2); - dividers.divider3 = dc_fixpt_from_fraction(5, 2); - - build_evenly_distributed_points( - axis_x, - ramp->num_entries, - dividers); - - if (ramp->type == GAMMA_RGB_256 && mapUserRamp) - scale_gamma(rgb_user, ramp, dividers); - else if (ramp->type == GAMMA_RGB_FLOAT_1024) - scale_gamma_dx(rgb_user, ramp, dividers); - } - - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; - - coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), - GFP_KERNEL); - if (!coeff) - goto coeff_alloc_fail; - - tf = output_tf->tf; - if (tf == TRANSFER_FUNCTION_PQ) { - tf_pts->end_exponent = 7; - tf_pts->x_point_at_y1_red = 125; - tf_pts->x_point_at_y1_green = 125; - tf_pts->x_point_at_y1_blue = 125; - - build_pq(rgb_regamma, - MAX_HW_POINTS, - coordinates_x, - output_tf->sdr_ref_white_level); - } else if (tf == TRANSFER_FUNCTION_GAMMA22 && - fs_params != NULL && fs_params->skip_tm == 0) { - build_freesync_hdr(rgb_regamma, - MAX_HW_POINTS, - coordinates_x, - fs_params); - } else if (tf == TRANSFER_FUNCTION_HLG) { - build_freesync_hdr(rgb_regamma, - MAX_HW_POINTS, - coordinates_x, - fs_params); - - } else { - tf_pts->end_exponent = 0; - tf_pts->x_point_at_y1_red = 1; - tf_pts->x_point_at_y1_green = 1; - tf_pts->x_point_at_y1_blue = 1; - - build_regamma(rgb_regamma, - MAX_HW_POINTS, - coordinates_x, tf); - } 
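[Annotation] dmub_srv_hw_reset() above is deliberately idempotent: calling it before hardware init, or twice in a row, succeeds as a no-op, which lets teardown paths call it unconditionally. A compact model of that state machine (srv_stub and its fields are invented):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

enum status_stub { STATUS_OK, STATUS_INVALID };

struct srv_stub {
	bool sw_init;
	bool hw_init;
	void (*reset)(struct srv_stub *srv);   /* optional hw hook */
};

static enum status_stub srv_hw_reset(struct srv_stub *srv)
{
	if (!srv->sw_init)
		return STATUS_INVALID;   /* never initialized: caller bug */
	if (!srv->hw_init)
		return STATUS_OK;        /* already in reset: idempotent */
	if (srv->reset)
		srv->reset(srv);
	srv->hw_init = false;
	return STATUS_OK;
}

int main(void)
{
	struct srv_stub srv = { .sw_init = true, .hw_init = true, .reset = NULL };

	assert(srv_hw_reset(&srv) == STATUS_OK);
	assert(srv_hw_reset(&srv) == STATUS_OK && !srv.hw_init);   /* safe twice */
	return 0;
}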
- map_regamma_hw_to_x_user(ramp, coeff, rgb_user, - coordinates_x, axis_x, rgb_regamma, - MAX_HW_POINTS, tf_pts, - (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) && - (ramp && ramp->type != GAMMA_CS_TFM_1D)); - - if (ramp && ramp->type == GAMMA_CS_TFM_1D) - apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); - - ret = true; - - kvfree(coeff); -coeff_alloc_fail: - kvfree(rgb_regamma); -rgb_regamma_alloc_fail: - kvfree(axis_x); -axis_x_alloc_fail: - kvfree(rgb_user); -rgb_user_alloc_fail: - return ret; -} - bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, const struct regamma_lut *regamma) { @@ -2043,14 +1920,14 @@ rgb_user_alloc_fail: return ret; } - -bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, +static bool calculate_curve(enum dc_transfer_func_predefined trans, struct dc_transfer_func_distributed_points *points, + struct pwl_float_data_ex *rgb_regamma, + const struct freesync_hdr_tf_params *fs_params, uint32_t sdr_ref_white_level) { uint32_t i; bool ret = false; - struct pwl_float_data_ex *rgb_regamma = NULL; if (trans == TRANSFER_FUNCTION_UNITY || trans == TRANSFER_FUNCTION_LINEAR) { @@ -2060,68 +1937,33 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, points->x_point_at_y1_blue = 1; for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = coordinates_x[i].x; - points->green[i] = coordinates_x[i].x; - points->blue[i] = coordinates_x[i].x; + rgb_regamma[i].r = coordinates_x[i].x; + rgb_regamma[i].g = coordinates_x[i].x; + rgb_regamma[i].b = coordinates_x[i].x; } + ret = true; } else if (trans == TRANSFER_FUNCTION_PQ) { - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; points->end_exponent = 7; points->x_point_at_y1_red = 125; points->x_point_at_y1_green = 125; points->x_point_at_y1_blue = 125; - build_pq(rgb_regamma, MAX_HW_POINTS, coordinates_x, sdr_ref_white_level); - for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = rgb_regamma[i].r; - points->green[i] = rgb_regamma[i].g; - points->blue[i] = rgb_regamma[i].b; - } - ret = true; - - kvfree(rgb_regamma); - } else if (trans == TRANSFER_FUNCTION_SRGB || - trans == TRANSFER_FUNCTION_BT709 || - trans == TRANSFER_FUNCTION_GAMMA22 || - trans == TRANSFER_FUNCTION_GAMMA24 || - trans == TRANSFER_FUNCTION_GAMMA26) { - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; - points->end_exponent = 0; - points->x_point_at_y1_red = 1; - points->x_point_at_y1_green = 1; - points->x_point_at_y1_blue = 1; - build_regamma(rgb_regamma, + ret = true; + } else if (trans == TRANSFER_FUNCTION_GAMMA22 && + fs_params != NULL && fs_params->skip_tm == 0) { + build_freesync_hdr(rgb_regamma, MAX_HW_POINTS, coordinates_x, - trans); - for (i = 0; i <= MAX_HW_POINTS ; i++) { - points->red[i] = rgb_regamma[i].r; - points->green[i] = rgb_regamma[i].g; - points->blue[i] = rgb_regamma[i].b; - } - ret = true; + fs_params); - kvfree(rgb_regamma); + ret = true; } else if (trans == TRANSFER_FUNCTION_HLG) { - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, - sizeof(*rgb_regamma), - GFP_KERNEL); - if (!rgb_regamma) - goto rgb_regamma_alloc_fail; points->end_exponent = 4; points->x_point_at_y1_red = 12; points->x_point_at_y1_green = 12; @@ -2131,18 +1973,127 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, MAX_HW_POINTS, coordinates_x, 80, 1000); - for (i = 0; i <= MAX_HW_POINTS ; i++) { 
- points->red[i] = rgb_regamma[i].r; - points->green[i] = rgb_regamma[i].g; - points->blue[i] = rgb_regamma[i].b; - } + + ret = true; + } else { + // trans == TRANSFER_FUNCTION_SRGB + // trans == TRANSFER_FUNCTION_BT709 + // trans == TRANSFER_FUNCTION_GAMMA22 + // trans == TRANSFER_FUNCTION_GAMMA24 + // trans == TRANSFER_FUNCTION_GAMMA26 + points->end_exponent = 0; + points->x_point_at_y1_red = 1; + points->x_point_at_y1_green = 1; + points->x_point_at_y1_blue = 1; + + build_regamma(rgb_regamma, + MAX_HW_POINTS, + coordinates_x, + trans); + ret = true; - kvfree(rgb_regamma); } -rgb_regamma_alloc_fail: + return ret; } +bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, + const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, + const struct freesync_hdr_tf_params *fs_params) +{ + struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; + struct dividers dividers; + + struct pwl_float_data *rgb_user = NULL; + struct pwl_float_data_ex *rgb_regamma = NULL; + struct gamma_pixel *axis_x = NULL; + struct pixel_gamma_point *coeff = NULL; + enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; + bool ret = false; + + if (output_tf->type == TF_TYPE_BYPASS) + return false; + + /* we can use hardcoded curve for plain SRGB TF */ + if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && + output_tf->tf == TRANSFER_FUNCTION_SRGB) { + if (ramp == NULL) + return true; + if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) || + (!mapUserRamp && ramp->type == GAMMA_RGB_256)) + return true; + } + + output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; + + if (ramp && ramp->type != GAMMA_CS_TFM_1D && + (mapUserRamp || ramp->type != GAMMA_RGB_256)) { + rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, + sizeof(*rgb_user), + GFP_KERNEL); + if (!rgb_user) + goto rgb_user_alloc_fail; + + axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x), + GFP_KERNEL); + if (!axis_x) + goto axis_x_alloc_fail; + + dividers.divider1 = dc_fixpt_from_fraction(3, 2); + dividers.divider2 = dc_fixpt_from_int(2); + dividers.divider3 = dc_fixpt_from_fraction(5, 2); + + build_evenly_distributed_points( + axis_x, + ramp->num_entries, + dividers); + + if (ramp->type == GAMMA_RGB_256 && mapUserRamp) + scale_gamma(rgb_user, ramp, dividers); + else if (ramp->type == GAMMA_RGB_FLOAT_1024) + scale_gamma_dx(rgb_user, ramp, dividers); + } + + rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, + sizeof(*rgb_regamma), + GFP_KERNEL); + if (!rgb_regamma) + goto rgb_regamma_alloc_fail; + + coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), + GFP_KERNEL); + if (!coeff) + goto coeff_alloc_fail; + + tf = output_tf->tf; + + ret = calculate_curve(tf, + tf_pts, + rgb_regamma, + fs_params, + output_tf->sdr_ref_white_level); + + if (ret) { + map_regamma_hw_to_x_user(ramp, coeff, rgb_user, + coordinates_x, axis_x, rgb_regamma, + MAX_HW_POINTS, tf_pts, + (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) && + (ramp && ramp->type != GAMMA_CS_TFM_1D)); + + if (ramp && ramp->type == GAMMA_CS_TFM_1D) + apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); + } + + kvfree(coeff); +coeff_alloc_fail: + kvfree(rgb_regamma); +rgb_regamma_alloc_fail: + kvfree(axis_x); +axis_x_alloc_fail: + kvfree(rgb_user); +rgb_user_alloc_fail: + return ret; +} bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, struct dc_transfer_func_distributed_points *points) diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h 
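[Annotation] The color_gamma.c refactor moves every allocation into mod_color_calculate_regamma_params(), which unwinds them through the usual kernel goto-label ladder, while the new static calculate_curve() only fills a caller-provided buffer. The ownership and unwind idiom in isolation (userspace calloc/free standing in for kvcalloc/kvfree):

#include <stdbool.h>
#include <stdlib.h>

static bool fill_curve(double *curve, size_t n)
{
	for (size_t i = 0; i < n; i++)   /* placeholder ramp */
		curve[i] = (n > 1) ? (double)i / (double)(n - 1) : 0.0;
	return true;
}

static bool calculate_params(size_t n)
{
	bool ret = false;
	double *user = NULL, *curve = NULL, *coeff = NULL;

	user = calloc(n, sizeof(*user));
	if (!user)
		goto user_fail;
	curve = calloc(n, sizeof(*curve));
	if (!curve)
		goto curve_fail;
	coeff = calloc(n, sizeof(*coeff));
	if (!coeff)
		goto coeff_fail;

	/* The curve helper writes into a buffer this function owns. */
	ret = fill_curve(curve, n);

	free(coeff);
coeff_fail:
	free(curve);
curve_fail:
	free(user);
user_fail:
	return ret;
}

int main(void)
{
	return calculate_params(1025) ? 0 : 1;
}

Centralizing allocation is also what let the patch delete mod_color_calculate_curve() and its four duplicated alloc/copy/free blocks.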
b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index 44ddea58523a..9994817a9a03 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h @@ -103,10 +103,6 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf, const struct dc_gamma *ramp, bool mapUserRamp); -bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, - struct dc_transfer_func_distributed_points *points, - uint32_t sdr_ref_white_level); - bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, struct dc_transfer_func_distributed_points *points); diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 6e5ecefe7d9d..b9992ebf77a6 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -381,7 +381,7 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync, bool update = false; unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; - //Compute the exit refresh rate and exit frame duration + /* Compute the exit refresh rate and exit frame duration */ unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us) + (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ)); unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index f98d3d9ecb6d..af78e4f1be68 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -63,7 +63,7 @@ struct mod_hdcp_transition_input_hdcp1 { uint8_t hdcp_capable_dp; uint8_t binfo_read_dp; uint8_t r0p_available_dp; - uint8_t link_integiry_check; + uint8_t link_integrity_check; uint8_t reauth_request_check; uint8_t stream_encryption_dp; }; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 04845e43df15..37670db64855 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -283,8 +283,8 @@ static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp, hdcp, "bstatus_read")) goto out; if (!mod_hdcp_execute_and_set(check_link_integrity_dp, - &input->link_integiry_check, &status, - hdcp, "link_integiry_check")) + &input->link_integrity_check, &status, + hdcp, "link_integrity_check")) goto out; if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, &input->reauth_request_check, &status, @@ -431,8 +431,8 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp, hdcp, "bstatus_read")) goto out; if (!mod_hdcp_execute_and_set(check_link_integrity_dp, - &input->link_integiry_check, &status, - hdcp, "link_integiry_check")) + &input->link_integrity_check, &status, + hdcp, "link_integrity_check")) goto out; if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, &input->reauth_request_check, &status, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c index 21ebc62bb9d9..76edcbe51f71 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c @@ -241,7 +241,7 
@@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, } break; case D1_A4_AUTHENTICATED: - if (input->link_integiry_check != PASS || + if (input->link_integrity_check != PASS || input->reauth_request_check != PASS) { /* 1A-07: restart hdcp on a link integrity failure */ fail_and_restart_in_ms(0, &status, output); @@ -249,7 +249,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, } break; case D1_A6_WAIT_FOR_READY: - if (input->link_integiry_check == FAIL || + if (input->link_integrity_check == FAIL || input->reauth_request_check == FAIL) { fail_and_restart_in_ms(0, &status, output); break; diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h index 87c84691b5be..bb2c9c7a18df 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h @@ -71,4 +71,7 @@ #define smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3 0x1d098UL #define smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3 0x1d09cUL +#define smnDF_CS_UMC_AON0_DramBaseAddress0 0x1c110UL +#define smnDF_CS_UMC_AON0_DramLimitAddress0 0x1c114UL + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h index 65e9f756e86e..7afa87c7ff54 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h @@ -53,4 +53,12 @@ #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000E00L #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L +//DF_CS_UMC_AON0_DramLimitAddress0 +#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID__SHIFT 0x0 +#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO__SHIFT 0xa +#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr__SHIFT 0xc +#define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID_MASK 0x000003FFL +#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO_MASK 0x00000400L +#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr_MASK 0xFFFFF000L + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h index c9e3f6d849a8..ea316d8dcb37 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h @@ -2060,7 +2060,8 @@ // addressBlock: gc_sqdec //SQ_CONFIG -#define SQ_CONFIG__UNUSED__SHIFT 0x0 +#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT__SHIFT 0x0 +#define SQ_CONFIG__UNUSED__SHIFT 0x1 #define SQ_CONFIG__OVERRIDE_ALU_BUSY__SHIFT 0x7 #define SQ_CONFIG__DEBUG_EN__SHIFT 0x8 #define SQ_CONFIG__DEBUG_SINGLE_MEMOP__SHIFT 0x9 @@ -2079,7 +2080,8 @@ #define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING__SHIFT 0x1d #define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE__SHIFT 0x1e #define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE__SHIFT 0x1f -#define SQ_CONFIG__UNUSED_MASK 0x0000007FL +#define SQ_CONFIG__DISABLE_BARRIER_WAITCNT_MASK 0x00000001L +#define SQ_CONFIG__UNUSED_MASK 0x0000007EL #define SQ_CONFIG__OVERRIDE_ALU_BUSY_MASK 0x00000080L #define SQ_CONFIG__DEBUG_EN_MASK 0x00000100L #define SQ_CONFIG__DEBUG_SINGLE_MEMOP_MASK 0x00000200L diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h new file mode 100644 index 000000000000..f41556abfbbc --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h @@ -0,0 +1,264 @@ +/* + * Copyright (C) 2020 
Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _gc_9_4_1_OFFSET_HEADER +#define _gc_9_4_1_OFFSET_HEADER + +// addressBlock: gc_grbmdec +// base address: 0x8000 +#define mmGRBM_CNTL 0x0000 +#define mmGRBM_CNTL_BASE_IDX 0 +#define mmGRBM_SKEW_CNTL 0x0001 +#define mmGRBM_SKEW_CNTL_BASE_IDX 0 +#define mmGRBM_STATUS2 0x0002 +#define mmGRBM_STATUS2_BASE_IDX 0 +#define mmGRBM_PWR_CNTL 0x0003 +#define mmGRBM_PWR_CNTL_BASE_IDX 0 +#define mmGRBM_STATUS 0x0004 +#define mmGRBM_STATUS_BASE_IDX 0 +#define mmGRBM_STATUS_SE0 0x0005 +#define mmGRBM_STATUS_SE0_BASE_IDX 0 +#define mmGRBM_STATUS_SE1 0x0006 +#define mmGRBM_STATUS_SE1_BASE_IDX 0 +#define mmGRBM_SOFT_RESET 0x0008 +#define mmGRBM_SOFT_RESET_BASE_IDX 0 +#define mmGRBM_GFX_CLKEN_CNTL 0x000c +#define mmGRBM_GFX_CLKEN_CNTL_BASE_IDX 0 +#define mmGRBM_WAIT_IDLE_CLOCKS 0x000d +#define mmGRBM_WAIT_IDLE_CLOCKS_BASE_IDX 0 +#define mmGRBM_STATUS_SE2 0x000e +#define mmGRBM_STATUS_SE2_BASE_IDX 0 +#define mmGRBM_STATUS_SE3 0x000f +#define mmGRBM_STATUS_SE3_BASE_IDX 0 +#define mmGRBM_READ_ERROR 0x0016 +#define mmGRBM_READ_ERROR_BASE_IDX 0 +#define mmGRBM_READ_ERROR2 0x0017 +#define mmGRBM_READ_ERROR2_BASE_IDX 0 +#define mmGRBM_INT_CNTL 0x0018 +#define mmGRBM_INT_CNTL_BASE_IDX 0 +#define mmGRBM_TRAP_OP 0x0019 +#define mmGRBM_TRAP_OP_BASE_IDX 0 +#define mmGRBM_TRAP_ADDR 0x001a +#define mmGRBM_TRAP_ADDR_BASE_IDX 0 +#define mmGRBM_TRAP_ADDR_MSK 0x001b +#define mmGRBM_TRAP_ADDR_MSK_BASE_IDX 0 +#define mmGRBM_TRAP_WD 0x001c +#define mmGRBM_TRAP_WD_BASE_IDX 0 +#define mmGRBM_TRAP_WD_MSK 0x001d +#define mmGRBM_TRAP_WD_MSK_BASE_IDX 0 +#define mmGRBM_DSM_BYPASS 0x001e +#define mmGRBM_DSM_BYPASS_BASE_IDX 0 +#define mmGRBM_WRITE_ERROR 0x001f +#define mmGRBM_WRITE_ERROR_BASE_IDX 0 +#define mmGRBM_IOV_ERROR 0x0020 +#define mmGRBM_IOV_ERROR_BASE_IDX 0 +#define mmGRBM_CHIP_REVISION 0x0021 +#define mmGRBM_CHIP_REVISION_BASE_IDX 0 +#define mmGRBM_GFX_CNTL 0x0022 +#define mmGRBM_GFX_CNTL_BASE_IDX 0 +#define mmGRBM_RSMU_CFG 0x0023 +#define mmGRBM_RSMU_CFG_BASE_IDX 0 +#define mmGRBM_IH_CREDIT 0x0024 +#define mmGRBM_IH_CREDIT_BASE_IDX 0 +#define mmGRBM_PWR_CNTL2 0x0025 +#define mmGRBM_PWR_CNTL2_BASE_IDX 0 +#define mmGRBM_UTCL2_INVAL_RANGE_START 0x0026 +#define mmGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX 0 +#define mmGRBM_UTCL2_INVAL_RANGE_END 0x0027 +#define mmGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX 0 +#define mmGRBM_RSMU_READ_ERROR 0x0028 +#define mmGRBM_RSMU_READ_ERROR_BASE_IDX 0 +#define mmGRBM_CHICKEN_BITS 0x0029 +#define 
mmGRBM_CHICKEN_BITS_BASE_IDX 0 +#define mmGRBM_FENCE_RANGE0 0x002a +#define mmGRBM_FENCE_RANGE0_BASE_IDX 0 +#define mmGRBM_FENCE_RANGE1 0x002b +#define mmGRBM_FENCE_RANGE1_BASE_IDX 0 +#define mmGRBM_NOWHERE 0x003f +#define mmGRBM_NOWHERE_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG0 0x0040 +#define mmGRBM_SCRATCH_REG0_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG1 0x0041 +#define mmGRBM_SCRATCH_REG1_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG2 0x0042 +#define mmGRBM_SCRATCH_REG2_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG3 0x0043 +#define mmGRBM_SCRATCH_REG3_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG4 0x0044 +#define mmGRBM_SCRATCH_REG4_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG5 0x0045 +#define mmGRBM_SCRATCH_REG5_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG6 0x0046 +#define mmGRBM_SCRATCH_REG6_BASE_IDX 0 +#define mmGRBM_SCRATCH_REG7 0x0047 +#define mmGRBM_SCRATCH_REG7_BASE_IDX 0 + +// addressBlock: gc_cppdec2 +// base address: 0xc600 +#define mmCPF_EDC_TAG_CNT 0x1189 +#define mmCPF_EDC_TAG_CNT_BASE_IDX 0 +#define mmCPF_EDC_ROQ_CNT 0x118a +#define mmCPF_EDC_ROQ_CNT_BASE_IDX 0 +#define mmCPG_EDC_TAG_CNT 0x118b +#define mmCPG_EDC_TAG_CNT_BASE_IDX 0 +#define mmCPG_EDC_DMA_CNT 0x118d +#define mmCPG_EDC_DMA_CNT_BASE_IDX 0 +#define mmCPC_EDC_SCRATCH_CNT 0x118e +#define mmCPC_EDC_SCRATCH_CNT_BASE_IDX 0 +#define mmCPC_EDC_UCODE_CNT 0x118f +#define mmCPC_EDC_UCODE_CNT_BASE_IDX 0 +#define mmDC_EDC_STATE_CNT 0x1191 +#define mmDC_EDC_STATE_CNT_BASE_IDX 0 +#define mmDC_EDC_CSINVOC_CNT 0x1192 +#define mmDC_EDC_CSINVOC_CNT_BASE_IDX 0 +#define mmDC_EDC_RESTORE_CNT 0x1193 +#define mmDC_EDC_RESTORE_CNT_BASE_IDX 0 + +// addressBlock: gc_gdsdec +// base address: 0x9700 +#define mmGDS_EDC_CNT 0x05c5 +#define mmGDS_EDC_CNT_BASE_IDX 0 +#define mmGDS_EDC_GRBM_CNT 0x05c6 +#define mmGDS_EDC_GRBM_CNT_BASE_IDX 0 +#define mmGDS_EDC_OA_DED 0x05c7 +#define mmGDS_EDC_OA_DED_BASE_IDX 0 +#define mmGDS_EDC_OA_PHY_CNT 0x05cb +#define mmGDS_EDC_OA_PHY_CNT_BASE_IDX 0 +#define mmGDS_EDC_OA_PIPE_CNT 0x05cc +#define mmGDS_EDC_OA_PIPE_CNT_BASE_IDX 0 + +// addressBlock: gc_shsdec +// base address: 0x9000 +#define mmSPI_EDC_CNT 0x0445 +#define mmSPI_EDC_CNT_BASE_IDX 0 + +// addressBlock: gc_sqdec +// base address: 0x8c00 +#define mmSQC_EDC_CNT2 0x032c +#define mmSQC_EDC_CNT2_BASE_IDX 0 +#define mmSQC_EDC_CNT3 0x032d +#define mmSQC_EDC_CNT3_BASE_IDX 0 +#define mmSQC_EDC_PARITY_CNT3 0x032e +#define mmSQC_EDC_PARITY_CNT3_BASE_IDX 0 +#define mmSQC_EDC_CNT 0x03a2 +#define mmSQC_EDC_CNT_BASE_IDX 0 +#define mmSQ_EDC_SEC_CNT 0x03a3 +#define mmSQ_EDC_SEC_CNT_BASE_IDX 0 +#define mmSQ_EDC_DED_CNT 0x03a4 +#define mmSQ_EDC_DED_CNT_BASE_IDX 0 +#define mmSQ_EDC_INFO 0x03a5 +#define mmSQ_EDC_INFO_BASE_IDX 0 +#define mmSQ_EDC_CNT 0x03a6 +#define mmSQ_EDC_CNT_BASE_IDX 0 + +// addressBlock: gc_tpdec +// base address: 0x9400 +#define mmTA_EDC_CNT 0x0586 +#define mmTA_EDC_CNT_BASE_IDX 0 + +// addressBlock: gc_tcdec +// base address: 0xac00 +#define mmTCP_EDC_CNT 0x0b17 +#define mmTCP_EDC_CNT_BASE_IDX 0 +#define mmTCP_EDC_CNT_NEW 0x0b18 +#define mmTCP_EDC_CNT_NEW_BASE_IDX 0 +#define mmTCP_ATC_EDC_GATCL1_CNT 0x12b1 +#define mmTCP_ATC_EDC_GATCL1_CNT_BASE_IDX 0 +#define mmTCI_EDC_CNT 0x0b60 +#define mmTCI_EDC_CNT_BASE_IDX 0 +#define mmTCC_EDC_CNT 0x0b82 +#define mmTCC_EDC_CNT_BASE_IDX 0 +#define mmTCC_EDC_CNT2 0x0b83 +#define mmTCC_EDC_CNT2_BASE_IDX 0 +#define mmTCA_EDC_CNT 0x0bc5 +#define mmTCA_EDC_CNT_BASE_IDX 0 + +// addressBlock: gc_tpdec +// base address: 0x9400 +#define mmTD_EDC_CNT 0x052e +#define mmTD_EDC_CNT_BASE_IDX 0 +#define mmTA_EDC_CNT 0x0586 +#define mmTA_EDC_CNT_BASE_IDX 0 + 
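[Annotation] The new gc_9_4_1_offset.h follows the usual SOC15 convention: each register is a dword offset paired with a _BASE_IDX that selects which segment of the IP block's discovered base-address table it is relative to. A simplified model of how the pair resolves to a register offset (segment values invented; the real driver goes through adev->reg_offset and the SOC15 macros):

#include <assert.h>
#include <stdint.h>

/* Per-IP segment bases as discovered at driver init (values invented). */
static const uint32_t gc_base_segment[2] = { 0x00002000, 0x0000c000 };

#define mmEXAMPLE_REG           0x2040   /* dword offset within its segment */
#define mmEXAMPLE_REG_BASE_IDX  1

static uint32_t soc15_reg_offset(uint32_t base_idx, uint32_t reg)
{
	return gc_base_segment[base_idx] + reg;   /* still in dwords */
}

int main(void)
{
	assert(soc15_reg_offset(mmEXAMPLE_REG_BASE_IDX, mmEXAMPLE_REG) ==
	       0x0000c000 + 0x2040);
	return 0;
}

Splitting offset and base index is what lets one header serve ASICs whose IP blocks sit at different MMIO apertures.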
+// addressBlock: gc_ea_gceadec2 +// base address: 0x9c00 +#define mmGCEA_EDC_CNT 0x0706 +#define mmGCEA_EDC_CNT_BASE_IDX 0 +#define mmGCEA_EDC_CNT2 0x0707 +#define mmGCEA_EDC_CNT2_BASE_IDX 0 +#define mmGCEA_EDC_CNT3 0x071b +#define mmGCEA_EDC_CNT3_BASE_IDX 0 + +// addressBlock: gc_gfxudec +// base address: 0x30000 +#define mmSCRATCH_REG0 0x2040 +#define mmSCRATCH_REG0_BASE_IDX 1 +#define mmSCRATCH_REG1 0x2041 +#define mmSCRATCH_REG1_BASE_IDX 1 +#define mmSCRATCH_REG2 0x2042 +#define mmSCRATCH_REG2_BASE_IDX 1 +#define mmSCRATCH_REG3 0x2043 +#define mmSCRATCH_REG3_BASE_IDX 1 +#define mmSCRATCH_REG4 0x2044 +#define mmSCRATCH_REG4_BASE_IDX 1 +#define mmSCRATCH_REG5 0x2045 +#define mmSCRATCH_REG5_BASE_IDX 1 +#define mmSCRATCH_REG6 0x2046 +#define mmSCRATCH_REG6_BASE_IDX 1 +#define mmSCRATCH_REG7 0x2047 +#define mmSCRATCH_REG7_BASE_IDX 1 +#define mmGRBM_GFX_INDEX 0x2200 +#define mmGRBM_GFX_INDEX_BASE_IDX 1 + +// addressBlock: gc_utcl2_atcl2dec +// base address: 0xa000 +#define mmATC_L2_CACHE_4K_DSM_INDEX 0x080e +#define mmATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX 0 +#define mmATC_L2_CACHE_2M_DSM_INDEX 0x080f +#define mmATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX 0 +#define mmATC_L2_CACHE_4K_DSM_CNTL 0x0810 +#define mmATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX 0 +#define mmATC_L2_CACHE_2M_DSM_CNTL 0x0811 +#define mmATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX 0 + +// addressBlock: gc_utcl2_vml2pfdec +// base address: 0xa100 +#define mmVML2_MEM_ECC_INDEX 0x0860 +#define mmVML2_MEM_ECC_INDEX_BASE_IDX 0 +#define mmVML2_WALKER_MEM_ECC_INDEX 0x0861 +#define mmVML2_WALKER_MEM_ECC_INDEX_BASE_IDX 0 +#define mmUTCL2_MEM_ECC_INDEX 0x0862 +#define mmUTCL2_MEM_ECC_INDEX_BASE_IDX 0 + +#define mmVML2_MEM_ECC_CNTL 0x0863 +#define mmVML2_MEM_ECC_CNTL_BASE_IDX 0 +#define mmVML2_WALKER_MEM_ECC_CNTL 0x0864 +#define mmVML2_WALKER_MEM_ECC_CNTL_BASE_IDX 0 +#define mmUTCL2_MEM_ECC_CNTL 0x0865 +#define mmUTCL2_MEM_ECC_CNTL_BASE_IDX 0 + +// addressBlock: gc_rlcpdec +// base address: 0x3b000 +#define mmRLC_EDC_CNT 0x4d40 +#define mmRLC_EDC_CNT_BASE_IDX 1 +#define mmRLC_EDC_CNT2 0x4d41 +#define mmRLC_EDC_CNT2_BASE_IDX 1 + +#endif
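The offset/_BASE_IDX pairs above become usable MMIO addresses only after they are combined with the per-IP segment bases that amdgpu discovers at driver init. A minimal sketch of the usual consumption pattern, assuming the existing SOC15_REG_OFFSET()/RREG32 helpers and a GRBM_STATUS field mask taken from the established gc_9_0 sh_mask header — illustrative only, not part of the patch:

	/* SOC15_REG_OFFSET() selects the segment base via reg##_BASE_IDX from
	 * adev->reg_offset[GC_HWIP][0][] and adds the mmGRBM_STATUS offset
	 * defined in this header. */
	u32 grbm_status = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS));

	if (grbm_status & GRBM_STATUS__GUI_ACTIVE_MASK)	/* from gc_9_0_sh_mask.h */
		dev_info(adev->dev, "GFX core still busy\n");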
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h new file mode 100644 index 000000000000..f26246a600c6 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h @@ -0,0 +1,748 @@ +/* + * Copyright (C) 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _gc_9_4_1_SH_MASK_HEADER +#define _gc_9_4_1_SH_MASK_HEADER + +// addressBlock: gc_cppdec2 +//CPF_EDC_TAG_CNT +#define CPF_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0 +#define CPF_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2 +#define CPF_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L +#define CPF_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL +//CPF_EDC_ROQ_CNT +#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1__SHIFT 0x0 +#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1__SHIFT 0x2 +#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2__SHIFT 0x4 +#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2__SHIFT 0x6 +#define CPF_EDC_ROQ_CNT__DED_COUNT_ME1_MASK 0x00000003L +#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1_MASK 0x0000000CL +#define CPF_EDC_ROQ_CNT__DED_COUNT_ME2_MASK 0x00000030L +#define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2_MASK 0x000000C0L +//CPG_EDC_TAG_CNT +#define CPG_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0 +#define CPG_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2 +#define CPG_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L +#define CPG_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL +//CPG_EDC_DMA_CNT +#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT__SHIFT 0x0 +#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT__SHIFT 0x2 +#define CPG_EDC_DMA_CNT__TAG_DED_COUNT__SHIFT 0x4 +#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT__SHIFT 0x6 +#define CPG_EDC_DMA_CNT__ROQ_DED_COUNT_MASK 0x00000003L +#define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT_MASK 0x0000000CL +#define CPG_EDC_DMA_CNT__TAG_DED_COUNT_MASK 0x00000030L +#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT_MASK 0x000000C0L +//CPC_EDC_SCRATCH_CNT +#define CPC_EDC_SCRATCH_CNT__DED_COUNT__SHIFT 0x0 +#define CPC_EDC_SCRATCH_CNT__SEC_COUNT__SHIFT 0x2 +#define CPC_EDC_SCRATCH_CNT__DED_COUNT_MASK 0x00000003L +#define CPC_EDC_SCRATCH_CNT__SEC_COUNT_MASK 0x0000000CL +//CPC_EDC_UCODE_CNT +#define CPC_EDC_UCODE_CNT__DED_COUNT__SHIFT 0x0 +#define CPC_EDC_UCODE_CNT__SEC_COUNT__SHIFT 0x2 +#define CPC_EDC_UCODE_CNT__DED_COUNT_MASK 0x00000003L +#define CPC_EDC_UCODE_CNT__SEC_COUNT_MASK 0x0000000CL +//DC_EDC_STATE_CNT +#define DC_EDC_STATE_CNT__DED_COUNT_ME1__SHIFT 0x0 +#define DC_EDC_STATE_CNT__SEC_COUNT_ME1__SHIFT 0x2 +#define DC_EDC_STATE_CNT__DED_COUNT_ME1_MASK 0x00000003L +#define 
DC_EDC_STATE_CNT__SEC_COUNT_ME1_MASK 0x0000000CL +//DC_EDC_CSINVOC_CNT +#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1__SHIFT 0x0 +#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1__SHIFT 0x2 +#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1__SHIFT 0x4 +#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1__SHIFT 0x6 +#define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1_MASK 0x00000003L +#define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1_MASK 0x0000000CL +#define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1_MASK 0x00000030L +#define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1_MASK 0x000000C0L +//DC_EDC_RESTORE_CNT +#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1__SHIFT 0x0 +#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1__SHIFT 0x2 +#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1__SHIFT 0x4 +#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1__SHIFT 0x6 +#define DC_EDC_RESTORE_CNT__DED_COUNT_ME1_MASK 0x00000003L +#define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1_MASK 0x0000000CL +#define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1_MASK 0x00000030L +#define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1_MASK 0x000000C0L + +// addressBlock: gc_gdsdec +//GDS_EDC_CNT +#define GDS_EDC_CNT__GDS_MEM_DED__SHIFT 0x0 +#define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT 0x4 +#define GDS_EDC_CNT__UNUSED__SHIFT 0x6 +#define GDS_EDC_CNT__GDS_MEM_DED_MASK 0x00000003L +#define GDS_EDC_CNT__GDS_MEM_SEC_MASK 0x00000030L +#define GDS_EDC_CNT__UNUSED_MASK 0xFFFFFFC0L +//GDS_EDC_GRBM_CNT +#define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0 +#define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x2 +#define GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4 +#define GDS_EDC_GRBM_CNT__DED_MASK 0x00000003L +#define GDS_EDC_GRBM_CNT__SEC_MASK 0x0000000CL +#define GDS_EDC_GRBM_CNT__UNUSED_MASK 0xFFFFFFF0L +//GDS_EDC_OA_DED +#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0 +#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1 +#define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2 +#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT 0x3 +#define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4 +#define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5 +#define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6 +#define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7 +#define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8 +#define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9 +#define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa +#define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb +#define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xc +#define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L +#define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L +#define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x00000004L +#define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK 0x00000008L +#define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L +#define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L +#define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L +#define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L +#define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L +#define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L +#define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L +#define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L +#define GDS_EDC_OA_DED__UNUSED1_MASK 0xFFFFF000L +//GDS_EDC_OA_PHY_CNT +#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT 0x0 +#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT 0x2 +#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT 0x4 +#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT 0x6 +#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC__SHIFT 0x8 +#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED__SHIFT 0xa +#define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT 0xc +#define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK 0x00000003L +#define 
GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK 0x0000000CL +#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK 0x00000030L +#define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK 0x000000C0L +#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC_MASK 0x00000300L +#define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED_MASK 0x00000C00L +#define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK 0xFFFFF000L +//GDS_EDC_OA_PIPE_CNT +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT 0x0 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT 0x2 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT 0x4 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT 0x6 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT 0x8 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT 0xa +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT 0xc +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT 0xe +#define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT 0x10 +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK 0x00000003L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK 0x0000000CL +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK 0x00000030L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK 0x000000C0L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK 0x00000300L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK 0x00000C00L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK 0x00003000L +#define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK 0x0000C000L +#define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK 0xFFFF0000L + +// addressBlock: gc_shsdec +//SPI_EDC_CNT +#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT__SHIFT 0x0 +#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT__SHIFT 0x2 +#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT__SHIFT 0x4 +#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT__SHIFT 0x6 +#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT__SHIFT 0x8 +#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT__SHIFT 0xa +#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT__SHIFT 0xc +#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT__SHIFT 0xe +#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT__SHIFT 0x10 +#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT__SHIFT 0x12 +#define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT_MASK 0x00000003L +#define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT_MASK 0x0000000CL +#define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT_MASK 0x00000030L +#define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT_MASK 0x000000C0L +#define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT_MASK 0x00000300L +#define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT_MASK 0x00000C00L +#define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT_MASK 0x00003000L +#define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT_MASK 0x0000C000L +#define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT_MASK 0x00030000L +#define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT_MASK 0x000C0000L + +// addressBlock: gc_sqdec +//SQC_EDC_CNT2 +#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x0 +#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT__SHIFT 0x2 +#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0x4 +#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT__SHIFT 0x6 +#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x8 +#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT__SHIFT 0xa +#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0xc +#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT__SHIFT 0xe +#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x10 +#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT__SHIFT 0x12 +#define 
SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000003L +#define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT_MASK 0x0000000CL +#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00000030L +#define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT_MASK 0x000000C0L +#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000300L +#define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT_MASK 0x00000C00L +#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00003000L +#define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT_MASK 0x0000C000L +#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT_MASK 0x00030000L +#define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT_MASK 0x000C0000L +//SQC_EDC_CNT3 +#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x0 +#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT__SHIFT 0x2 +#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0x4 +#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT__SHIFT 0x6 +#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x8 +#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT__SHIFT 0xa +#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0xc +#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT__SHIFT 0xe +#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000003L +#define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT_MASK 0x0000000CL +#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00000030L +#define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT_MASK 0x000000C0L +#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000300L +#define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT_MASK 0x00000C00L +#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00003000L +#define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT_MASK 0x0000C000L +//SQC_EDC_PARITY_CNT3 +#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x0 +#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x2 +#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0x4 +#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0x6 +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT__SHIFT 0x8 +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT__SHIFT 0xa +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0xc +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0xe +#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x10 +#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x12 +#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x14 +#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x16 +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT__SHIFT 0x18 +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT__SHIFT 0x1a +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x1c +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x1e +#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00000003L +#define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x0000000CL +#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00000030L +#define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT_MASK 0x000000C0L +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT_MASK 0x00000300L +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT_MASK 0x00000C00L +#define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00003000L +#define 
SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT_MASK 0x0000C000L +#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00030000L +#define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x000C0000L +#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x00300000L +#define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT_MASK 0x00C00000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT_MASK 0x03000000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT_MASK 0x0C000000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x30000000L +#define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT_MASK 0xC0000000L +//SQC_EDC_CNT +#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x0 +#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x2 +#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x4 +#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT__SHIFT 0x6 +#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x8 +#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT__SHIFT 0xa +#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT__SHIFT 0xc +#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT__SHIFT 0xe +#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x10 +#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x12 +#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x14 +#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT__SHIFT 0x16 +#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x18 +#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x1a +#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x1c +#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT__SHIFT 0x1e +#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000003L +#define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT_MASK 0x0000000CL +#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT_MASK 0x00000030L +#define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT_MASK 0x000000C0L +#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000300L +#define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT_MASK 0x00000C00L +#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT_MASK 0x00003000L +#define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT_MASK 0x0000C000L +#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00030000L +#define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT_MASK 0x000C0000L +#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT_MASK 0x00300000L +#define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT_MASK 0x00C00000L +#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT_MASK 0x03000000L +#define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT_MASK 0x0C000000L +#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT_MASK 0x30000000L +#define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT_MASK 0xC0000000L +//SQ_EDC_SEC_CNT +#define SQ_EDC_SEC_CNT__LDS_SEC__SHIFT 0x0 +#define SQ_EDC_SEC_CNT__SGPR_SEC__SHIFT 0x8 +#define SQ_EDC_SEC_CNT__VGPR_SEC__SHIFT 0x10 +#define SQ_EDC_SEC_CNT__LDS_SEC_MASK 0x000000FFL +#define SQ_EDC_SEC_CNT__SGPR_SEC_MASK 0x0000FF00L +#define SQ_EDC_SEC_CNT__VGPR_SEC_MASK 0x00FF0000L +//SQ_EDC_DED_CNT +#define SQ_EDC_DED_CNT__LDS_DED__SHIFT 0x0 +#define SQ_EDC_DED_CNT__SGPR_DED__SHIFT 0x8 +#define SQ_EDC_DED_CNT__VGPR_DED__SHIFT 0x10 +#define SQ_EDC_DED_CNT__LDS_DED_MASK 0x000000FFL +#define SQ_EDC_DED_CNT__SGPR_DED_MASK 0x0000FF00L +#define SQ_EDC_DED_CNT__VGPR_DED_MASK 0x00FF0000L +//SQ_EDC_INFO +#define 
SQ_EDC_INFO__WAVE_ID__SHIFT 0x0 +#define SQ_EDC_INFO__SIMD_ID__SHIFT 0x4 +#define SQ_EDC_INFO__SOURCE__SHIFT 0x6 +#define SQ_EDC_INFO__VM_ID__SHIFT 0x9 +#define SQ_EDC_INFO__WAVE_ID_MASK 0x0000000FL +#define SQ_EDC_INFO__SIMD_ID_MASK 0x00000030L +#define SQ_EDC_INFO__SOURCE_MASK 0x000001C0L +#define SQ_EDC_INFO__VM_ID_MASK 0x00001E00L +//SQ_EDC_CNT +#define SQ_EDC_CNT__LDS_D_SEC_COUNT__SHIFT 0x0 +#define SQ_EDC_CNT__LDS_D_DED_COUNT__SHIFT 0x2 +#define SQ_EDC_CNT__LDS_I_SEC_COUNT__SHIFT 0x4 +#define SQ_EDC_CNT__LDS_I_DED_COUNT__SHIFT 0x6 +#define SQ_EDC_CNT__SGPR_SEC_COUNT__SHIFT 0x8 +#define SQ_EDC_CNT__SGPR_DED_COUNT__SHIFT 0xa +#define SQ_EDC_CNT__VGPR0_SEC_COUNT__SHIFT 0xc +#define SQ_EDC_CNT__VGPR0_DED_COUNT__SHIFT 0xe +#define SQ_EDC_CNT__VGPR1_SEC_COUNT__SHIFT 0x10 +#define SQ_EDC_CNT__VGPR1_DED_COUNT__SHIFT 0x12 +#define SQ_EDC_CNT__VGPR2_SEC_COUNT__SHIFT 0x14 +#define SQ_EDC_CNT__VGPR2_DED_COUNT__SHIFT 0x16 +#define SQ_EDC_CNT__VGPR3_SEC_COUNT__SHIFT 0x18 +#define SQ_EDC_CNT__VGPR3_DED_COUNT__SHIFT 0x1a +#define SQ_EDC_CNT__LDS_D_SEC_COUNT_MASK 0x00000003L +#define SQ_EDC_CNT__LDS_D_DED_COUNT_MASK 0x0000000CL +#define SQ_EDC_CNT__LDS_I_SEC_COUNT_MASK 0x00000030L +#define SQ_EDC_CNT__LDS_I_DED_COUNT_MASK 0x000000C0L +#define SQ_EDC_CNT__SGPR_SEC_COUNT_MASK 0x00000300L +#define SQ_EDC_CNT__SGPR_DED_COUNT_MASK 0x00000C00L +#define SQ_EDC_CNT__VGPR0_SEC_COUNT_MASK 0x00003000L +#define SQ_EDC_CNT__VGPR0_DED_COUNT_MASK 0x0000C000L +#define SQ_EDC_CNT__VGPR1_SEC_COUNT_MASK 0x00030000L +#define SQ_EDC_CNT__VGPR1_DED_COUNT_MASK 0x000C0000L +#define SQ_EDC_CNT__VGPR2_SEC_COUNT_MASK 0x00300000L +#define SQ_EDC_CNT__VGPR2_DED_COUNT_MASK 0x00C00000L +#define SQ_EDC_CNT__VGPR3_SEC_COUNT_MASK 0x03000000L +#define SQ_EDC_CNT__VGPR3_DED_COUNT_MASK 0x0C000000L + +// addressBlock: gc_tpdec +//TA_EDC_CNT +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0 +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2 +#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT 0x4 +#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT 0x6 +#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT 0x8 +#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT 0xa +#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT 0xc +#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT 0xe +#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT 0x10 +#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT 0x12 +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL +#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK 0x00000030L +#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK 0x000000C0L +#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK 0x00000300L +#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK 0x00000C00L +#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK 0x00003000L +#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK 0x0000C000L +#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK 0x00030000L +#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK 0x000C0000L + +// addressBlock: gc_tcdec +//TCP_EDC_CNT +#define TCP_EDC_CNT__SEC_COUNT__SHIFT 0x0 +#define TCP_EDC_CNT__LFIFO_SED_COUNT__SHIFT 0x8 +#define TCP_EDC_CNT__DED_COUNT__SHIFT 0x10 +#define TCP_EDC_CNT__SEC_COUNT_MASK 0x000000FFL +#define TCP_EDC_CNT__LFIFO_SED_COUNT_MASK 0x0000FF00L +#define TCP_EDC_CNT__DED_COUNT_MASK 0x00FF0000L +//TCP_EDC_CNT_NEW +#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT__SHIFT 0x0 +#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT__SHIFT 0x2 +#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT__SHIFT 0x4 +#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT__SHIFT 0x6 +#define 
TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT__SHIFT 0x8 +#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT__SHIFT 0xa +#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT__SHIFT 0xc +#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT__SHIFT 0xe +#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT__SHIFT 0x10 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT__SHIFT 0x12 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT__SHIFT 0x14 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT__SHIFT 0x16 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT__SHIFT 0x18 +#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT_MASK 0x00000003L +#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT_MASK 0x0000000CL +#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT_MASK 0x00000030L +#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT_MASK 0x000000C0L +#define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT_MASK 0x00000300L +#define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT_MASK 0x00000C00L +#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT_MASK 0x00003000L +#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT_MASK 0x0000C000L +#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT_MASK 0x00030000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT_MASK 0x000C0000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT_MASK 0x00300000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT_MASK 0x00C00000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT_MASK 0x03000000L +//TCP_ATC_EDC_GATCL1_CNT +#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC__SHIFT 0x0 +#define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC_MASK 0x000000FFL +//TCI_EDC_CNT +#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT__SHIFT 0x0 +#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT__SHIFT 0x2 +#define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT_MASK 0x00000003L +#define TCI_EDC_CNT__WRITE_RAM_DED_COUNT_MASK 0x0000000CL +//TCA_EDC_CNT +#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT__SHIFT 0x0 +#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT__SHIFT 0x2 +#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT__SHIFT 0x4 +#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT__SHIFT 0x6 +#define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT_MASK 0x00000003L +#define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT_MASK 0x0000000CL +#define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT_MASK 0x00000030L +#define TCA_EDC_CNT__REQ_FIFO_DED_COUNT_MASK 0x000000C0L +//TCC_EDC_CNT +#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT__SHIFT 0x0 +#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT__SHIFT 0x2 +#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT__SHIFT 0x4 +#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT__SHIFT 0x6 +#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT__SHIFT 0x8 +#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT__SHIFT 0xa +#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT__SHIFT 0xc +#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT__SHIFT 0xe +#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT__SHIFT 0x10 +#define TCC_EDC_CNT__SRC_FIFO_DED_COUNT__SHIFT 0x12 +#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT__SHIFT 0x14 +#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT__SHIFT 0x16 +#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT__SHIFT 0x18 +#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT__SHIFT 0x1a +#define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT_MASK 0x00000003L +#define TCC_EDC_CNT__CACHE_DATA_DED_COUNT_MASK 0x0000000CL +#define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT_MASK 0x00000030L +#define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT_MASK 0x000000C0L +#define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT_MASK 0x00000300L +#define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT_MASK 0x00000C00L +#define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT_MASK 0x00003000L +#define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT_MASK 0x0000C000L +#define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT_MASK 0x00030000L +#define 
TCC_EDC_CNT__SRC_FIFO_DED_COUNT_MASK 0x000C0000L +#define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT_MASK 0x00300000L +#define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT_MASK 0x00C00000L +#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT_MASK 0x03000000L +#define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT_MASK 0x0C000000L +//TCC_EDC_CNT2 +#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT__SHIFT 0x0 +#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT__SHIFT 0x2 +#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT__SHIFT 0x4 +#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT__SHIFT 0x6 +#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT__SHIFT 0x8 +#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT__SHIFT 0xa +#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT__SHIFT 0xc +#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT__SHIFT 0xe +#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT__SHIFT 0x10 +#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT__SHIFT 0x12 +#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT__SHIFT 0x14 +#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT__SHIFT 0x16 +#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT__SHIFT 0x18 +#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT__SHIFT 0x1a +#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT__SHIFT 0x1c +#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT__SHIFT 0x1e +#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT_MASK 0x00000003L +#define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT_MASK 0x0000000CL +#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT_MASK 0x00000030L +#define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT_MASK 0x000000C0L +#define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT_MASK 0x00000300L +#define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT_MASK 0x00000C00L +#define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT_MASK 0x00003000L +#define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT_MASK 0x0000C000L +#define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT_MASK 0x00030000L +#define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT_MASK 0x000C0000L +#define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT_MASK 0x00300000L +#define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT_MASK 0x00C00000L +#define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT_MASK 0x03000000L +#define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT_MASK 0x0C000000L +#define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT_MASK 0x30000000L +#define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT_MASK 0xC0000000L + +// addressBlock: gc_tpdec +//TD_EDC_CNT +#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT__SHIFT 0x0 +#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT__SHIFT 0x2 +#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT__SHIFT 0x4 +#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT__SHIFT 0x6 +#define TD_EDC_CNT__CS_FIFO_SEC_COUNT__SHIFT 0x8 +#define TD_EDC_CNT__CS_FIFO_DED_COUNT__SHIFT 0xa +#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT_MASK 0x00000003L +#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT_MASK 0x0000000CL +#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT_MASK 0x00000030L +#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT_MASK 0x000000C0L +#define TD_EDC_CNT__CS_FIFO_SEC_COUNT_MASK 0x00000300L +#define TD_EDC_CNT__CS_FIFO_DED_COUNT_MASK 0x00000C00L +//TA_EDC_CNT +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0 +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2 +#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT 0x4 +#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT 0x6 +#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT 0x8 +#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT 0xa +#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT 0xc +#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT 0xe +#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT 0x10 +#define 
TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT 0x12 +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL +#define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK 0x00000030L +#define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK 0x000000C0L +#define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK 0x00000300L +#define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK 0x00000C00L +#define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK 0x00003000L +#define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK 0x0000C000L +#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK 0x00030000L +#define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK 0x000C0000L + +// addressBlock: gc_ea_gceadec2 +//GCEA_EDC_CNT +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT__SHIFT 0x1e +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +#define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT_MASK 0xC0000000L +//GCEA_EDC_CNT2 +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define 
GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L +//GCEA_EDC_CNT3 +#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0 +#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2 +#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4 +#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8 +#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa +#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc +#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT__SHIFT 0xe +#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT__SHIFT 0x10 +#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT__SHIFT 0x12 +#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT__SHIFT 0x14 +#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT__SHIFT 0x16 +#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT__SHIFT 0x18 +#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT__SHIFT 0x1a +#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT__SHIFT 0x1c +#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT__SHIFT 0x1e +#define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L +#define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL +#define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L +#define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L +#define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L +#define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L +#define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT_MASK 0x0000C000L +#define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT_MASK 0x00030000L +#define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT_MASK 0x000C0000L +#define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT_MASK 0x00300000L +#define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT_MASK 0x00C00000L +#define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT_MASK 0x03000000L +#define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT_MASK 0x0C000000L +#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x30000000L +#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0xC0000000L + +// addressBlock: gc_gfxudec +//GRBM_GFX_INDEX +#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0 +#define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x8 +#define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10 +#define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x1d +#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e +#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f +#define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL +#define GRBM_GFX_INDEX__SH_INDEX_MASK 0x0000FF00L +#define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L +#define 
GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000L +#define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L +#define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L + +// addressBlock: gc_utcl2_atcl2dec +//ATC_L2_CNTL +//ATC_L2_CACHE_4K_DSM_INDEX +#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT 0x0 +#define ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK 0x000000FFL +//ATC_L2_CACHE_2M_DSM_INDEX +#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT 0x0 +#define ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK 0x000000FFL +//ATC_L2_CACHE_4K_DSM_CNTL +#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT 0xd +#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT 0xf +#define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L +#define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK 0x00018000L +//ATC_L2_CACHE_2M_DSM_CNTL +#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT 0xd +#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT 0xf +#define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK 0x00006000L +#define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK 0x00018000L + +// addressBlock: gc_utcl2_vml2pfdec +//VML2_MEM_ECC_INDEX +#define VML2_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define VML2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//VML2_WALKER_MEM_ECC_INDEX +#define VML2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define VML2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//UTCL2_MEM_ECC_INDEX +#define UTCL2_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define UTCL2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//VML2_MEM_ECC_CNTL +#define VML2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc +#define VML2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe +#define VML2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L +#define VML2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L +//VML2_WALKER_MEM_ECC_CNTL +#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc +#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe +#define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L +#define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L +//UTCL2_MEM_ECC_CNTL +#define UTCL2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc +#define UTCL2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe +#define UTCL2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L +#define UTCL2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L + +// addressBlock: gc_rlcpdec +//RLC_EDC_CNT +#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT__SHIFT 0x0 +#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT__SHIFT 0x2 +#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT__SHIFT 0x4 +#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT__SHIFT 0x6 +#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT__SHIFT 0x8 +#define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT__SHIFT 0xa +#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT__SHIFT 0xc +#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT__SHIFT 0xe +#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT__SHIFT 0x10 +#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT__SHIFT 0x12 +#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT__SHIFT 0x14 +#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT__SHIFT 0x16 +#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT__SHIFT 0x18 +#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT__SHIFT 0x1a +#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT__SHIFT 0x1c +#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT__SHIFT 0x1e +#define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT_MASK 0x00000003L +#define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT_MASK 0x0000000CL +#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT_MASK 0x00000030L +#define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT_MASK 0x000000C0L +#define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT_MASK 0x00000300L +#define 
RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT_MASK 0x00000C00L +#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT_MASK 0x00003000L +#define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT_MASK 0x0000C000L +#define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT_MASK 0x00030000L +#define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT_MASK 0x000C0000L +#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT_MASK 0x00300000L +#define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT_MASK 0x00C00000L +#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT_MASK 0x03000000L +#define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT_MASK 0x0C000000L +#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT_MASK 0x30000000L +#define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT_MASK 0xC0000000L +//RLC_EDC_CNT2 +#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT__SHIFT 0x0 +#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT__SHIFT 0x2 +#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT__SHIFT 0x4 +#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT__SHIFT 0x6 +#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT__SHIFT 0x8 +#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT__SHIFT 0xa +#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT__SHIFT 0xc +#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT__SHIFT 0xe +#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT__SHIFT 0x10 +#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT__SHIFT 0x12 +#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT__SHIFT 0x14 +#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT__SHIFT 0x16 +#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT__SHIFT 0x18 +#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT__SHIFT 0x1a +#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT__SHIFT 0x1c +#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT__SHIFT 0x1e +#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT_MASK 0x00000003L +#define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT_MASK 0x0000000CL +#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT_MASK 0x00000030L +#define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT_MASK 0x000000C0L +#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT_MASK 0x00000300L +#define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT_MASK 0x00000C00L +#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT_MASK 0x00003000L +#define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT_MASK 0x0000C000L +#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT_MASK 0x00030000L +#define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT_MASK 0x000C0000L +#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT_MASK 0x00300000L +#define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT_MASK 0x00C00000L +#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT_MASK 0x03000000L +#define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT_MASK 0x0C000000L +#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT_MASK 0x30000000L +#define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT_MASK 0xC0000000L + +#endif
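Every *_EDC_CNT field above follows the same two-bit SEC/DED counter layout, so RAS code can extract the counts generically with amdgpu's REG_GET_FIELD() macro once the target instance has been selected through GRBM_GFX_INDEX. A hedged sketch of the typical accumulation step, assuming the RREG32_SOC15() helper and the ce_count/ue_count fields of struct ras_err_data used elsewhere in the driver — illustrative only, not part of the patch:

	/* REG_GET_FIELD(val, reg, field) expands to
	 * (val & reg##__##field##_MASK) >> reg##__##field##__SHIFT,
	 * i.e. it consumes the shift/mask macros defined above directly. */
	u32 val = RREG32_SOC15(GC, 0, mmSQ_EDC_CNT);

	/* SEC = single-error-corrected (correctable),
	 * DED = double-error-detected (uncorrectable). */
	err_data->ce_count += REG_GET_FIELD(val, SQ_EDC_CNT, SGPR_SEC_COUNT);
	err_data->ue_count += REG_GET_FIELD(val, SQ_EDC_CNT, SGPR_DED_COUNT);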
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h index 40dfbf16bd34..111a71b434e2 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h @@ -11185,6 +11185,14 @@ #define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -11193,6 +11201,14 @@ #define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA0_DSM_CNTL #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -14197,6 +14213,14 @@ #define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -14205,6 +14229,14 @@ #define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define 
MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA1_DSM_CNTL #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -17209,6 +17241,14 @@ #define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA2_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA2_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA2_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -17217,6 +17257,14 @@ #define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA2_DSM_CNTL #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -20221,6 +20269,14 @@ #define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA3_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA3_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA3_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -20229,6 +20285,14 @@ #define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L 
//MMEA3_DSM_CNTL #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -23233,6 +23297,14 @@ #define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA4_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA4_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA4_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -23241,6 +23313,14 @@ #define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA4_DSM_CNTL #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -34952,6 +35032,14 @@ #define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA5_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA5_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA5_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -34960,6 +35048,14 @@ #define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA5_DSM_CNTL #define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define 
MMEA5_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -37964,6 +38060,14 @@ #define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA6_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA6_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA6_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -37972,6 +38076,14 @@ #define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA6_DSM_CNTL #define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 @@ -40976,6 +41088,14 @@ #define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa #define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc #define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 +#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a +#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c +#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e #define MMEA7_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L #define MMEA7_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL #define MMEA7_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L @@ -40984,6 +41104,14 @@ #define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L #define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L #define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L +#define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L +#define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L +#define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L +#define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L //MMEA7_DSM_CNTL #define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 #define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 diff --git 
a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 99469479e277..99ad4ddbe12f 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -21,6 +21,7 @@ */ #include <linux/firmware.h> +#include <linux/pci.h> #include "pp_debug.h" #include "amdgpu.h" @@ -1137,6 +1138,23 @@ static int smu_smc_table_hw_init(struct smu_context *smu, ret = smu_system_features_control(smu, true); if (ret) return ret; + + if (adev->asic_type == CHIP_NAVI10) { + if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 || + adev->pdev->revision == 0xc3 || + adev->pdev->revision == 0xca || + adev->pdev->revision == 0xcb)) || + (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 || + adev->pdev->revision == 0xf4 || + adev->pdev->revision == 0xf5 || + adev->pdev->revision == 0xf6))) { + ret = smu_disable_umc_cdr_12gbps_workaround(smu); + if (ret) { + pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n"); + return ret; + } + } + } } if (adev->asic_type != CHIP_ARCTURUS) { ret = smu_notify_display_change(smu); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 4e8ab139bb3b..689072a312a7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1026,12 +1026,15 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, clocks->num_levels = 0; for (i = 0; i < pclk_vol_table->count; i++) { - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; - clocks->data[i].latency_in_us = latency_required ? - smu10_get_mem_latency(hwmgr, - pclk_vol_table->entries[i].clk) : - 0; - clocks->num_levels++; + if (pclk_vol_table->entries[i].clk) { + clocks->data[clocks->num_levels].clocks_in_khz = + pclk_vol_table->entries[i].clk * 10; + clocks->data[clocks->num_levels].latency_in_us = latency_required ? 
+ smu10_get_mem_latency(hwmgr, + pclk_vol_table->entries[i].clk) : + 0; + clocks->num_levels++; + } } return 0; @@ -1077,9 +1080,11 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, clocks->num_levels = 0; for (i = 0; i < pclk_vol_table->count; i++) { - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; - clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol; - clocks->num_levels++; + if (pclk_vol_table->entries[i].clk) { + clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; + clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol; + clocks->num_levels++; + } } return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index d70abada66bf..bf04cfefb283 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -720,7 +720,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage; /* param1 is for corresponding std voltage */ - data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; + data->dpm_table.vddc_table.dpm_levels[i].enabled = true; } data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; @@ -730,7 +730,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) /* Initialize Vddci DPM table based on allow Mclk values */ for (i = 0; i < allowed_vdd_mclk_table->count; i++) { data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; - data->dpm_table.vddci_table.dpm_levels[i].enabled = 1; + data->dpm_table.vddci_table.dpm_levels[i].enabled = true; } data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count; } @@ -744,7 +744,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr) */ for (i = 0; i < allowed_vdd_mclk_table->count; i++) { data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; - data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; + data->dpm_table.mvdd_table.dpm_levels[i].enabled = true; } data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index b0591a8dda41..97b6714e83e6 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -273,6 +273,7 @@ struct smu_table_context uint8_t thermal_controller_type; void *overdrive_table; + void *boot_overdrive_table; }; struct smu_dpm_context { @@ -565,6 +566,7 @@ struct pptable_funcs { int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); int (*override_pcie_parameters)(struct smu_context *smu); uint32_t (*get_pptable_power_limit)(struct smu_context *smu); + int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu); }; int smu_load_microcode(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h index d8c9b7f91fcc..a5b4df146713 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h @@ -170,6 +170,8 @@ __SMU_DUMMY_MAP(SetSoftMinJpeg), \ __SMU_DUMMY_MAP(SetHardMinFclkByFreq), \ __SMU_DUMMY_MAP(DFCstateControl), \ + 
__SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \ + __SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \ #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h index 373861ddccd0..406bfd187ce8 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h @@ -120,7 +120,10 @@ #define PPSMC_MSG_GetVoltageByDpmOverdrive 0x45 #define PPSMC_MSG_BacoAudioD3PME 0x48 -#define PPSMC_Message_Count 0x49 +#define PPSMC_MSG_DALDisableDummyPstateChange 0x49 +#define PPSMC_MSG_DALEnableDummyPstateChange 0x4A + +#define PPSMC_Message_Count 0x4B typedef uint32_t PPSMC_Result; typedef uint32_t PPSMC_Msg; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 93c66c69ca28..19a9846b730e 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -119,6 +119,10 @@ static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg), MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME), MSG_MAP(ArmD3, PPSMC_MSG_ArmD3), + MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange), + MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange), + MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm), + MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive), }; static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = { @@ -737,6 +741,15 @@ static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_tabl return od_table->cap[feature]; } +static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table, + enum SMU_11_0_ODSETTING_ID setting, + uint32_t *min, uint32_t *max) +{ + if (min) + *min = od_table->min[setting]; + if (max) + *max = od_table->max[setting]; +} static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) @@ -755,6 +768,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, OverDriveTable_t *od_table = (OverDriveTable_t *)table_context->overdrive_table; struct smu_11_0_overdrive_table *od_settings = smu->od_settings; + uint32_t min_value, max_value; switch (clk_type) { case SMU_GFXCLK: @@ -843,7 +857,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) break; size += sprintf(buf + size, "OD_MCLK:\n"); - size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax); + size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax); break; case SMU_OD_VDDC_CURVE: if (!smu->od_enabled || !od_table || !od_settings) @@ -868,6 +882,55 @@ static int navi10_print_clk_levels(struct smu_context *smu, size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE); } break; + case SMU_OD_RANGE: + if (!smu->od_enabled || !od_table || !od_settings) + break; + size = sprintf(buf, "%s:\n", "OD_RANGE"); + + if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) { + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN, + &min_value, NULL); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX, + NULL, &max_value); + size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if 
(navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) { + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, + &min_value, &max_value); + size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", + min_value, max_value); + } + + if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) { + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", + min_value, max_value); + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3, + &min_value, &max_value); + size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", + min_value, max_value); + } + + break; default: break; } @@ -949,6 +1012,8 @@ static int navi10_get_clock_by_type_with_latency(struct smu_context *smu, case SMU_GFXCLK: case SMU_DCEFCLK: case SMU_SOCCLK: + case SMU_MCLK: + case SMU_UCLK: ret = smu_get_dpm_level_count(smu, clk_type, &level_count); if (ret) return ret; @@ -1871,6 +1936,28 @@ static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_tab return 0; } +static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu, + uint16_t *voltage, + uint32_t freq) +{ + uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16); + uint32_t value = 0; + int ret; + + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_GetVoltageByDpm, + param); + if (ret) { + pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!"); + return ret; + } + + smu_read_smc_arg(smu, &value); + *voltage = (uint16_t)value; + + return 0; +} + static int navi10_setup_od_limits(struct smu_context *smu) { struct smu_11_0_overdrive_table *overdrive_table = NULL; struct smu_11_0_powerplay_table *powerplay_table = NULL; @@ -1890,23 +1977,54 @@ static int navi10_setup_od_limits(struct smu_context *smu) { } static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) { - OverDriveTable_t *od_table; + OverDriveTable_t *od_table, *boot_od_table; int ret = 0; ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t)); if (ret) return ret; + od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table; + boot_od_table = (OverDriveTable_t *)smu->smu_table.boot_overdrive_table; if (initialize) { ret = navi10_setup_od_limits(smu); if (ret) { pr_err("Failed to retrieve board OD limits\n"); return ret; } + if (od_table) { + if (!od_table->GfxclkVolt1) { + ret = navi10_overdrive_get_gfx_clk_base_voltage(smu, + &od_table->GfxclkVolt1, + od_table->GfxclkFreq1); + if (ret) + od_table->GfxclkVolt1 = 0; + if (boot_od_table) + boot_od_table->GfxclkVolt1 = od_table->GfxclkVolt1; + } + 
if (!od_table->GfxclkVolt2) { + ret = navi10_overdrive_get_gfx_clk_base_voltage(smu, + &od_table->GfxclkVolt2, + od_table->GfxclkFreq2); + if (ret) + od_table->GfxclkVolt2 = 0; + if (boot_od_table) + boot_od_table->GfxclkVolt2 = od_table->GfxclkVolt2; + } + + if (!od_table->GfxclkVolt3) { + ret = navi10_overdrive_get_gfx_clk_base_voltage(smu, + &od_table->GfxclkVolt3, + od_table->GfxclkFreq3); + if (ret) + od_table->GfxclkVolt3 = 0; + if (boot_od_table) + boot_od_table->GfxclkVolt3 = od_table->GfxclkVolt3; + } + } } - od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table; if (od_table) { navi10_dump_od_table(od_table); } @@ -2002,6 +2120,13 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL return ret; od_table->UclkFmax = input[1]; break; + case PP_OD_RESTORE_DEFAULT_TABLE: + if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) { + pr_err("Overdrive table was not initialized!\n"); + return -EINVAL; + } + memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t)); + break; case PP_OD_COMMIT_DPM_TABLE: navi10_dump_od_table(od_table); ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true); @@ -2091,6 +2216,61 @@ static int navi10_run_btc(struct smu_context *smu) return ret; } +static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable) +{ + int result = 0; + + if (!enable) + result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE); + else + result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE); + + return result; +} + +static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu) +{ + uint32_t uclk_count, uclk_min, uclk_max; + uint32_t smu_version; + int ret = 0; + + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) + return ret; + + /* This workaround is available only for 42.50 or later SMC firmwares */ + if (smu_version < 0x2A3200) + return 0; + + ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count); + if (ret) + return ret; + + ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min); + if (ret) + return ret; + + ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max); + if (ret) + return ret; + + /* Force UCLK out of the highest DPM */ + ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min); + if (ret) + return ret; + + /* Revert the UCLK Hardmax */ + ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max); + if (ret) + return ret; + + /* + * In this case, the SMU already disabled the dummy pstate during enablement + * of UCLK DPM, so we have to re-enable it.
+ */ + return navi10_dummy_pstate_control(smu, true); +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -2185,6 +2365,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .od_edit_dpm_table = navi10_od_edit_dpm_table, .get_pptable_power_limit = navi10_get_pptable_power_limit, .run_btc = navi10_run_btc, + .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h index 783319ec8bf9..7bd200ffcda8 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -207,4 +207,7 @@ int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg); #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \ ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0) +#define smu_disable_umc_cdr_12gbps_workaround(smu) \ + ((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0) + #endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 02f8c9cb89d9..0dc49479a7eb 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1882,6 +1882,12 @@ int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, pr_err("Failed to export overdrive table!\n"); return ret; } + if (!table_context->boot_overdrive_table) { + table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table, overdrive_table_size, GFP_KERNEL); + if (!table_context->boot_overdrive_table) { + return -ENOMEM; + } + } } ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true); if (ret) { diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c index a3915bfcce81..275dbf65f1a0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c @@ -128,20 +128,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr, if (enable) { PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0, - "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!", + "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!", return -EINVAL); PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0, - "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!", + "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!", return -EINVAL); } else { PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0, - "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!", + "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!", return -EINVAL); PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0, - "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!", + "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!", return -EINVAL); } @@
-158,13 +158,13 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0, - "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!", + "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!", return -EINVAL); smc_features_low = smu9_get_argument(hwmgr); PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0, - "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!", + "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!", return -EINVAL); smc_features_high = smu9_get_argument(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index 0db57fb83d30..49e5ef3e3876 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -316,20 +316,20 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr, if (enable) { PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0, - "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!", + "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!", return ret); PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0, - "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!", + "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!", return ret); } else { PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0, - "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!", + "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!", return ret); PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0, - "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!", + "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!", return ret); } @@ -347,12 +347,12 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0, - "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!", + "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!", return ret); smc_features_low = vega20_get_argument(hwmgr); PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0, - "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!", + "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!", return ret); smc_features_high = vega20_get_argument(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 38febd5ca4da..4ad8d6c14ee5 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -1706,22 +1706,11 @@ static int vega20_set_default_od_settings(struct smu_context *smu, struct smu_table_context *table_context = &smu->smu_table; int ret; - if (initialize) { - if (table_context->overdrive_table) - return -EINVAL; - - table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL); - - if (!table_context->overdrive_table) - return -ENOMEM; - - ret = smu_update_table(smu, 
SMU_TABLE_OVERDRIVE, 0, - table_context->overdrive_table, false); - if (ret) { - pr_err("Failed to export over drive table!\n"); - return ret; - } + ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t)); + if (ret) + return ret; + if (initialize) { ret = vega20_set_default_od8_setttings(smu); if (ret) return ret; @@ -2778,12 +2767,11 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, break; case PP_OD_RESTORE_DEFAULT_TABLE: - ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false); - if (ret) { - pr_err("Failed to export over drive table!\n"); - return ret; + if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) { + pr_err("Overdrive table was not initialized!\n"); + return -EINVAL; } - + memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t)); break; case PP_OD_COMMIT_DPM_TABLE: diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h index df8336b593f7..ff94f3f6f264 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core.h +++ b/drivers/gpu/drm/nouveau/dispnv50/core.h @@ -6,6 +6,7 @@ struct nv50_core { const struct nv50_core_func *func; struct nv50_dmac chan; + bool assign_windows; }; int nv50_core_new(struct nouveau_drm *, struct nv50_core **); @@ -18,6 +19,10 @@ struct nv50_core_func { struct nvif_device *); void (*update)(struct nv50_core *, u32 *interlock, bool ntfy); + struct { + void (*owner)(struct nv50_core *); + } wndw; + const struct nv50_head_func *head; const struct nv50_outp_func { void (*ctrl)(struct nv50_core *, int or, u32 ctrl, @@ -48,6 +53,7 @@ int core917d_new(struct nouveau_drm *, s32, struct nv50_core **); int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **); int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *); void corec37d_update(struct nv50_core *, u32 *, bool); +void corec37d_wndw_owner(struct nv50_core *); extern const struct nv50_outp_func sorc37d; int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **); diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c index 40d9b654ab8c..3b36dc8d36b2 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c @@ -25,6 +25,20 @@ #include <nouveau_bo.h> void +corec37d_wndw_owner(struct nv50_core *core) +{ + const u32 windows = 8; /*XXX*/ + u32 *push, i; + if ((push = evo_wait(&core->chan, 2 * windows))) { + for (i = 0; i < windows; i++) { + evo_mthd(push, 0x1000 + (i * 0x080), 1); + evo_data(push, i >> 1); + } + evo_kick(push, &core->chan); + } +} + +void corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy) { u32 *push; @@ -76,20 +90,18 @@ corec37d_init(struct nv50_core *core) { const u32 windows = 8; /*XXX*/ u32 *push, i; - if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) { + if ((push = evo_wait(&core->chan, 2 + 5 * windows))) { evo_mthd(push, 0x0208, 1); evo_data(push, core->chan.sync.handle); for (i = 0; i < windows; i++) { - evo_mthd(push, 0x1000 + (i * 0x080), 3); - evo_data(push, i >> 1); + evo_mthd(push, 0x1004 + (i * 0x080), 2); evo_data(push, 0x0000001f); evo_data(push, 0x00000000); evo_mthd(push, 0x1010 + (i * 0x080), 1); evo_data(push, 0x00127fff); } - evo_mthd(push, 0x0200, 1); - evo_data(push, 0x00000001); evo_kick(push, &core->chan); + core->assign_windows = true; } } @@ -99,6 +111,7 @@ corec37d = { .ntfy_init = corec37d_ntfy_init, .ntfy_wait_done = 
corec37d_ntfy_wait_done, .update = corec37d_update, + .wndw.owner = corec37d_wndw_owner, .head = &headc37d, .sor = &sorc37d, }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c index b606d68cda10..147adcd60937 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c @@ -27,20 +27,18 @@ corec57d_init(struct nv50_core *core) { const u32 windows = 8; /*XXX*/ u32 *push, i; - if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) { + if ((push = evo_wait(&core->chan, 2 + 5 * windows))) { evo_mthd(push, 0x0208, 1); evo_data(push, core->chan.sync.handle); for (i = 0; i < windows; i++) { - evo_mthd(push, 0x1000 + (i * 0x080), 3); - evo_data(push, i >> 1); + evo_mthd(push, 0x1004 + (i * 0x080), 2); evo_data(push, 0x0000000f); evo_data(push, 0x00000000); evo_mthd(push, 0x1010 + (i * 0x080), 1); evo_data(push, 0x00117fff); } - evo_mthd(push, 0x0200, 1); - evo_data(push, 0x00000001); evo_kick(push, &core->chan); + core->assign_windows = true; } } @@ -50,6 +48,7 @@ corec57d = { .ntfy_init = corec37d_ntfy_init, .ntfy_wait_done = corec37d_ntfy_wait_done, .update = corec37d_update, + .wndw.owner = corec37d_wndw_owner, .head = &headc57d, .sor = &sorc37d, }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 2f123082c85d..a3dc2ba19fb2 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1933,6 +1933,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) struct nouveau_drm *drm = nouveau_drm(dev); struct nv50_disp *disp = nv50_disp(dev); struct nv50_atom *atom = nv50_atom(state); + struct nv50_core *core = disp->core; struct nv50_outp_atom *outp, *outt; u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {}; int i; @@ -2051,6 +2052,21 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) } } + /* Update window->head assignment. + * + * This has to happen in an update that's not interlocked with + * any window channels to avoid hitting HW error checks. + * + *TODO: Proper handling of window ownership (Turing apparently + * supports non-fixed mappings). + */ + if (core->assign_windows) { + core->func->wndw.owner(core); + core->func->update(core, interlock, false); + core->assign_windows = false; + interlock[NV50_DISP_INTERLOCK_CORE] = 0; + } + /* Update plane(s). 
*/ for_each_new_plane_in_state(state, plane, new_plane_state, i) { struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c index 3aa2cc3af1e2..c1032527f791 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c @@ -155,6 +155,12 @@ gv100_disp_intr_ctrl_disp(struct nv50_disp *disp) if (stat & 0x00000008) stat &= ~0x00000008; + if (stat & 0x00000080) { + u32 error = nvkm_mask(device, 0x611848, 0x00000000, 0x00000000); + nvkm_warn(subdev, "error %08x\n", error); + stat &= ~0x00000080; + } + if (stat & 0x00000100) { unsigned long wndws = nvkm_rd32(device, 0x611858); unsigned long other = nvkm_rd32(device, 0x61185c); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 856526cb2caf..d07c7db0c815 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -127,6 +127,8 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc) DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); + msleep(10); + WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset, (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) | NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS))); @@ -672,7 +674,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index) { struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc; - int i; radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); if (radeon_crtc == NULL) @@ -701,12 +702,6 @@ static void radeon_crtc_init(struct drm_device *dev, int index) radeon_crtc->mode_set.num_connectors = 0; #endif - for (i = 0; i < 256; i++) { - radeon_crtc->lut_r[i] = i << 2; - radeon_crtc->lut_g[i] = i << 2; - radeon_crtc->lut_b[i] = i << 2; - } - if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)) radeon_atombios_init_crtc(dev, radeon_crtc); else diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index fd470d6bf3f4..96565171d13e 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -327,7 +327,6 @@ enum radeon_flip_status { struct radeon_crtc { struct drm_crtc base; int crtc_id; - u16 lut_r[256], lut_g[256], lut_b[256]; bool enabled; bool can_tile; bool cursor_out_of_bounds; diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index ec79e8e5ad3c..63bccd201b97 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -45,7 +45,7 @@ * @guilty: atomic_t set to 1 when a job on this queue * is found to be guilty causing a timeout * - * Note: the sched_list should have atleast one element to schedule + * Note: the sched_list should have at least one element to schedule * the entity * * Returns 0 on success or a negative error code on failure. 
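The Navi10 hunks above bounce the UCLK hard limit to work around the UMC CDR issue on 12Gbps SKUs: gate on the SMC firmware version, look up the lowest and highest UCLK DPM levels, force the hard range down to the minimum and back up to the maximum, then re-enable the dummy pstate the SMU dropped while enabling UCLK DPM. Below is a minimal, compilable sketch of that sequence; struct fake_smu and the stub helpers are illustrative assumptions standing in for the driver's SMU API, not the real one.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's SMU helpers (assumed, not real API). */
struct fake_smu {
	uint32_t fw_version;    /* packed SMC version, cf. smu_get_smc_version() */
	uint32_t uclk_mhz[4];   /* UCLK DPM levels, lowest first */
	uint32_t nlevels;
};

static int get_dpm_freq_by_index(const struct fake_smu *smu, uint16_t idx,
				 uint32_t *freq)
{
	if (idx >= smu->nlevels)
		return -1;
	*freq = smu->uclk_mhz[idx];
	return 0;
}

static int set_hard_freq_range(struct fake_smu *smu, uint32_t min, uint32_t max)
{
	(void)smu;
	printf("hard UCLK range: %u..%u MHz\n", (unsigned)min, (unsigned)max);
	return 0;
}

static int dummy_pstate_control(struct fake_smu *smu, int enable)
{
	(void)smu;
	printf("dummy pstate %s\n", enable ? "enabled" : "disabled");
	return 0;
}

/* Same shape as navi10_disable_umc_cdr_12gbps_workaround() above. */
static int disable_umc_cdr_workaround(struct fake_smu *smu)
{
	uint32_t uclk_min, uclk_max;
	int ret;

	/* The workaround needs SMC firmware 42.50 (0x2A3200) or later. */
	if (smu->fw_version < 0x2A3200)
		return 0;

	ret = get_dpm_freq_by_index(smu, 0, &uclk_min);
	if (ret)
		return ret;
	ret = get_dpm_freq_by_index(smu, (uint16_t)(smu->nlevels - 1), &uclk_max);
	if (ret)
		return ret;

	/* Force UCLK out of the highest DPM level, then restore the hard max. */
	ret = set_hard_freq_range(smu, 0, uclk_min);
	if (ret)
		return ret;
	ret = set_hard_freq_range(smu, 0, uclk_max);
	if (ret)
		return ret;

	/* The SMU dropped the dummy pstate while enabling UCLK DPM. */
	return dummy_pstate_control(smu, 1);
}

int main(void)
{
	struct fake_smu smu = { 0x2A3200, { 100, 500, 800, 1000 }, 4 };
	return disable_umc_cdr_workaround(&smu);
}

Note the dispatch side: the smu_disable_umc_cdr_12gbps_workaround() macro in smu_internal.h checks the ppt_funcs hook and returns 0 when it is unset, so ASICs that never install the callback get a no-op for free.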
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index aa9e49f04988..bd268028fb3d 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1037,23 +1037,9 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt, free_pages((unsigned long)virt, get_order(size)); } -static int host1x_drm_probe(struct host1x_device *dev) +static bool host1x_drm_wants_iommu(struct host1x_device *dev) { - struct drm_driver *driver = &tegra_drm_driver; struct iommu_domain *domain; - struct tegra_drm *tegra; - struct drm_device *drm; - int err; - - drm = drm_dev_alloc(driver, &dev->dev); - if (IS_ERR(drm)) - return PTR_ERR(drm); - - tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); - if (!tegra) { - err = -ENOMEM; - goto put; - } /* * If the Tegra DRM clients are backed by an IOMMU, push buffers are @@ -1082,9 +1068,38 @@ static int host1x_drm_probe(struct host1x_device *dev) * up the device tree appropriately. This is considered an problem * of integration, so care must be taken for the DT to be consistent. */ - domain = iommu_get_domain_for_dev(drm->dev->parent); + domain = iommu_get_domain_for_dev(dev->dev.parent); + + /* + * Tegra20 and Tegra30 don't support addressing memory beyond the + * 32-bit boundary, so the regular GATHER opcodes will always be + * sufficient and whether or not the host1x is attached to an IOMMU + * doesn't matter. + */ + if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32)) + return true; + + return domain != NULL; +} + +static int host1x_drm_probe(struct host1x_device *dev) +{ + struct drm_driver *driver = &tegra_drm_driver; + struct tegra_drm *tegra; + struct drm_device *drm; + int err; + + drm = drm_dev_alloc(driver, &dev->dev); + if (IS_ERR(drm)) + return PTR_ERR(drm); + + tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); + if (!tegra) { + err = -ENOMEM; + goto put; + } - if (domain && iommu_present(&platform_bus_type)) { + if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) { tegra->domain = iommu_domain_alloc(&platform_bus_type); if (!tegra->domain) { err = -ENOMEM; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 1237df157e05..623768100c6a 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -60,8 +60,16 @@ static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, /* * If we've manually mapped the buffer object through the IOMMU, make * sure to return the IOVA address of our mapping. + * + * Similarly, for buffers that have been allocated by the DMA API the + * physical address can be used for devices that are not attached to + * an IOMMU. For these devices, callers must pass a valid pointer via + * the @phys argument. + * + * Imported buffers were also already mapped at import time, so the + * existing mapping can be reused. */ - if (phys && obj->mm) { + if (phys) { *phys = obj->iova; return NULL; } diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index cadcdd9ea427..9ccfb56e9b01 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -3,6 +3,8 @@ * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved. 
*/ +#include <linux/iommu.h> + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> @@ -107,21 +109,27 @@ const struct drm_plane_funcs tegra_plane_funcs = { static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) { + struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev); unsigned int i; int err; for (i = 0; i < state->base.fb->format->num_planes; i++) { struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + dma_addr_t phys_addr, *phys; + struct sg_table *sgt; - if (!dc->client.group) { - struct sg_table *sgt; + if (!domain || dc->client.group) + phys = &phys_addr; + else + phys = NULL; - sgt = host1x_bo_pin(dc->dev, &bo->base, NULL); - if (IS_ERR(sgt)) { - err = PTR_ERR(sgt); - goto unpin; - } + sgt = host1x_bo_pin(dc->dev, &bo->base, phys); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto unpin; + } + if (sgt) { err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); if (err == 0) { @@ -143,7 +151,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) state->iova[i] = sg_dma_address(sgt->sgl); state->sgt[i] = sgt; } else { - state->iova[i] = bo->iova; + state->iova[i] = phys_addr; } } @@ -156,9 +164,11 @@ unpin: struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); struct sg_table *sgt = state->sgt[i]; - dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); - host1x_bo_unpin(dc->dev, &bo->base, sgt); + if (sgt) + dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, + DMA_TO_DEVICE); + host1x_bo_unpin(dc->dev, &bo->base, sgt); state->iova[i] = DMA_MAPPING_ERROR; state->sgt[i] = NULL; } @@ -172,17 +182,13 @@ static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state) for (i = 0; i < state->base.fb->format->num_planes; i++) { struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + struct sg_table *sgt = state->sgt[i]; - if (!dc->client.group) { - struct sg_table *sgt = state->sgt[i]; - - if (sgt) { - dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, - DMA_TO_DEVICE); - host1x_bo_unpin(dc->dev, &bo->base, sgt); - } - } + if (sgt) + dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, + DMA_TO_DEVICE); + host1x_bo_unpin(dc->dev, &bo->base, sgt); state->iova[i] = DMA_MAPPING_ERROR; state->sgt[i] = NULL; } diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 41d24949478e..81226a4953c1 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -3915,6 +3915,17 @@ static int tegra_sor_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sor); pm_runtime_enable(&pdev->dev); + INIT_LIST_HEAD(&sor->client.list); + sor->client.ops = &sor_client_ops; + sor->client.dev = &pdev->dev; + + err = host1x_client_register(&sor->client); + if (err < 0) { + dev_err(&pdev->dev, "failed to register host1x client: %d\n", + err); + goto rpm_disable; + } + /* * On Tegra210 and earlier, provide our own implementation for the * pad output clock. 
@@ -3922,16 +3933,17 @@ static int tegra_sor_probe(struct platform_device *pdev) if (!sor->clk_pad) { char *name; + name = devm_kasprintf(sor->dev, GFP_KERNEL, "sor%u_pad_clkout", + sor->index); + if (!name) { + err = -ENOMEM; + goto unregister; + } + err = host1x_client_resume(&sor->client); if (err < 0) { dev_err(sor->dev, "failed to resume: %d\n", err); - goto remove; - } - - name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index); - if (!name) { - err = -ENOMEM; - goto remove; + goto unregister; } sor->clk_pad = tegra_clk_sor_pad_register(sor, name); @@ -3940,24 +3952,17 @@ static int tegra_sor_probe(struct platform_device *pdev) if (IS_ERR(sor->clk_pad)) { err = PTR_ERR(sor->clk_pad); - dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n", - err); - goto remove; - } - - INIT_LIST_HEAD(&sor->client.list); - sor->client.ops = &sor_client_ops; - sor->client.dev = &pdev->dev; - - err = host1x_client_register(&sor->client); - if (err < 0) { - dev_err(&pdev->dev, "failed to register host1x client: %d\n", + dev_err(sor->dev, "failed to register SOR pad clock: %d\n", err); - goto remove; + goto unregister; } return 0; +unregister: + host1x_client_unregister(&sor->client); +rpm_disable: + pm_runtime_disable(&pdev->dev); remove: if (sor->ops && sor->ops->remove) sor->ops->remove(sor); @@ -3971,8 +3976,6 @@ static int tegra_sor_remove(struct platform_device *pdev) struct tegra_sor *sor = platform_get_drvdata(pdev); int err; - pm_runtime_disable(&pdev->dev); - err = host1x_client_unregister(&sor->client); if (err < 0) { dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", @@ -3980,6 +3983,8 @@ static int tegra_sor_remove(struct platform_device *pdev) return err; } + pm_runtime_disable(&pdev->dev); + if (sor->ops && sor->ops->remove) { err = sor->ops->remove(sor); if (err < 0) |
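A second pattern recurs across the smu_v11_0.c, navi10_ppt.c and vega20_ppt.c hunks above: the first time the overdrive table is exported, a pristine copy is kmemdup()'d into boot_overdrive_table, so PP_OD_RESTORE_DEFAULT_TABLE can later restore defaults with a plain memcpy() instead of a round-trip through the SMU. The sketch below illustrates that snapshot/restore idea in userspace C; struct od_table is a hypothetical stand-in for OverDriveTable_t, and the error values are placeholders for the driver's -ENOMEM/-EINVAL.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the driver's OverDriveTable_t. */
struct od_table {
	unsigned int gfxclk_fmax;
	unsigned int uclk_fmax;
};

struct table_ctx {
	struct od_table *overdrive_table;      /* live, user-editable copy */
	struct od_table *boot_overdrive_table; /* pristine boot-time snapshot */
};

/* Mirrors the kmemdup() added to smu_v11_0_set_default_od_settings(). */
static int od_snapshot_boot_defaults(struct table_ctx *ctx)
{
	if (ctx->boot_overdrive_table)
		return 0; /* snapshot only once */
	ctx->boot_overdrive_table = malloc(sizeof(*ctx->boot_overdrive_table));
	if (!ctx->boot_overdrive_table)
		return -1; /* -ENOMEM in the driver */
	memcpy(ctx->boot_overdrive_table, ctx->overdrive_table,
	       sizeof(*ctx->boot_overdrive_table));
	return 0;
}

/* Mirrors the PP_OD_RESTORE_DEFAULT_TABLE cases in navi10/vega20. */
static int od_restore_defaults(struct table_ctx *ctx)
{
	if (!(ctx->overdrive_table && ctx->boot_overdrive_table))
		return -1; /* "Overdrive table was not initialized!" (-EINVAL) */
	memcpy(ctx->overdrive_table, ctx->boot_overdrive_table,
	       sizeof(*ctx->overdrive_table));
	return 0;
}

int main(void)
{
	struct od_table live = { 1850, 875 };
	struct table_ctx ctx = { &live, NULL };

	od_snapshot_boot_defaults(&ctx);   /* capture boot defaults */
	live.uclk_fmax = 1000;             /* user raises the MCLK limit */
	od_restore_defaults(&ctx);         /* back to 875 */
	printf("uclk_fmax restored to %u\n", live.uclk_fmax);
	free(ctx.boot_overdrive_table);
	return 0;
}

Committing (PP_OD_COMMIT_DPM_TABLE) still pushes the live table to the SMU via smu_update_table(); only the restore path avoids the hardware round-trip.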