| author | Dave Airlie <airlied@redhat.com> | 2020-03-13 00:09:11 +0100 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2020-03-13 00:09:11 +0100 |
| commit | 69ddce0970d9d1de63bed9c24eefa0814db29a5a (patch) | |
| tree | 2e64e14ab5ad2448cb60dcc77a34966dfaa157ee /drivers | |
| parent | Merge tag 'drm-misc-next-2020-03-09' of git://anongit.freedesktop.org/drm/drm... (diff) | |
| parent | drm/amdgpu/runpm: disable runpm on Vega10 (diff) | |
| download | linux-69ddce0970d9d1de63bed9c24eefa0814db29a5a.tar.xz, linux-69ddce0970d9d1de63bed9c24eefa0814db29a5a.zip | |
Merge tag 'amd-drm-next-5.7-2020-03-10' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.7-2020-03-10:
amdgpu:
- SR-IOV fixes
- Fix up fallout from drm load/unload callback removal
- Navi and Renoir power management watermark fixes
- Refactor smu parameter handling
- Display FEC fixes
- Display DCC fixes
- HDCP fixes
- Add support for USB-C PD firmware updates
- Pollock detection fix
- Rework compute ring priority handling (see the sketch after this list)
- RAS fixes
- Misc cleanups
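The compute-ring priority rework removes the per-job amdgpu_ring_priority_get()/put() refcounting (see the amdgpu_ring.c and amdgpu_job.c hunks below) and instead statically maps a scheduler priority onto one of two hardware compute pipe priorities. Condensed from the amdgpu_gfx.h and amdgpu_ctx.c hunks in this diff:

```c
/* From the amdgpu_gfx.h hunk: the two hardware compute pipe priorities. */
enum gfx_pipe_priority {
	AMDGPU_GFX_PIPE_PRIO_NORMAL = 1,
	AMDGPU_GFX_PIPE_PRIO_HIGH,
	AMDGPU_GFX_PIPE_PRIO_MAX
};

/* From the amdgpu_ctx.c hunk: only kernel and high-HW scheduler
 * priorities land on the high-priority compute pipe. */
static enum gfx_pipe_priority
amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
{
	switch (prio) {
	case DRM_SCHED_PRIORITY_HIGH_HW:
	case DRM_SCHED_PRIORITY_KERNEL:
		return AMDGPU_GFX_PIPE_PRIO_HIGH;
	default:
		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
	}
}
```

Queue 0 of each pipe is treated as the high-priority queue (amdgpu_gfx_is_high_priority_compute_queue()), and amdgpu_ctx_init_compute_sched() builds the per-priority scheduler arrays (gfx.compute_prio_sched) from the rings' has_high_prio flag.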
amdkfd:
- Consolidate more gfx config details in amdgpu (see the tile-config sketch after this list)
- Consolidate bo alloc flags
- Improve code comments
- SDMA MQD fixes
- Misc cleanups
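The gfx-config consolidation deletes four nearly identical per-ASIC get_tile_config() implementations (the gfx v7/v8/v9/v10 KFD glue) and adds one shared helper in amdgpu_amdkfd_gpuvm.c. This is that helper, reformatted from the hunk below:

```c
/* Shared replacement for the per-ASIC get_tile_config() copies;
 * everything now comes from the cached adev->gfx.config state, so the
 * KFD glue no longer needs REG_GET_FIELD(). */
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				  struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
		ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
		adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
		ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	/* Those values are not set from GFX9 onwards */
	config->num_banks = adev->gfx.config.num_banks;
	config->num_ranks = adev->gfx.config.num_ranks;

	return 0;
}
```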
gpu scheduler:
- Add support for modifying the sched list (see the sketch below)
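The single gpu-scheduler change is the new drm_sched_entity_modify_sched(), which lets a driver retarget a live entity to a different set of schedulers; amdgpu uses it to move a context's compute entities between the normal- and high-priority pipes when the context priority is overridden. Condensed from the amdgpu_ctx.c hunk below:

```c
static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					   struct amdgpu_ctx_entity *aentity,
					   int hw_ip,
					   enum drm_sched_priority priority)
{
	struct amdgpu_device *adev = ctx->adev;
	enum gfx_pipe_priority hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* sw priority: reorders jobs within the entity's current schedulers */
	drm_sched_entity_set_priority(&aentity->entity, priority);

	/* hw priority: swap in the scheduler list of the matching pipe */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
		scheds = adev->gfx.compute_prio_sched[hw_prio];
		num_scheds = adev->gfx.num_compute_sched[hw_prio];
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}
```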
uapi:
- Clarify comments about GEM_CREATE flags that are not usable by userspace.
The kernel driver has always rejected these flags from userspace; they are
only used internally in the kernel driver.
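Purely as a hypothetical illustration of that invariant (this code is not part of the merge): a create ioctl can whitelist the uapi-visible flag bits and reject everything else, so internal-only flags such as the CP MQD placement bit can never arrive from userspace.

```c
/* Hypothetical sketch, not from this merge: keep kernel-internal
 * GEM_CREATE flags out of userspace's reach by whitelisting the
 * uapi-visible bits and rejecting everything else. */
#define MYDRV_GEM_CREATE_USER_MASK	0x0000ffffULL	/* hypothetical mask */

static int mydrv_gem_create_check_flags(u64 flags)
{
	if (flags & ~MYDRV_GEM_CREATE_USER_MASK)
		return -EINVAL;	/* internal-only bit set by userspace */
	return 0;
}
```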
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200310212748.4519-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers')
129 files changed, 2860 insertions, 1317 deletions
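One note before the per-file hunks: the second parent, "drm/amdgpu/runpm: disable runpm on Vega10", materializes in the amdgpu_kms.c hunk as an extra ASIC exclusion in the BACO runtime-PM check inside amdgpu_driver_load_kms(). Condensed from that hunk (the helper wrapper here is hypothetical; upstream writes the condition inline):

```c
/* BACO-based runtime PM stays enabled for VI and newer parts, now
 * excluding Vega10 in addition to Vega20 and Arcturus. */
static bool runpm_baco_allowed(struct amdgpu_device *adev)
{
	return amdgpu_device_supports_baco(adev->ddev) &&
	       (amdgpu_runtime_pm != 0) &&
	       (adev->asic_type >= CHIP_TOPAZ) &&	/* VI and newer */
	       (adev->asic_type != CHIP_VEGA10) &&	/* new in this merge */
	       (adev->asic_type != CHIP_VEGA20) &&
	       (adev->asic_type != CHIP_ARCTURUS);
}
```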
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 97dd9596d17a..9e90cba9d5c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -579,6 +579,7 @@ struct amdgpu_asic_funcs { /* invalidate hdp read cache */ void (*invalidate_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring); + void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev); /* check if the asic needs a full reset of if soft reset will work */ bool (*need_full_reset)(struct amdgpu_device *adev); /* initialize doorbell layout for specific asic*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index bc2e72a66db9..abfbe89e805e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/dma-buf.h> #include "amdgpu_xgmi.h" +#include <uapi/linux/kfd_ioctl.h> static const unsigned int compute_vmid_bitmap = 0xFF00; @@ -224,7 +225,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd) int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, void **mem_obj, uint64_t *gpu_addr, - void **cpu_ptr, bool mqd_gfx9) + void **cpu_ptr, bool cp_mqd_gfx9) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_bo *bo = NULL; @@ -240,8 +241,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, bp.type = ttm_bo_type_kernel; bp.resv = NULL; - if (mqd_gfx9) - bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9; + if (cp_mqd_gfx9) + bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9; r = amdgpu_bo_create(adev, &bp, &bo); if (r) { @@ -501,10 +502,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, metadata_size, &metadata_flags); if (flags) { *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 
- ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT; + KFD_IOC_ALLOC_MEM_FLAGS_VRAM + : KFD_IOC_ALLOC_MEM_FLAGS_GTT; if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) - *flags |= ALLOC_MEM_FLAGS_PUBLIC; + *flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC; } out_put: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index b0ad3be0b03f..13feb313e9b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -242,6 +242,9 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo); +int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, + struct tile_config *config); + /* KGD2KFD callbacks */ int kgd2kfd_init(void); void kgd2kfd_exit(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 4bcc175a149d..d6549e5ea7e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -319,7 +319,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, - .get_tile_config = kgd_gfx_v9_get_tile_config, .set_vm_context_page_table_base = kgd_set_vm_context_page_table_base, .get_hive_id = amdgpu_amdkfd_get_hive_id, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index ca91fffb8a36..4ec6d0c03201 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -42,38 +42,6 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -/* Because of REG_GET_FIELD() being used, we put this function in the - * asic specific file. - */ -static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, - struct tile_config *config) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - config->gb_addr_config = adev->gfx.config.gb_addr_config; -#if 0 -/* TODO - confirm REG_GET_FIELD x2, should be OK as is... 
but - * MC_ARB_RAMCFG register doesn't exist on Vega10 - initial amdgpu - * changes commented out related code, doing the same here for now but - * need to sync with Ken et al - */ - config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFBANK); - config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFRANKS); -#endif - - config->tile_config_ptr = adev->gfx.config.tile_mode_array; - config->num_tile_configs = - ARRAY_SIZE(adev->gfx.config.tile_mode_array); - config->macro_tile_config_ptr = - adev->gfx.config.macrotile_mode_array; - config->num_macro_tile_configs = - ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); - - return 0; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -805,7 +773,6 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .address_watch_get_offset = kgd_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, - .get_tile_config = amdgpu_amdkfd_get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, .get_hive_id = amdgpu_amdkfd_get_hive_id, .get_unique_id = amdgpu_amdkfd_get_unique_id, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 8f052e98a3c6..0b7e78748540 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -84,31 +84,6 @@ union TCP_WATCH_CNTL_BITS { float f32All; }; -/* Because of REG_GET_FIELD() being used, we put this function in the - * asic specific file. - */ -static int get_tile_config(struct kgd_dev *kgd, - struct tile_config *config) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - config->gb_addr_config = adev->gfx.config.gb_addr_config; - config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFBANK); - config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFRANKS); - - config->tile_config_ptr = adev->gfx.config.tile_mode_array; - config->num_tile_configs = - ARRAY_SIZE(adev->gfx.config.tile_mode_array); - config->macro_tile_config_ptr = - adev->gfx.config.macrotile_mode_array; - config->num_macro_tile_configs = - ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); - - return 0; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -730,7 +705,6 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = { .address_watch_get_offset = kgd_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 19a10db93d68..ccd635b812b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -41,31 +41,6 @@ enum hqd_dequeue_request_type { RESET_WAVES }; -/* Because of REG_GET_FIELD() being used, we put this function in the - * asic specific file. 
- */ -static int get_tile_config(struct kgd_dev *kgd, - struct tile_config *config) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - config->gb_addr_config = adev->gfx.config.gb_addr_config; - config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFBANK); - config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg, - MC_ARB_RAMCFG, NOOFRANKS); - - config->tile_config_ptr = adev->gfx.config.tile_mode_array; - config->num_tile_configs = - ARRAY_SIZE(adev->gfx.config.tile_mode_array); - config->macro_tile_config_ptr = - adev->gfx.config.macrotile_mode_array; - config->num_macro_tile_configs = - ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); - - return 0; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -676,6 +651,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = get_tile_config, .set_vm_context_page_table_base = set_vm_context_page_table_base, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 7f91feff7c4f..df841c2ac5e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -48,28 +48,6 @@ enum hqd_dequeue_request_type { RESET_WAVES }; - -/* Because of REG_GET_FIELD() being used, we put this function in the - * asic specific file. - */ -int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd, - struct tile_config *config) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - config->gb_addr_config = adev->gfx.config.gb_addr_config; - - config->tile_config_ptr = adev->gfx.config.tile_mode_array; - config->num_tile_configs = - ARRAY_SIZE(adev->gfx.config.tile_mode_array); - config->macro_tile_config_ptr = - adev->gfx.config.macrotile_mode_array; - config->num_macro_tile_configs = - ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); - - return 0; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -736,7 +714,6 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, - .get_tile_config = kgd_gfx_v9_get_tile_config, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .get_hive_id = amdgpu_amdkfd_get_hive_id, .get_unique_id = amdgpu_amdkfd_get_unique_id, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 63d3e6683dfe..aedf67d57449 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -60,5 +60,3 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, uint8_t vmid, uint16_t *p_pasid); -int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd, - struct tile_config *config); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index e1d1eed7a25f..9dff792c9290 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -29,6 +29,7 @@ #include "amdgpu_vm.h" #include "amdgpu_amdkfd.h" #include "amdgpu_dma_buf.h" +#include 
<uapi/linux/kfd_ioctl.h> /* BO flag to indicate a KFD userptr BO */ #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63) @@ -400,18 +401,18 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) { struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); - bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT; + bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT; uint32_t mapping_flags; mapping_flags = AMDGPU_VM_PAGE_READABLE; - if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE) + if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE) mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; - if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE) + if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE) mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; switch (adev->asic_type) { case CHIP_ARCTURUS: - if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) { + if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { if (bo_adev == adev) mapping_flags |= coherent ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; @@ -1160,24 +1161,24 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( /* * Check on which domain to allocate BO */ - if (flags & ALLOC_MEM_FLAGS_VRAM) { + if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; - alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ? + alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : AMDGPU_GEM_CREATE_NO_CPU_ACCESS; - } else if (flags & ALLOC_MEM_FLAGS_GTT) { + } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; - } else if (flags & ALLOC_MEM_FLAGS_USERPTR) { + } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { domain = AMDGPU_GEM_DOMAIN_GTT; alloc_domain = AMDGPU_GEM_DOMAIN_CPU; alloc_flags = 0; if (!offset || !*offset) return -EINVAL; user_addr = untagged_addr(*offset); - } else if (flags & (ALLOC_MEM_FLAGS_DOORBELL | - ALLOC_MEM_FLAGS_MMIO_REMAP)) { + } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { domain = AMDGPU_GEM_DOMAIN_GTT; alloc_domain = AMDGPU_GEM_DOMAIN_CPU; bo_type = ttm_bo_type_sg; @@ -1198,7 +1199,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( } INIT_LIST_HEAD(&(*mem)->bo_va_list); mutex_init(&(*mem)->lock); - (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); + (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); /* Workaround for AQL queue wraparound bug. Map the same * memory twice. That means we only actually allocate half @@ -1680,10 +1681,12 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, INIT_LIST_HEAD(&(*mem)->bo_va_list); mutex_init(&(*mem)->lock); + (*mem)->alloc_flags = ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? 
- ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) | - ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE; + KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT) + | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE + | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; (*mem)->bo = amdgpu_bo_ref(bo); (*mem)->va = va; @@ -2242,3 +2245,25 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) kfree(mem); return 0; } + +/* Returns GPU-specific tiling mode information */ +int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, + struct tile_config *config) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + config->gb_addr_config = adev->gfx.config.gb_addr_config; + config->tile_config_ptr = adev->gfx.config.tile_mode_array; + config->num_tile_configs = + ARRAY_SIZE(adev->gfx.config.tile_mode_array); + config->macro_tile_config_ptr = + adev->gfx.config.macrotile_mode_array; + config->num_macro_tile_configs = + ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); + + /* Those values are not set from GFX9 onwards */ + config->num_banks = adev->gfx.config.num_banks; + config->num_ranks = adev->gfx.config.num_ranks; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 438d10ae343b..af91627b19b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1208,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct drm_sched_entity *entity = p->entity; enum drm_sched_priority priority; - struct amdgpu_ring *ring; struct amdgpu_bo_list_entry *e; struct amdgpu_job *job; uint64_t seq; @@ -1261,9 +1260,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, priority = job->base.s_priority; drm_sched_entity_push_job(&job->base, entity); - ring = to_amdgpu_ring(entity->rq->sched); - amdgpu_ring_priority_get(ring, priority); - amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 94a6c42f29ea..fa575bdc03c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -61,12 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp, return -EACCES; } +static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio) +{ + switch (prio) { + case DRM_SCHED_PRIORITY_HIGH_HW: + case DRM_SCHED_PRIORITY_KERNEL: + return AMDGPU_GFX_PIPE_PRIO_HIGH; + default: + return AMDGPU_GFX_PIPE_PRIO_NORMAL; + } +} + static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring) { struct amdgpu_device *adev = ctx->adev; struct amdgpu_ctx_entity *entity; struct drm_gpu_scheduler **scheds = NULL, *sched = NULL; unsigned num_scheds = 0; + enum gfx_pipe_priority hw_prio; enum drm_sched_priority priority; int r; @@ -85,8 +97,9 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const num_scheds = 1; break; case AMDGPU_HW_IP_COMPUTE: - scheds = adev->gfx.compute_sched; - num_scheds = adev->gfx.num_compute_sched; + hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority); + scheds = adev->gfx.compute_prio_sched[hw_prio]; + num_scheds = adev->gfx.num_compute_sched[hw_prio]; break; case AMDGPU_HW_IP_DMA: scheds = adev->sdma.sdma_sched; @@ -502,6 +515,29 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, return fence; } +static void 
amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx, + struct amdgpu_ctx_entity *aentity, + int hw_ip, + enum drm_sched_priority priority) +{ + struct amdgpu_device *adev = ctx->adev; + enum gfx_pipe_priority hw_prio; + struct drm_gpu_scheduler **scheds = NULL; + unsigned num_scheds; + + /* set sw priority */ + drm_sched_entity_set_priority(&aentity->entity, priority); + + /* set hw priority */ + if (hw_ip == AMDGPU_HW_IP_COMPUTE) { + hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority); + scheds = adev->gfx.compute_prio_sched[hw_prio]; + num_scheds = adev->gfx.num_compute_sched[hw_prio]; + drm_sched_entity_modify_sched(&aentity->entity, scheds, + num_scheds); + } +} + void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, enum drm_sched_priority priority) { @@ -514,13 +550,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, ctx->init_priority : ctx->override_priority; for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { - struct drm_sched_entity *entity; - if (!ctx->entities[i][j]) continue; - entity = &ctx->entities[i][j]->entity; - drm_sched_entity_set_priority(entity, ctx_prio); + amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j], + i, ctx_prio); } } } @@ -628,20 +662,53 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr) mutex_destroy(&mgr->lock); } + +static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev) +{ + int num_compute_sched_normal = 0; + int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1; + int i; + + /* use one drm sched array, gfx.compute_sched to store both high and + * normal priority drm compute schedulers */ + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + if (!adev->gfx.compute_ring[i].has_high_prio) + adev->gfx.compute_sched[num_compute_sched_normal++] = + &adev->gfx.compute_ring[i].sched; + else + adev->gfx.compute_sched[num_compute_sched_high--] = + &adev->gfx.compute_ring[i].sched; + } + + /* compute ring only has two priority for now */ + i = AMDGPU_GFX_PIPE_PRIO_NORMAL; + adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0]; + adev->gfx.num_compute_sched[i] = num_compute_sched_normal; + + i = AMDGPU_GFX_PIPE_PRIO_HIGH; + if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) { + /* When compute has no high priority rings then use */ + /* normal priority sched array */ + adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0]; + adev->gfx.num_compute_sched[i] = num_compute_sched_normal; + } else { + adev->gfx.compute_prio_sched[i] = + &adev->gfx.compute_sched[num_compute_sched_high - 1]; + adev->gfx.num_compute_sched[i] = + adev->gfx.num_compute_rings - num_compute_sched_normal; + } +} + void amdgpu_ctx_init_sched(struct amdgpu_device *adev) { int i, j; + amdgpu_ctx_init_compute_sched(adev); for (i = 0; i < adev->gfx.num_gfx_rings; i++) { adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched; adev->gfx.num_gfx_sched++; } - for (i = 0; i < adev->gfx.num_compute_rings; i++) { - adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched; - adev->gfx.num_compute_sched++; - } - for (i = 0; i < adev->sdma.num_instances; i++) { adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched; adev->sdma.num_sdma_sched++; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 3bb74056b9d2..c573edf02afc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -992,18 +992,6 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) return 0; } 
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) -{ - unsigned i; - - for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { - if (adev->debugfs_regs[i]) { - debugfs_remove(adev->debugfs_regs[i]); - adev->debugfs_regs[i] = NULL; - } - } -} - static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -1269,9 +1257,44 @@ failure: return 0; } +static int amdgpu_debugfs_sclk_set(void *data, u64 val) +{ + int ret = 0; + uint32_t max_freq, min_freq; + struct amdgpu_device *adev = (struct amdgpu_device *)data; + + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) + return -EINVAL; + + ret = pm_runtime_get_sync(adev->ddev->dev); + if (ret < 0) + return ret; + + if (is_support_sw_smu(adev)) { + ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true); + if (ret || val > max_freq || val < min_freq) + return -EINVAL; + ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true); + } else { + return 0; + } + + pm_runtime_mark_last_busy(adev->ddev->dev); + pm_runtime_put_autosuspend(adev->ddev->dev); + + if (ret) + return -EINVAL; + + return 0; +} + DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL, amdgpu_debugfs_ib_preempt, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL, + amdgpu_debugfs_sclk_set, "%llu\n"); + +extern void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev); int amdgpu_debugfs_init(struct amdgpu_device *adev) { int r, i; @@ -1285,6 +1308,15 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) return -EIO; } + adev->smu.debugfs_sclk = + debugfs_create_file("amdgpu_force_sclk", 0200, + adev->ddev->primary->debugfs_root, adev, + &fops_sclk_set); + if (!(adev->smu.debugfs_sclk)) { + DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n"); + return -EIO; + } + /* Register debugfs entries for amdgpu_ttm */ r = amdgpu_ttm_debugfs_init(adev); if (r) { @@ -1335,35 +1367,19 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) } } + amdgpu_ras_debugfs_create_all(adev); + return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list, ARRAY_SIZE(amdgpu_debugfs_list)); } -void amdgpu_debugfs_fini(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (!ring) - continue; - - amdgpu_debugfs_ring_fini(ring); - } - amdgpu_ttm_debugfs_fini(adev); - debugfs_remove(adev->debugfs_preempt); -} - #else int amdgpu_debugfs_init(struct amdgpu_device *adev) { return 0; } -void amdgpu_debugfs_fini(struct amdgpu_device *adev) { } int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { return 0; } -void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h index b382527e359a..de12d1101526 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h @@ -32,7 +32,6 @@ struct amdgpu_debugfs { }; int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); -void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); int amdgpu_debugfs_init(struct amdgpu_device *adev); void amdgpu_debugfs_fini(struct amdgpu_device *adev); int amdgpu_debugfs_add_files(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a3aaf2e3794c..7d4a11d7f5c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ 
-3193,6 +3193,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev) flush_delayed_work(&adev->delayed_init_work); adev->shutdown = true; + /* make sure IB test finished before entering exclusive mode + * to avoid preemption on IB test + * */ + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_request_full_gpu(adev, false); + /* disable all interrupts */ amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized){ @@ -3235,7 +3241,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->rmmio = NULL; amdgpu_device_doorbell_fini(adev); - amdgpu_debugfs_regs_cleanup(adev); device_remove_file(adev->dev, &dev_attr_pcie_replay_count); if (adev->ucode_sysfs_en) amdgpu_ucode_sysfs_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e1c4c2df716b..8ea86ffdea0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1121,18 +1121,16 @@ static void amdgpu_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - struct amdgpu_device *adev = dev->dev_private; #ifdef MODULE if (THIS_MODULE->state != MODULE_STATE_GOING) #endif DRM_ERROR("Hotplug removal is not supported\n"); drm_dev_unplug(dev); - drm_dev_put(dev); - amdgpu_debugfs_fini(adev); amdgpu_driver_unload_kms(dev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); + drm_dev_put(dev); } static void @@ -1301,24 +1299,55 @@ static int amdgpu_pmops_runtime_idle(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_dev->dev_private; - struct drm_crtc *crtc; + /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ + int ret = 1; if (!adev->runpm) { pm_runtime_forbid(dev); return -EBUSY; } - list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { - if (crtc->enabled) { - DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); - return -EBUSY; + if (amdgpu_device_has_dc_support(adev)) { + struct drm_crtc *crtc; + + drm_modeset_lock_all(drm_dev); + + drm_for_each_crtc(crtc, drm_dev) { + if (crtc->state->active) { + ret = -EBUSY; + break; + } + } + + drm_modeset_unlock_all(drm_dev); + + } else { + struct drm_connector *list_connector; + struct drm_connector_list_iter iter; + + mutex_lock(&drm_dev->mode_config.mutex); + drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL); + + drm_connector_list_iter_begin(drm_dev, &iter); + drm_for_each_connector_iter(list_connector, &iter) { + if (list_connector->dpms == DRM_MODE_DPMS_ON) { + ret = -EBUSY; + break; + } } + + drm_connector_list_iter_end(&iter); + + drm_modeset_unlock(&drm_dev->mode_config.connection_mutex); + mutex_unlock(&drm_dev->mode_config.mutex); } + if (ret == -EBUSY) + DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); + pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); - /* we don't want the main rpm_idle to call suspend - we want to autosuspend */ - return 1; + return ret; } long amdgpu_drm_ioctl(struct file *filp, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 7403588684b3..6b9c9193cdfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -192,6 +192,14 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev) return adev->gfx.mec.num_mec > 1; } +bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, + int queue) +{ + /* Policy: make queue 0 of each pipe as high priority compute queue */ + 
return (queue == 0); + +} + void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) { int i, queue, pipe, mec; @@ -565,7 +573,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) int r; struct ras_fs_if fs_info = { .sysfs_name = "gfx_err_count", - .debugfs_name = "gfx_err_inject", }; struct ras_ih_if ih_info = { .cb = amdgpu_gfx_process_ras_data_cb, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index ca17ffb01301..5825692d07e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -41,6 +41,15 @@ #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES +enum gfx_pipe_priority { + AMDGPU_GFX_PIPE_PRIO_NORMAL = 1, + AMDGPU_GFX_PIPE_PRIO_HIGH, + AMDGPU_GFX_PIPE_PRIO_MAX +}; + +#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0 +#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15 + struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; @@ -151,6 +160,8 @@ struct amdgpu_gfx_config { unsigned num_gpus; unsigned multi_gpu_tile_size; unsigned mc_arb_ramcfg; + unsigned num_banks; + unsigned num_ranks; unsigned gb_addr_config; unsigned num_rbs; unsigned gs_vgt_table_depth; @@ -204,6 +215,7 @@ struct amdgpu_gfx_funcs { u32 queue, u32 vmid); int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if); int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status); + void (*reset_ras_error_count) (struct amdgpu_device *adev); }; struct sq_work { @@ -278,8 +290,9 @@ struct amdgpu_gfx { uint32_t num_gfx_sched; unsigned num_gfx_rings; struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; + struct drm_gpu_scheduler **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX]; struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS]; - uint32_t num_compute_sched; + uint32_t num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX]; unsigned num_compute_rings; struct amdgpu_irq_src eop_irq; struct amdgpu_irq_src priv_reg_irq; @@ -361,6 +374,8 @@ void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue); bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec, int pipe, int queue); +bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, + int queue); int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index d42be880a236..4981e443a884 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -117,12 +117,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) static void amdgpu_job_free_cb(struct drm_sched_job *s_job) { - struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); drm_sched_job_cleanup(s_job); - amdgpu_ring_priority_put(ring, s_job->s_priority); dma_fence_put(job->fence); amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->sched_sync); @@ -143,7 +141,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, void *owner, struct dma_fence **f) { enum drm_sched_priority priority; - struct amdgpu_ring *ring; int r; if (!f) @@ -158,9 +155,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, priority = job->base.s_priority; drm_sched_entity_push_job(&job->base, entity); - ring = 
to_amdgpu_ring(entity->rq->sched); - amdgpu_ring_priority_get(ring, priority); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 6a1b62bc3dcf..fd1dc3236eca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -88,9 +88,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) if (adev->rmmio == NULL) goto done_free; - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_request_full_gpu(adev, false); - if (adev->runpm) { pm_runtime_get_sync(dev->dev); pm_runtime_forbid(dev->dev); @@ -175,6 +172,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) else if (amdgpu_device_supports_baco(dev) && (amdgpu_runtime_pm != 0) && (adev->asic_type >= CHIP_TOPAZ) && + (adev->asic_type != CHIP_VEGA10) && (adev->asic_type != CHIP_VEGA20) && (adev->asic_type != CHIP_ARCTURUS)) /* enable runpm on VI+ */ adev->runpm = true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c index 676c48c02d77..ead3dc572ec5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c @@ -32,7 +32,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev) }; struct ras_fs_if fs_info = { .sysfs_name = "mmhub_err_count", - .debugfs_name = "mmhub_err_inject", }; if (!adev->mmhub.ras_if) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 1cd78940cf82..e89fb35fec71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -26,6 +26,7 @@ struct amdgpu_mmhub_funcs { int (*ras_late_init)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); + void (*reset_ras_error_count)(struct amdgpu_device *adev); }; struct amdgpu_mmhub { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index 7d5c3a9de9ea..6201a5f4b4fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -30,7 +30,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev) }; struct ras_fs_if fs_info = { .sysfs_name = "pcie_bif_err_count", - .debugfs_name = "pcie_bif_err_inject", }; if (!adev->nbio.ras_if) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 1791c084787d..c687f5415b3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1319,7 +1319,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) amdgpu_amdkfd_unreserve_memory_limit(abo); /* We only remove the fence if the resv has individualized. 
*/ - WARN_ON_ONCE(bo->base.resv != &bo->base._resv); + WARN_ON_ONCE(bo->type == ttm_bo_type_kernel + && bo->base.resv != &bo->base._resv); if (bo->base.resv == &bo->base._resv) amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index d33f74100094..6d9b05e21f97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -24,6 +24,7 @@ */ #include <linux/firmware.h> +#include <linux/dma-mapping.h> #include "amdgpu.h" #include "amdgpu_psp.h" @@ -38,6 +39,9 @@ static void psp_set_funcs(struct amdgpu_device *adev); +static int psp_sysfs_init(struct amdgpu_device *adev); +static void psp_sysfs_fini(struct amdgpu_device *adev); + /* * Due to DF Cstate management centralized to PMFW, the firmware * loading sequence will be updated as below: @@ -136,6 +140,13 @@ static int psp_sw_init(void *handle) return ret; } + if (adev->asic_type == CHIP_NAVI10) { + ret= psp_sysfs_init(adev); + if (ret) { + return ret; + } + } + return 0; } @@ -152,6 +163,10 @@ static int psp_sw_fini(void *handle) release_firmware(adev->psp.ta_fw); adev->psp.ta_fw = NULL; } + + if (adev->asic_type == CHIP_NAVI10) + psp_sysfs_fini(adev); + return 0; } @@ -1816,6 +1831,97 @@ static int psp_set_powergating_state(void *handle, return 0; } +static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint32_t fw_ver; + int ret; + + if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { + DRM_INFO("PSP block is not ready yet."); + return -EBUSY; + } + + mutex_lock(&adev->psp.mutex); + ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver); + mutex_unlock(&adev->psp.mutex); + + if (ret) { + DRM_ERROR("Failed to read USBC PD FW, err = %d", ret); + return ret; + } + + return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver); +} + +static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + void *cpu_addr; + dma_addr_t dma_addr; + int ret; + char fw_name[100]; + const struct firmware *usbc_pd_fw; + + if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) { + DRM_INFO("PSP block is not ready yet."); + return -EBUSY; + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); + ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); + if (ret) + goto fail; + + /* We need contiguous physical mem to place the FW for psp to access */ + cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL); + + ret = dma_mapping_error(adev->dev, dma_addr); + if (ret) + goto rel_buf; + + memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size); + + /* + * x86 specific workaround. + * Without it the buffer is invisible in PSP. 
+ * + * TODO Remove once PSP starts snooping CPU cache + */ +#ifdef CONFIG_X86 + clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1))); +#endif + + mutex_lock(&adev->psp.mutex); + ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr); + mutex_unlock(&adev->psp.mutex); + +rel_buf: + dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr); + release_firmware(usbc_pd_fw); + +fail: + if (ret) { + DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); + return ret; + } + + return count; +} + +static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, + psp_usbc_pd_fw_sysfs_read, + psp_usbc_pd_fw_sysfs_write); + + + const struct amd_ip_funcs psp_ip_funcs = { .name = "psp", .early_init = psp_early_init, @@ -1834,6 +1940,21 @@ const struct amd_ip_funcs psp_ip_funcs = { .set_powergating_state = psp_set_powergating_state, }; +static int psp_sysfs_init(struct amdgpu_device *adev) +{ + int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw); + + if (ret) + DRM_ERROR("Failed to create USBC PD FW control file!"); + + return ret; +} + +static void psp_sysfs_fini(struct amdgpu_device *adev) +{ + device_remove_file(adev->dev, &dev_attr_usbc_pd_fw); +} + static const struct amdgpu_psp_funcs psp_funcs = { .check_fw_loading_status = psp_check_fw_loading_status, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 37fa184f27f6..297435c0c7c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -114,6 +114,8 @@ struct psp_funcs int (*mem_training)(struct psp_context *psp, uint32_t ops); uint32_t (*ring_get_wptr)(struct psp_context *psp); void (*ring_set_wptr)(struct psp_context *psp, uint32_t value); + int (*load_usbc_pd_fw)(struct psp_context *psp, dma_addr_t dma_addr); + int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver); }; #define AMDGPU_XGMI_MAX_CONNECTED_NODES 64 @@ -351,6 +353,14 @@ struct amdgpu_psp_funcs { #define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp)) #define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value)) +#define psp_load_usbc_pd_fw(psp, dma_addr) \ + ((psp)->funcs->load_usbc_pd_fw ? \ + (psp)->funcs->load_usbc_pd_fw((psp), (dma_addr)) : -EINVAL) + +#define psp_read_usbc_pd_fw(psp, fw_ver) \ + ((psp)->funcs->read_usbc_pd_fw ? 
\ + (psp)->funcs->read_usbc_pd_fw((psp), fw_ver) : -EINVAL) + extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amdgpu_ip_block_version psp_v3_1_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 930633a0ed64..ce8548d5fbf3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -721,6 +721,9 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, if (adev->nbio.funcs->query_ras_error_count) adev->nbio.funcs->query_ras_error_count(adev, &err_data); break; + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + amdgpu_xgmi_query_ras_error_count(adev, &err_data); + break; default: break; } @@ -1110,6 +1113,35 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, &amdgpu_ras_debugfs_ops); } +void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_manager *obj, *tmp; + struct ras_fs_if fs_info; + + /* + * it won't be called in resume path, no need to check + * suspend and gpu reset status + */ + if (!con) + return; + + amdgpu_ras_debugfs_create_ctrl_node(adev); + + list_for_each_entry_safe(obj, tmp, &con->head, node) { + if (!obj) + continue; + + if (amdgpu_ras_is_supported(adev, obj->head.block) && + (obj->attr_inuse == 1)) { + sprintf(fs_info.debugfs_name, "%s_err_inject", + ras_block_str(obj->head.block)); + fs_info.head = obj->head; + amdgpu_ras_debugfs_create(adev, &fs_info); + } + } +} + void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, struct ras_common_if *head) { @@ -1142,7 +1174,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) static int amdgpu_ras_fs_init(struct amdgpu_device *adev) { amdgpu_ras_sysfs_create_feature_node(adev); - amdgpu_ras_debugfs_create_ctrl_node(adev); return 0; } @@ -1846,8 +1877,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, goto interrupt; } - amdgpu_ras_debugfs_create(adev, fs_info); - r = amdgpu_ras_sysfs_create(adev, fs_info); if (r) goto sysfs; @@ -1856,7 +1885,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, cleanup: amdgpu_ras_sysfs_remove(adev, ras_block); sysfs: - amdgpu_ras_debugfs_remove(adev, ras_block); if (ih_info->cb) amdgpu_ras_interrupt_remove_handler(adev, ih_info); interrupt: @@ -1873,7 +1901,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev, return; amdgpu_ras_sysfs_remove(adev, ras_block); - amdgpu_ras_debugfs_remove(adev, ras_block); if (ih_info->cb) amdgpu_ras_interrupt_remove_handler(adev, ih_info); amdgpu_ras_feature_enable(adev, ras_block, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index a5fe29a9373e..55c3eceb390d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -592,6 +592,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, struct ras_fs_if *head); +void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev); + void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, struct ras_common_if *head); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 18e11b0fdc3e..a7e1d0425ed0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -151,76 +151,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) } /** - * amdgpu_ring_priority_put - restore a ring's priority - * - * @ring: amdgpu_ring structure holding the 
information - * @priority: target priority - * - * Release a request for executing at @priority - */ -void amdgpu_ring_priority_put(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - int i; - - if (!ring->funcs->set_priority) - return; - - if (atomic_dec_return(&ring->num_jobs[priority]) > 0) - return; - - /* no need to restore if the job is already at the lowest priority */ - if (priority == DRM_SCHED_PRIORITY_NORMAL) - return; - - mutex_lock(&ring->priority_mutex); - /* something higher prio is executing, no need to decay */ - if (ring->priority > priority) - goto out_unlock; - - /* decay priority to the next level with a job available */ - for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) { - if (i == DRM_SCHED_PRIORITY_NORMAL - || atomic_read(&ring->num_jobs[i])) { - ring->priority = i; - ring->funcs->set_priority(ring, i); - break; - } - } - -out_unlock: - mutex_unlock(&ring->priority_mutex); -} - -/** - * amdgpu_ring_priority_get - change the ring's priority - * - * @ring: amdgpu_ring structure holding the information - * @priority: target priority - * - * Request a ring's priority to be raised to @priority (refcounted). - */ -void amdgpu_ring_priority_get(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - if (!ring->funcs->set_priority) - return; - - if (atomic_inc_return(&ring->num_jobs[priority]) <= 0) - return; - - mutex_lock(&ring->priority_mutex); - if (priority <= ring->priority) - goto out_unlock; - - ring->priority = priority; - ring->funcs->set_priority(ring, priority); - -out_unlock: - mutex_unlock(&ring->priority_mutex); -} - -/** * amdgpu_ring_init - init driver ring struct. * * @adev: amdgpu_device pointer @@ -499,13 +429,6 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, return 0; } -void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) -{ -#if defined(CONFIG_DEBUG_FS) - debugfs_remove(ring->ent); -#endif -} - /** * amdgpu_ring_test_helper - tests ring and set sched readiness status * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 24caff085d00..9a443013d70d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -167,9 +167,6 @@ struct amdgpu_ring_funcs { uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask); void (*emit_tmz)(struct amdgpu_ring *ring, bool start); - /* priority functions */ - void (*set_priority) (struct amdgpu_ring *ring, - enum drm_sched_priority priority); /* Try to soft recover the ring to make the fence signal */ void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid); int (*preempt_ib)(struct amdgpu_ring *ring); @@ -222,6 +219,7 @@ struct amdgpu_ring { struct mutex priority_mutex; /* protected by priority_mutex */ int priority; + bool has_high_prio; #if defined(CONFIG_DEBUG_FS) struct dentry *ent; @@ -258,10 +256,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); -void amdgpu_ring_priority_get(struct amdgpu_ring *ring, - enum drm_sched_priority priority); -void amdgpu_ring_priority_put(struct amdgpu_ring *ring, - enum drm_sched_priority priority); int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned ring_size, struct amdgpu_irq_src *irq_src, unsigned irq_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index d3d4707f2168..52509c254cbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -126,6 +126,7 @@ struct amdgpu_rlc_funcs { void (*stop)(struct amdgpu_device *adev); void (*reset)(struct amdgpu_device *adev); void (*start)(struct amdgpu_device *adev); + void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid); }; struct amdgpu_rlc { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 7854c053e85d..250a309e4dee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -93,7 +93,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info; struct ras_fs_if fs_info = { .sysfs_name = "sdma_err_count", - .debugfs_name = "sdma_err_inject", }; if (!ih_info) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 485335267d78..4b352206354b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -56,6 +56,7 @@ struct amdgpu_sdma_ras_funcs { void (*ras_fini)(struct amdgpu_device *adev); int (*query_ras_error_count)(struct amdgpu_device *adev, uint32_t instance, void *ras_error_status); + void (*reset_ras_error_count)(struct amdgpu_device *adev); }; struct amdgpu_sdma { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fe131c21e8a3..c10ae1cdc1b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1028,7 +1028,7 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, struct amdgpu_ttm_tt *gtt = (void *)ttm; int r; - if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) { + if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) { uint64_t page_idx = 1; r = amdgpu_gart_bind(adev, gtt->offset, page_idx, @@ -1036,7 +1036,10 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, if (r) goto gart_bind_fail; - /* Patch mtype of the second part BO */ + /* The memory type of the first page defaults to UC. Now + * modify the memory type to NC from the second page of + * the BO onward. 
+ */ flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); @@ -2565,13 +2568,3 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) return 0; #endif } - -void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) -{ -#if defined(CONFIG_DEBUG_FS) - unsigned i; - - for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++) - debugfs_remove(adev->mman.debugfs_entries[i]); -#endif -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 7551f3729445..bd05bbb4878d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -134,6 +134,5 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem); int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); -void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index f4d40855147b..9dd51f0d2c11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -28,7 +28,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) int r; struct ras_fs_if fs_info = { .sysfs_name = "umc_err_count", - .debugfs_name = "umc_err_inject", }; struct ras_ih_if ih_info = { .cb = amdgpu_umc_process_ras_data_cb, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index f96464e2c157..a41272fbcba2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -493,14 +493,9 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) { - struct amdgpu_device *adev = ring->adev; struct dma_fence *fence; long r; - /* temporarily disable ib test for sriov */ - if (amdgpu_sriov_vf(adev)) - return 0; - r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL); if (r) goto error; @@ -527,6 +522,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; + if (amdgpu_sriov_vf(adev)) + return 0; + r = amdgpu_ring_alloc(ring, 16); if (r) return r; @@ -656,15 +654,10 @@ err: int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { - struct amdgpu_device *adev = ring->adev; struct dma_fence *fence = NULL; struct amdgpu_bo *bo = NULL; long r; - /* temporarily disable ib test for sriov */ - if (amdgpu_sriov_vf(adev)) - return 0; - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f44c26c373a1..b6c960363d55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1080,8 +1080,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, struct dma_fence *fence = NULL; bool pasid_mapping_needed = false; unsigned patch_offset = 0; + bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL)); int r; + if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid); + if (amdgpu_vmid_had_gpu_reset(adev, id)) { gds_switch_needed = true; vm_flush_needed = true; @@ -3209,6 +3213,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) union drm_amdgpu_vm *args = data; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_fpriv *fpriv = 
filp->driver_priv; + long timeout = msecs_to_jiffies(2000); int r; switch (args->in.op) { @@ -3220,6 +3225,21 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return r; break; case AMDGPU_VM_OP_UNRESERVE_VMID: + if (amdgpu_sriov_runtime(adev)) + timeout = 8 * timeout; + + /* Wait vm idle to make sure the vmid set in SPM_VMID is + * not referenced anymore. + */ + r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true); + if (r) + return r; + + r = amdgpu_vm_wait_idle(&fpriv->vm, timeout); + if (r < 0) + return r; + + amdgpu_bo_unreserve(fpriv->vm.root.base.bo); amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 7a89c91f7b80..95b3327168ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -26,7 +26,12 @@ #include "amdgpu_xgmi.h" #include "amdgpu_smu.h" #include "amdgpu_ras.h" +#include "soc15.h" #include "df/df_3_6_offset.h" +#include "xgmi/xgmi_4_0_0_smn.h" +#include "xgmi/xgmi_4_0_0_sh_mask.h" +#include "wafl/wafl2_4_0_0_smn.h" +#include "wafl/wafl2_4_0_0_sh_mask.h" static DEFINE_MUTEX(xgmi_mutex); @@ -36,6 +41,109 @@ static DEFINE_MUTEX(xgmi_mutex); static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE]; static unsigned hive_count = 0; +static const int xgmi_pcs_err_status_reg_vg20[] = { + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000, +}; + +static const int wafl_pcs_err_status_reg_vg20[] = { + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000, +}; + +static const int xgmi_pcs_err_status_reg_arct[] = { + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000, + smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000, +}; + +/* same as vg20*/ +static const int wafl_pcs_err_status_reg_arct[] = { + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, + smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000, +}; + +static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = { + {"XGMI PCS DataLossErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)}, + {"XGMI PCS TrainingErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)}, + {"XGMI PCS CRCErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)}, + {"XGMI PCS BERExceededErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)}, + {"XGMI PCS TxMetaDataErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)}, + {"XGMI PCS ReplayBufParityErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)}, + {"XGMI PCS DataParityErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)}, + {"XGMI PCS ReplayFifoOverflowErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)}, + {"XGMI PCS ReplayFifoUnderflowErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)}, + {"XGMI PCS ElasticFifoOverflowErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)}, + {"XGMI PCS DeskewErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)}, + {"XGMI PCS DataStartupLimitErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)}, + {"XGMI PCS FCInitTimeoutErr", + 
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)}, + {"XGMI PCS RecoveryTimeoutErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)}, + {"XGMI PCS ReadySerialTimeoutErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)}, + {"XGMI PCS ReadySerialAttemptErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)}, + {"XGMI PCS RecoveryAttemptErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)}, + {"XGMI PCS RecoveryRelockAttemptErr", + SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, +}; + +static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = { + {"WAFL PCS DataLossErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)}, + {"WAFL PCS TrainingErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)}, + {"WAFL PCS CRCErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)}, + {"WAFL PCS BERExceededErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)}, + {"WAFL PCS TxMetaDataErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)}, + {"WAFL PCS ReplayBufParityErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)}, + {"WAFL PCS DataParityErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)}, + {"WAFL PCS ReplayFifoOverflowErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)}, + {"WAFL PCS ReplayFifoUnderflowErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)}, + {"WAFL PCS ElasticFifoOverflowErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)}, + {"WAFL PCS DeskewErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)}, + {"WAFL PCS DataStartupLimitErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)}, + {"WAFL PCS FCInitTimeoutErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)}, + {"WAFL PCS RecoveryTimeoutErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)}, + {"WAFL PCS ReadySerialTimeoutErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)}, + {"WAFL PCS ReadySerialAttemptErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)}, + {"WAFL PCS RecoveryAttemptErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)}, + {"WAFL PCS RecoveryRelockAttemptErr", + SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)}, +}; + void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive) { return &hive->device_list; @@ -490,7 +598,6 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) }; struct ras_fs_if fs_info = { .sysfs_name = "xgmi_wafl_err_count", - .debugfs_name = "xgmi_wafl_err_inject", }; if (!adev->gmc.xgmi.supported || @@ -560,3 +667,99 @@ uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev, return addr + dram_base_addr; } + +static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, + uint32_t value, + uint32_t *ue_count, + uint32_t *ce_count, + bool is_xgmi_pcs) +{ + int i; + int ue_cnt; + + if (is_xgmi_pcs) { + /* query xgmi pcs error status, + * only ue is supported */ + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i ++) { + ue_cnt = (value & + xgmi_pcs_ras_fields[i].pcs_err_mask) >> + 
xgmi_pcs_ras_fields[i].pcs_err_shift; + if (ue_cnt) { + dev_info(adev->dev, "%s detected\n", + xgmi_pcs_ras_fields[i].err_name); + *ue_count += ue_cnt; + } + } + } else { + /* query wafl pcs error status, + * only ue is supported */ + for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) { + ue_cnt = (value & + wafl_pcs_ras_fields[i].pcs_err_mask) >> + wafl_pcs_ras_fields[i].pcs_err_shift; + if (ue_cnt) { + dev_info(adev->dev, "%s detected\n", + wafl_pcs_ras_fields[i].err_name); + *ue_count += ue_cnt; + } + } + } + + return 0; +} + +int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + int i; + uint32_t data; + uint32_t ue_cnt = 0, ce_cnt = 0; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL)) + return -EINVAL; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + switch (adev->asic_type) { + case CHIP_ARCTURUS: + /* check xgmi pcs error */ + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) { + data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, true); + } + /* check wafl pcs error */ + for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) { + data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, false); + } + break; + case CHIP_VEGA20: + default: + /* check xgmi pcs error */ + for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) { + data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, true); + } + /* check wafl pcs error */ + for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) { + data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]); + if (data) + amdgpu_xgmi_query_pcs_error_status(adev, + data, &ue_cnt, &ce_cnt, false); + } + break; + } + + err_data->ue_count += ue_cnt; + err_data->ce_count += ce_cnt; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 2aa61adee459..4a92067fe595 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -37,6 +37,12 @@ struct amdgpu_hive_info { struct task_barrier tb; }; +struct amdgpu_pcs_ras_field { + const char *err_name; + uint32_t pcs_err_mask; + uint32_t pcs_err_shift; +}; + struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock); int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev); int amdgpu_xgmi_add_device(struct amdgpu_device *adev); @@ -48,6 +54,8 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev); void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev); uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr); +int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status); static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, struct amdgpu_device *bo_adev) diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index dd30f4e61a8c..cae426c7c086 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -744,8 +744,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) cjiffies = jiffies; if (time_after(cjiffies, ctx->last_jump_jiffies)) { cjiffies -= ctx->last_jump_jiffies; - if 
((jiffies_to_msecs(cjiffies) > 5000)) { - DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); + if ((jiffies_to_msecs(cjiffies) > 10000)) { + DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n"); ctx->abort = true; } } else { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 7cefa230dec1..614e910643ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -35,6 +35,8 @@ #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" +#include "smuio/smuio_11_0_0_offset.h" +#include "smuio/smuio_11_0_0_sh_mask.h" #include "navi10_enum.h" #include "hdp/hdp_5_0_0_offset.h" #include "ivsrcid/gfx/irqsrcs_gfx_10_1.h" @@ -500,29 +502,28 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct dma_fence *f = NULL; - uint32_t scratch; - uint32_t tmp = 0; + unsigned index; + uint64_t gpu_addr; + uint32_t tmp; long r; - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); + r = amdgpu_device_wb_get(adev, &index); + if (r) return r; - } - - WREG32(scratch, 0xCAFEDEAD); + gpu_addr = adev->wb.gpu_addr + (index * 4); + adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); - r = amdgpu_ib_get(adev, NULL, 256, &ib); - if (r) { - DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); + r = amdgpu_ib_get(adev, NULL, 16, &ib); + if (r) goto err1; - } - ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); - ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); - ib.ptr[2] = 0xDEADBEEF; - ib.length_dw = 3; + ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); + ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; + ib.ptr[2] = lower_32_bits(gpu_addr); + ib.ptr[3] = upper_32_bits(gpu_addr); + ib.ptr[4] = 0xDEADBEEF; + ib.length_dw = 5; r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); if (r) @@ -530,15 +531,13 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = dma_fence_wait_timeout(f, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); r = -ETIMEDOUT; goto err2; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; } - tmp = RREG32(scratch); + tmp = adev->wb.wb[index]; if (tmp == 0xDEADBEEF) r = 0; else @@ -547,8 +546,7 @@ err2: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); err1: - amdgpu_gfx_scratch_free(adev, scratch); - + amdgpu_device_wb_free(adev, index); return r; } @@ -1016,6 +1014,10 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) return r; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -1783,11 +1785,11 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev) adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ - WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI, + WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); - WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO, + WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO, adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); - WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); + WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); return 0; } @@ -2395,7 +2397,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) for (i = 0; i < adev->gfx.num_gfx_rings; i++) 
adev->gfx.gfx_ring[i].sched.ready = false; } - WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); + WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp); for (i = 0; i < adev->usec_timeout; i++) { if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0) @@ -3211,6 +3213,22 @@ done: return r; } +static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct v10_compute_mqd *mqd) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + ring->has_high_prio = true; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; + } else { + ring->has_high_prio = false; + } + } +} + static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -3336,6 +3354,9 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring) tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp; + /* set static priority for a compute queue/ring */ + gfx_v10_0_compute_mqd_set_priority(ring, mqd); + /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. */ @@ -3925,9 +3946,8 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); - WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) | + ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL); mutex_unlock(&adev->gfx.gpu_clock_mutex); amdgpu_gfx_off_ctrl(adev, true); return clock; @@ -4215,6 +4235,18 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } +static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); + + data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; + + WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); +} + static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = { .is_rlc_enabled = gfx_v10_0_is_rlc_enabled, .set_safe_mode = gfx_v10_0_set_safe_mode, @@ -4225,7 +4257,8 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = { .resume = gfx_v10_0_rlc_resume, .stop = gfx_v10_0_rlc_stop, .reset = gfx_v10_0_rlc_reset, - .start = gfx_v10_0_rlc_start + .start = gfx_v10_0_rlc_start, + .update_spm_vmid = gfx_v10_0_update_spm_vmid }; static int gfx_v10_0_set_powergating_state(void *handle, @@ -4420,7 +4453,7 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, if (flags & AMDGPU_IB_PREEMPTED) control |= INDIRECT_BUFFER_PRE_RESUME(1); - if (!(ib->flags & AMDGPU_IB_FLAG_CE)) + if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid) gfx_v10_0_ring_emit_de_meta(ring, (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? 
true : false); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 8f20a5dd44fe..733d398c61cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3346,6 +3346,10 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) return r; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -3570,6 +3574,18 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) return 0; } +static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32(mmRLC_SPM_VMID); + + data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; + + WREG32(mmRLC_SPM_VMID, data); +} + static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) { u32 data, orig, tmp, tmp2; @@ -4221,7 +4237,8 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { .resume = gfx_v7_0_rlc_resume, .stop = gfx_v7_0_rlc_stop, .reset = gfx_v7_0_rlc_reset, - .start = gfx_v7_0_rlc_start + .start = gfx_v7_0_rlc_start, + .update_spm_vmid = gfx_v7_0_update_spm_vmid }; static int gfx_v7_0_early_init(void *handle) @@ -4338,6 +4355,11 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; + adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFBANK); + adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFRANKS); + adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; if (adev->flags & AMD_IS_APU) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index fa245973de12..fc32586ef80b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1318,6 +1318,10 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) return r; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -1820,6 +1824,11 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; + adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFBANK); + adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg, + MC_ARB_RAMCFG, NOOFRANKS); + adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; if (adev->flags & AMD_IS_APU) { @@ -4421,6 +4430,22 @@ static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req) return r; } +static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + ring->has_high_prio = true; + mqd->cp_hqd_queue_priority = + AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; + } else { + ring->has_high_prio = false; + } + } +} + static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -4544,9 +4569,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) /* defaults */ 
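The mqd_set_priority helpers this series adds for gfx v8/v9/v10 all make the same move: compute-queue priority is no longer poked into CP_HQD_PIPE_PRIORITY/CP_HQD_QUEUE_PRIORITY at runtime (the old set_priority ring callbacks are removed further down); instead it is baked into the MQD once, when the queue is initialized. A minimal standalone sketch of that decision, assuming the 0x2/0xf values visible elsewhere in this diff (the struct and helper names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for AMDGPU_GFX_PIPE_PRIO_HIGH and
 * AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; the same 0x2/0xf values appear in
 * the removed gfx_v8_0_hqd_set_priority() path below. */
#define PIPE_PRIO_HIGH		0x2
#define QUEUE_PRIORITY_MAX	0xf

struct mqd_prio {
	uint32_t cp_hqd_pipe_priority;
	uint32_t cp_hqd_queue_priority;
};

/* Bake the static priority into the MQD at init time; low-priority
 * queues simply keep the MQD defaults. */
static void mqd_set_priority(struct mqd_prio *m, bool high_prio)
{
	if (high_prio) {
		m->cp_hqd_pipe_priority = PIPE_PRIO_HIGH;
		m->cp_hqd_queue_priority = QUEUE_PRIORITY_MAX;
	}
}

Because the MQD is what the map_queues packet loads into the HQD, setting the fields here makes the priority take effect whenever the queue is mapped, with no per-submission register writes.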
mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR); mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR); - mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY); - mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY); - mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO); mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI); mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET); @@ -4558,6 +4580,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM); mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES); + /* set static priority for a queue/ring */ + gfx_v8_0_mqd_set_priority(ring, mqd); + mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); + /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. */ @@ -5589,6 +5615,18 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev) } } +static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32(mmRLC_SPM_VMID); + + data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; + + WREG32(mmRLC_SPM_VMID, data); +} + static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { .is_rlc_enabled = gfx_v8_0_is_rlc_enabled, .set_safe_mode = gfx_v8_0_set_safe_mode, @@ -5600,7 +5638,8 @@ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { .resume = gfx_v8_0_rlc_resume, .stop = gfx_v8_0_rlc_stop, .reset = gfx_v8_0_rlc_reset, - .start = gfx_v8_0_rlc_start + .start = gfx_v8_0_rlc_start, + .update_spm_vmid = gfx_v8_0_update_spm_vmid }; static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, @@ -6094,7 +6133,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); - if (!(ib->flags & AMDGPU_IB_FLAG_CE)) + if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid) gfx_v8_0_ring_emit_de_meta(ring); } @@ -6236,104 +6275,6 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring) WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } -static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring, - bool acquire) -{ - struct amdgpu_device *adev = ring->adev; - int pipe_num, tmp, reg; - int pipe_percent = acquire ? 
SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1; - - pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe; - - /* first me only has 2 entries, GFX and HP3D */ - if (ring->me > 0) - pipe_num -= 2; - - reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num; - tmp = RREG32(reg); - tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent); - WREG32(reg, tmp); -} - -static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - int i, pipe; - bool reserve; - struct amdgpu_ring *iring; - - mutex_lock(&adev->gfx.pipe_reserve_mutex); - pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0); - if (acquire) - set_bit(pipe, adev->gfx.pipe_reserve_bitmap); - else - clear_bit(pipe, adev->gfx.pipe_reserve_bitmap); - - if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) { - /* Clear all reservations - everyone reacquires all resources */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) - gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i], - true); - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) - gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i], - true); - } else { - /* Lower all pipes without a current reservation */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { - iring = &adev->gfx.gfx_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v8_0_ring_set_pipe_percent(iring, reserve); - } - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) { - iring = &adev->gfx.compute_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v8_0_ring_set_pipe_percent(iring, reserve); - } - } - - mutex_unlock(&adev->gfx.pipe_reserve_mutex); -} - -static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - uint32_t pipe_priority = acquire ? 0x2 : 0x0; - uint32_t queue_priority = acquire ? 
0xf : 0x0; - - mutex_lock(&adev->srbm_mutex); - vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); - - WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority); - WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority); - - vi_srbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -} -static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - struct amdgpu_device *adev = ring->adev; - bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; - - if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) - return; - - gfx_v8_0_hqd_set_priority(adev, ring, acquire); - gfx_v8_0_pipe_reserve_resources(adev, ring, acquire); -} - static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) @@ -6966,7 +6907,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, - .set_priority = gfx_v8_0_ring_set_priority_compute, .emit_wreg = gfx_v8_0_ring_emit_wreg, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index ab6880958511..1081fa3d4b0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -738,9 +738,9 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring); static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); -static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev); static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, void *inject_if); +static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev); static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) @@ -1847,6 +1847,10 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) break; } + /* init spm vmid with 0xf */ + if (adev->gfx.rlc.funcs->update_spm_vmid) + adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + return 0; } @@ -1993,7 +1997,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, .ras_error_inject = &gfx_v9_0_ras_error_inject, - .query_ras_error_count = &gfx_v9_0_query_ras_error_count + .query_ras_error_count = &gfx_v9_0_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count, }; static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = { @@ -2004,7 +2009,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = { .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, .ras_error_inject = &gfx_v9_4_ras_error_inject, - .query_ras_error_count = &gfx_v9_4_query_ras_error_count + .query_ras_error_count = &gfx_v9_4_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count, }; static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) @@ -3310,6 +3316,22 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring) WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp); } +static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) { + mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; + ring->has_high_prio = true; + mqd->cp_hqd_queue_priority = + 
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; + } else { + ring->has_high_prio = false; + } + } +} + static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -3446,6 +3468,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); mqd->cp_hqd_ib_control = tmp; + /* set static priority for a queue/ring */ + gfx_v9_0_mqd_set_priority(ring, mqd); + mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); + /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. */ @@ -3964,6 +3990,63 @@ static int gfx_v9_0_soft_reset(void *handle) return 0; } +static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + struct amdgpu_ring *ring = &kiq->ring; + + BUG_ON(!ring->funcs->emit_rreg); + + spin_lock_irqsave(&kiq->ring_lock, flags); + amdgpu_ring_alloc(ring, 32); + amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); + amdgpu_ring_write(ring, 9 | /* src: register*/ + (5 << 8) | /* dst: memory */ + (1 << 16) | /* count sel */ + (1 << 20)); /* write confirm */ + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + + kiq->reg_val_offs * 4)); + amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + + kiq->reg_val_offs * 4)); + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which cause + * gpu_recover() hang there. 
+ * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) + goto failed_kiq_read; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) + goto failed_kiq_read; + + return (uint64_t)adev->wb.wb[kiq->reg_val_offs] | + (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1 ] << 32ULL; + +failed_kiq_read: + pr_err("failed to read gpu clock\n"); + return ~0; +} + static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; @@ -3971,16 +4054,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) { - uint32_t tmp, lsb, msb, i = 0; - do { - if (i != 0) - udelay(1); - tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); - lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB); - msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); - i++; - } while (unlikely(tmp != msb) && (i < adev->usec_timeout)); - clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL); + clock = gfx_v9_0_kiq_read_clock(adev); } else { WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | @@ -4142,7 +4216,6 @@ static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = { { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16}, { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2}, { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6}, - { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1}, }; static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev) @@ -4343,18 +4416,6 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) goto fail; } - switch (adev->asic_type) - { - case CHIP_VEGA20: - gfx_v9_0_clear_ras_edc_counter(adev); - break; - case CHIP_ARCTURUS: - gfx_v9_4_clear_ras_edc_counter(adev); - break; - default: - break; - } - fail: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); @@ -4402,6 +4463,10 @@ static int gfx_v9_0_ecc_late_init(void *handle) if (r) return r; + if (adev->gfx.funcs && + adev->gfx.funcs->reset_ras_error_count) + adev->gfx.funcs->reset_ras_error_count(adev); + r = amdgpu_gfx_ras_late_init(adev); if (r) return r; @@ -4706,6 +4771,18 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, return 0; } +static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) +{ + u32 data; + + data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); + + data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; + data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; + + WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); +} + static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { .is_rlc_enabled = gfx_v9_0_is_rlc_enabled, .set_safe_mode = gfx_v9_0_set_safe_mode, @@ -4717,7 +4794,8 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { .resume = gfx_v9_0_rlc_resume, .stop = gfx_v9_0_rlc_stop, .reset = gfx_v9_0_rlc_reset, - .start = gfx_v9_0_rlc_start + .start = gfx_v9_0_rlc_start, + .update_spm_vmid = gfx_v9_0_update_spm_vmid }; static int gfx_v9_0_set_powergating_state(void *handle, @@ -4920,7 +4998,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { control |= INDIRECT_BUFFER_PRE_ENB(1); - if 
(!(ib->flags & AMDGPU_IB_FLAG_CE)) + if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid) gfx_v9_0_ring_emit_de_meta(ring); } @@ -5045,105 +5123,6 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring) return wptr; } -static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring, - bool acquire) -{ - struct amdgpu_device *adev = ring->adev; - int pipe_num, tmp, reg; - int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1; - - pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe; - - /* first me only has 2 entries, GFX and HP3D */ - if (ring->me > 0) - pipe_num -= 2; - - reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num; - tmp = RREG32(reg); - tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent); - WREG32(reg, tmp); -} - -static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - int i, pipe; - bool reserve; - struct amdgpu_ring *iring; - - mutex_lock(&adev->gfx.pipe_reserve_mutex); - pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0); - if (acquire) - set_bit(pipe, adev->gfx.pipe_reserve_bitmap); - else - clear_bit(pipe, adev->gfx.pipe_reserve_bitmap); - - if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) { - /* Clear all reservations - everyone reacquires all resources */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) - gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i], - true); - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) - gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i], - true); - } else { - /* Lower all pipes without a current reservation */ - for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { - iring = &adev->gfx.gfx_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v9_0_ring_set_pipe_percent(iring, reserve); - } - - for (i = 0; i < adev->gfx.num_compute_rings; ++i) { - iring = &adev->gfx.compute_ring[i]; - pipe = amdgpu_gfx_mec_queue_to_bit(adev, - iring->me, - iring->pipe, - 0); - reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); - gfx_v9_0_ring_set_pipe_percent(iring, reserve); - } - } - - mutex_unlock(&adev->gfx.pipe_reserve_mutex); -} - -static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - bool acquire) -{ - uint32_t pipe_priority = acquire ? 0x2 : 0x0; - uint32_t queue_priority = acquire ? 
0xf : 0x0; - - mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); - - WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority); - WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority); - - soc15_grbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -} - -static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring, - enum drm_sched_priority priority) -{ - struct amdgpu_device *adev = ring->adev; - bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; - - if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) - return; - - gfx_v9_0_hqd_set_priority(adev, ring, acquire); - gfx_v9_0_pipe_reserve_resources(adev, ring, acquire); -} - static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -6323,7 +6302,7 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg, return 0; } -static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev) +static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev) { int i, j, k; @@ -6514,7 +6493,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { .test_ib = gfx_v9_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, - .set_priority = gfx_v9_0_ring_set_priority_compute, .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c index f099f13d7f1e..17f1e7b69a60 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -893,7 +893,7 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, return 0; } -void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev) +void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev) { int i, j, k; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h index 2e3f6f755ad4..1ffecc5c0f0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h @@ -32,4 +32,6 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if); +void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev); + #endif /* __GFX_V9_4_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index e0654a216ab5..cc866c367939 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -81,24 +81,31 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; - /* Disable AGP. */ - WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0); - WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0); - WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF); - - /* Program the system aperture low logical page number. */ - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); - - /* Set default page address. 
*/ - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start - + adev->vm_manager.vram_base_offset; - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, - (u32)(value >> 12)); - WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, - (u32)(value >> 44)); + if (!amdgpu_sriov_vf(adev)) { + /* + * the new L1 policy will block the SRIOV guest from writing + * these regs, and they will be programmed at host, + * so skip programming these regs. + */ + /* Disable AGP. */ + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0); + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0); + WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF); + + /* Program the system aperture low logical page number. */ + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR, + adev->gmc.vram_start >> 18); + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + adev->gmc.vram_end >> 18); + + /* Set default page address. */ + value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + + adev->vm_manager.vram_base_offset; + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + (u32)(value >> 12)); + WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + (u32)(value >> 44)); + } /* Program "protection fault". */ WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, @@ -260,18 +267,6 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev) int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) { - /* - * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are - * VF copy registers, so vbios post doesn't program them; the - * SRIOV driver needs to program them - */ - WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); - } - /* GART Enable. */ gfxhub_v2_0_init_gart_aperture_regs(adev); gfxhub_v2_0_init_system_aperture_regs(adev);
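gfxhub_v2_0 above and mmhub_v2_0 below apply the same rule: under the new L1 security policy these aperture registers are host-owned, so an SR-IOV guest must simply skip writing them. A toy sketch of the guard, assuming placeholder register numbers and an is_vf flag standing in for amdgpu_sriov_vf():

#include <stdbool.h>
#include <stdint.h>

struct dev_ctx {
	bool is_vf;			/* stand-in for amdgpu_sriov_vf(adev) */
	uint64_t vram_start, vram_end;
};

static void wreg32(uint32_t reg, uint32_t val)
{
	(void)reg; (void)val;		/* MMIO write elided in this sketch */
}

static void init_system_aperture(struct dev_ctx *dev)
{
	if (dev->is_vf)
		return;	/* the host programs the aperture; guest writes would be blocked */

	wreg32(0x100 /* APERTURE_LOW, placeholder offset */,
	       (uint32_t)(dev->vram_start >> 18));
	wreg32(0x101 /* APERTURE_HIGH, placeholder offset */,
	       (uint32_t)(dev->vram_end >> 18));
}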
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 10171acbf3e1..6ceaab553130 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -948,6 +948,9 @@ static int gmc_v9_0_late_init(void *handle) } } + if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count) + adev->mmhub.funcs->reset_ras_error_count(adev); + r = amdgpu_gmc_ras_late_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 49a3a56ec017..396c2a624de0 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -747,7 +747,19 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev, err_data->ue_count += ded_count; } +static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t i; + + /* read back edc counter registers to reset the counters to 0 */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++) + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i])); + } +} + const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = { .ras_late_init = amdgpu_mmhub_ras_late_init, .query_ras_error_count = mmhub_v1_0_query_ras_error_count, + .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index bde189680521..fb3f228458e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -72,11 +72,18 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0); WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF); - /* Program the system aperture low logical page number. */ - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, - adev->gmc.vram_start >> 18); - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - adev->gmc.vram_end >> 18); + if (!amdgpu_sriov_vf(adev)) { + /* + * the new L1 policy will block the SRIOV guest from writing + * these regs, and they will be programmed at host, + * so skip programming these regs. + */ + /* Program the system aperture low logical page number. */ + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, + adev->gmc.vram_start >> 18); + WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR, + adev->gmc.vram_end >> 18); + } /* Set default page address. */ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + @@ -247,18 +254,6 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev) int mmhub_v2_0_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) { - /* - * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are - * VF copy registers, so vbios post doesn't program them; the - * SRIOV driver needs to program them - */ - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); - } - /* GART Enable.
*/ mmhub_v2_0_init_gart_aperture_regs(adev); mmhub_v2_0_init_system_aperture_regs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index a5281df8d84f..0d413fabd015 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -1596,7 +1596,19 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev, err_data->ue_count += ded_count; } +static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev) +{ + uint32_t i; + + /* read back edc counter registers to reset the counters to 0 */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) { + for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++) + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i])); + } +} + const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = { .ras_late_init = amdgpu_mmhub_ras_late_init, .query_ras_error_count = mmhub_v9_4_query_ras_error_count, + .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h new file mode 100644 index 000000000000..1b5086c7d4e6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h @@ -0,0 +1,338 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
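The mmhub_v1_0 and mmhub_v9_4 reset_ras_error_count callbacks above (and the sdma_v4_0 one at the end of this series) all lean on the same hardware property: the EDC counter registers are read-to-clear, so resetting them is just reading each one and discarding the value. A compilable sketch of the idiom, with rreg32() standing in for the RREG32(SOC15_REG_ENTRY_OFFSET(...)) accesses:

#include <stddef.h>
#include <stdint.h>

static uint32_t rreg32(uint32_t reg)
{
	(void)reg;
	return 0;			/* MMIO read elided in this sketch */
}

/* Reading an EDC counter register resets it to 0, so a "reset" is a
 * plain read loop over the counter list with the results discarded. */
static void reset_edc_counters(const uint32_t *edc_regs, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		(void)rreg32(edc_regs[i]);
}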
+ * + */ + +#ifndef __MMSCH_V2_0_H__ +#define __MMSCH_V2_0_H__ + +// addressBlock: uvd0_mmsch_dec +// base address: 0x1e000 +#define mmMMSCH_UCODE_ADDR 0x0000 +#define mmMMSCH_UCODE_ADDR_BASE_IDX 0 +#define mmMMSCH_UCODE_DATA 0x0001 +#define mmMMSCH_UCODE_DATA_BASE_IDX 0 +#define mmMMSCH_SRAM_ADDR 0x0002 +#define mmMMSCH_SRAM_ADDR_BASE_IDX 0 +#define mmMMSCH_SRAM_DATA 0x0003 +#define mmMMSCH_SRAM_DATA_BASE_IDX 0 +#define mmMMSCH_VF_SRAM_OFFSET 0x0004 +#define mmMMSCH_VF_SRAM_OFFSET_BASE_IDX 0 +#define mmMMSCH_DB_SRAM_OFFSET 0x0005 +#define mmMMSCH_DB_SRAM_OFFSET_BASE_IDX 0 +#define mmMMSCH_CTX_SRAM_OFFSET 0x0006 +#define mmMMSCH_CTX_SRAM_OFFSET_BASE_IDX 0 +#define mmMMSCH_CTL 0x0007 +#define mmMMSCH_CTL_BASE_IDX 0 +#define mmMMSCH_INTR 0x0008 +#define mmMMSCH_INTR_BASE_IDX 0 +#define mmMMSCH_INTR_ACK 0x0009 +#define mmMMSCH_INTR_ACK_BASE_IDX 0 +#define mmMMSCH_INTR_STATUS 0x000a +#define mmMMSCH_INTR_STATUS_BASE_IDX 0 +#define mmMMSCH_VF_VMID 0x000b +#define mmMMSCH_VF_VMID_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_LO 0x000c +#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_HI 0x000d +#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0 +#define mmMMSCH_VF_CTX_SIZE 0x000e +#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0 +#define mmMMSCH_VF_GPCOM_ADDR_LO 0x000f +#define mmMMSCH_VF_GPCOM_ADDR_LO_BASE_IDX 0 +#define mmMMSCH_VF_GPCOM_ADDR_HI 0x0010 +#define mmMMSCH_VF_GPCOM_ADDR_HI_BASE_IDX 0 +#define mmMMSCH_VF_GPCOM_SIZE 0x0011 +#define mmMMSCH_VF_GPCOM_SIZE_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_HOST 0x0012 +#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_RESP 0x0013 +#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_0 0x0014 +#define mmMMSCH_VF_MAILBOX_0_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_0_RESP 0x0015 +#define mmMMSCH_VF_MAILBOX_0_RESP_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_1 0x0016 +#define mmMMSCH_VF_MAILBOX_1_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_1_RESP 0x0017 +#define mmMMSCH_VF_MAILBOX_1_RESP_BASE_IDX 0 +#define mmMMSCH_CNTL 0x001c +#define mmMMSCH_CNTL_BASE_IDX 0 +#define mmMMSCH_NONCACHE_OFFSET0 0x001d +#define mmMMSCH_NONCACHE_OFFSET0_BASE_IDX 0 +#define mmMMSCH_NONCACHE_SIZE0 0x001e +#define mmMMSCH_NONCACHE_SIZE0_BASE_IDX 0 +#define mmMMSCH_NONCACHE_OFFSET1 0x001f +#define mmMMSCH_NONCACHE_OFFSET1_BASE_IDX 0 +#define mmMMSCH_NONCACHE_SIZE1 0x0020 +#define mmMMSCH_NONCACHE_SIZE1_BASE_IDX 0 +#define mmMMSCH_PDEBUG_STATUS 0x0021 +#define mmMMSCH_PDEBUG_STATUS_BASE_IDX 0 +#define mmMMSCH_PDEBUG_DATA_32UPPERBITS 0x0022 +#define mmMMSCH_PDEBUG_DATA_32UPPERBITS_BASE_IDX 0 +#define mmMMSCH_PDEBUG_DATA_32LOWERBITS 0x0023 +#define mmMMSCH_PDEBUG_DATA_32LOWERBITS_BASE_IDX 0 +#define mmMMSCH_PDEBUG_EPC 0x0024 +#define mmMMSCH_PDEBUG_EPC_BASE_IDX 0 +#define mmMMSCH_PDEBUG_EXCCAUSE 0x0025 +#define mmMMSCH_PDEBUG_EXCCAUSE_BASE_IDX 0 +#define mmMMSCH_PROC_STATE1 0x0026 +#define mmMMSCH_PROC_STATE1_BASE_IDX 0 +#define mmMMSCH_LAST_MC_ADDR 0x0027 +#define mmMMSCH_LAST_MC_ADDR_BASE_IDX 0 +#define mmMMSCH_LAST_MEM_ACCESS_HI 0x0028 +#define mmMMSCH_LAST_MEM_ACCESS_HI_BASE_IDX 0 +#define mmMMSCH_LAST_MEM_ACCESS_LO 0x0029 +#define mmMMSCH_LAST_MEM_ACCESS_LO_BASE_IDX 0 +#define mmMMSCH_IOV_ACTIVE_FCN_ID 0x002a +#define mmMMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 0 +#define mmMMSCH_SCRATCH_0 0x002b +#define mmMMSCH_SCRATCH_0_BASE_IDX 0 +#define mmMMSCH_SCRATCH_1 0x002c +#define mmMMSCH_SCRATCH_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_0 0x002d +#define mmMMSCH_GPUIOV_SCH_BLOCK_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_CONTROL_0 0x002e +#define 
mmMMSCH_GPUIOV_CMD_CONTROL_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_0 0x002f +#define mmMMSCH_GPUIOV_CMD_STATUS_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0 0x0030 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0 0x0031 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0 0x0032 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW6_0 0x0033 +#define mmMMSCH_GPUIOV_DW6_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW7_0 0x0034 +#define mmMMSCH_GPUIOV_DW7_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW8_0 0x0035 +#define mmMMSCH_GPUIOV_DW8_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_1 0x0036 +#define mmMMSCH_GPUIOV_SCH_BLOCK_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_CONTROL_1 0x0037 +#define mmMMSCH_GPUIOV_CMD_CONTROL_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_1 0x0038 +#define mmMMSCH_GPUIOV_CMD_STATUS_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1 0x0039 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1 0x003a +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1 0x003b +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW6_1 0x003c +#define mmMMSCH_GPUIOV_DW6_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW7_1 0x003d +#define mmMMSCH_GPUIOV_DW7_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW8_1 0x003e +#define mmMMSCH_GPUIOV_DW8_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CNTXT 0x003f +#define mmMMSCH_GPUIOV_CNTXT_BASE_IDX 0 +#define mmMMSCH_SCRATCH_2 0x0040 +#define mmMMSCH_SCRATCH_2_BASE_IDX 0 +#define mmMMSCH_SCRATCH_3 0x0041 +#define mmMMSCH_SCRATCH_3_BASE_IDX 0 +#define mmMMSCH_SCRATCH_4 0x0042 +#define mmMMSCH_SCRATCH_4_BASE_IDX 0 +#define mmMMSCH_SCRATCH_5 0x0043 +#define mmMMSCH_SCRATCH_5_BASE_IDX 0 +#define mmMMSCH_SCRATCH_6 0x0044 +#define mmMMSCH_SCRATCH_6_BASE_IDX 0 +#define mmMMSCH_SCRATCH_7 0x0045 +#define mmMMSCH_SCRATCH_7_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_HEAD_0 0x0046 +#define mmMMSCH_VFID_FIFO_HEAD_0_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_TAIL_0 0x0047 +#define mmMMSCH_VFID_FIFO_TAIL_0_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_HEAD_1 0x0048 +#define mmMMSCH_VFID_FIFO_HEAD_1_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_TAIL_1 0x0049 +#define mmMMSCH_VFID_FIFO_TAIL_1_BASE_IDX 0 +#define mmMMSCH_NACK_STATUS 0x004a +#define mmMMSCH_NACK_STATUS_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX0_DATA 0x004b +#define mmMMSCH_VF_MAILBOX0_DATA_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX1_DATA 0x004c +#define mmMMSCH_VF_MAILBOX1_DATA_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0 0x004d +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0 0x004e +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0 0x004f +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1 0x0050 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1 0x0051 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1 0x0052 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CNTXT_IP 0x0053 +#define mmMMSCH_GPUIOV_CNTXT_IP_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_2 0x0054 +#define mmMMSCH_GPUIOV_SCH_BLOCK_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_CONTROL_2 0x0055 +#define mmMMSCH_GPUIOV_CMD_CONTROL_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_2 0x0056 +#define mmMMSCH_GPUIOV_CMD_STATUS_2_BASE_IDX 0 +#define 
mmMMSCH_GPUIOV_VM_BUSY_STATUS_2 0x0057 +#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2 0x0058 +#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2 0x0059 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW6_2 0x005a +#define mmMMSCH_GPUIOV_DW6_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW7_2 0x005b +#define mmMMSCH_GPUIOV_DW7_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_DW8_2 0x005c +#define mmMMSCH_GPUIOV_DW8_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2 0x005d +#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2 0x005e +#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2_BASE_IDX 0 +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2 0x005f +#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_HEAD_2 0x0060 +#define mmMMSCH_VFID_FIFO_HEAD_2_BASE_IDX 0 +#define mmMMSCH_VFID_FIFO_TAIL_2 0x0061 +#define mmMMSCH_VFID_FIFO_TAIL_2_BASE_IDX 0 +#define mmMMSCH_VM_BUSY_STATUS_0 0x0062 +#define mmMMSCH_VM_BUSY_STATUS_0_BASE_IDX 0 +#define mmMMSCH_VM_BUSY_STATUS_1 0x0063 +#define mmMMSCH_VM_BUSY_STATUS_1_BASE_IDX 0 +#define mmMMSCH_VM_BUSY_STATUS_2 0x0064 +#define mmMMSCH_VM_BUSY_STATUS_2_BASE_IDX 0 + +#define MMSCH_VERSION_MAJOR 2 +#define MMSCH_VERSION_MINOR 0 +#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR) + +enum mmsch_v2_0_command_type { + MMSCH_COMMAND__DIRECT_REG_WRITE = 0, + MMSCH_COMMAND__DIRECT_REG_POLLING = 2, + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3, + MMSCH_COMMAND__INDIRECT_REG_WRITE = 8, + MMSCH_COMMAND__END = 0xf +}; + +struct mmsch_v2_0_init_header { + uint32_t version; + uint32_t header_size; + uint32_t vcn_init_status; + uint32_t vcn_table_offset; + uint32_t vcn_table_size; +}; + +struct mmsch_v2_0_cmd_direct_reg_header { + uint32_t reg_offset : 28; + uint32_t command_type : 4; +}; + +struct mmsch_v2_0_cmd_indirect_reg_header { + uint32_t reg_offset : 20; + uint32_t reg_idx_space : 8; + uint32_t command_type : 4; +}; + +struct mmsch_v2_0_cmd_direct_write { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; + uint32_t reg_value; +}; + +struct mmsch_v2_0_cmd_direct_read_modify_write { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; + uint32_t write_data; + uint32_t mask_value; +}; + +struct mmsch_v2_0_cmd_direct_polling { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; + uint32_t mask_value; + uint32_t wait_value; +}; + +struct mmsch_v2_0_cmd_end { + struct mmsch_v2_0_cmd_direct_reg_header cmd_header; +}; + +struct mmsch_v2_0_cmd_indirect_write { + struct mmsch_v2_0_cmd_indirect_reg_header cmd_header; + uint32_t reg_value; +}; + +static inline void mmsch_v2_0_insert_direct_wt(struct mmsch_v2_0_cmd_direct_write *direct_wt, + uint32_t *init_table, + uint32_t reg_offset, + uint32_t value) +{ + direct_wt->cmd_header.reg_offset = reg_offset; + direct_wt->reg_value = value; + memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v2_0_cmd_direct_write)); +} + +static inline void mmsch_v2_0_insert_direct_rd_mod_wt(struct mmsch_v2_0_cmd_direct_read_modify_write *direct_rd_mod_wt, + uint32_t *init_table, + uint32_t reg_offset, + uint32_t mask, uint32_t data) +{ + direct_rd_mod_wt->cmd_header.reg_offset = reg_offset; + direct_rd_mod_wt->mask_value = mask; + direct_rd_mod_wt->write_data = data; + memcpy((void *)init_table, direct_rd_mod_wt, + sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)); +} + +static inline void mmsch_v2_0_insert_direct_poll(struct mmsch_v2_0_cmd_direct_polling *direct_poll, + 
uint32_t *init_table, + uint32_t reg_offset, + uint32_t mask, uint32_t wait) +{ + direct_poll->cmd_header.reg_offset = reg_offset; + direct_poll->mask_value = mask; + direct_poll->wait_value = wait; + memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v2_0_cmd_direct_polling)); +} + +#define MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \ + mmsch_v2_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \ + init_table, (reg), \ + (mask), (data)); \ + init_table += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \ + table_size += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \ +} + +#define MMSCH_V2_0_INSERT_DIRECT_WT(reg, value) { \ + mmsch_v2_0_insert_direct_wt(&direct_wt, \ + init_table, (reg), \ + (value)); \ + init_table += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \ + table_size += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \ +} + +#define MMSCH_V2_0_INSERT_DIRECT_POLL(reg, mask, wait) { \ + mmsch_v2_0_insert_direct_poll(&direct_poll, \ + init_table, (reg), \ + (mask), (wait)); \ + init_table += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \ + table_size += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \ +} + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index cf557a428298..e08245a446fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -32,6 +32,7 @@ #include "soc15_common.h" #include "navi10_ih.h" +#define MAX_REARM_RETRY 10 static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev); @@ -284,6 +285,38 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev, } /** + * navi10_ih_irq_rearm - rearm IRQ if lost + * + * @adev: amdgpu_device pointer + * @ih: IH ring to rearm + * + */ +static void navi10_ih_irq_rearm(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) +{ + uint32_t reg_rptr = 0; + uint32_t v = 0; + uint32_t i = 0; + + if (ih == &adev->irq.ih) + reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR); + else if (ih == &adev->irq.ih1) + reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1); + else if (ih == &adev->irq.ih2) + reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2); + else + return; + + /* Rearm IRQ / re-write doorbell if doorbell write is lost */ + for (i = 0; i < MAX_REARM_RETRY; i++) { + v = RREG32_NO_KIQ(reg_rptr); + if ((v < ih->ring_size) && (v != ih->rptr)) + WDOORBELL32(ih->doorbell_index, ih->rptr); + else + break; + } +} + +/** * navi10_ih_set_rptr - set the IH ring buffer rptr * * @adev: amdgpu_device pointer @@ -297,6 +330,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev, /* XXX check if swapping is necessary on BE */ *ih->rptr_cpu = ih->rptr; WDOORBELL32(ih->doorbell_index, ih->rptr); + + if (amdgpu_sriov_vf(adev)) + navi10_ih_irq_rearm(adev, ih); } else WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr); } diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 2d1bebdf1603..033cbbca2072 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -516,7 +516,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); break; default: return -EINVAL;
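Back in the new mmsch_v2_0.h, the MMSCH_V2_0_INSERT_* macros serialize one fixed-size command record into a flat dword table and advance both the write cursor and the running table size by the record's length in dwords. A standalone sketch of that mechanism for the direct-write command (the struct mirrors mmsch_v2_0_cmd_direct_write, and command type 0 matches MMSCH_COMMAND__DIRECT_REG_WRITE from the enum above; the usage line is hypothetical):

#include <stdint.h>
#include <string.h>

struct direct_reg_header {
	uint32_t reg_offset   : 28;
	uint32_t command_type : 4;
};

struct direct_write {
	struct direct_reg_header cmd_header;
	uint32_t reg_value;
};

/* Append one DIRECT_REG_WRITE command and return the advanced cursor;
 * the real macros also bump a table_size counter by the same amount. */
static uint32_t *append_direct_write(uint32_t *table, uint32_t reg, uint32_t value)
{
	struct direct_write cmd = {
		.cmd_header = { .reg_offset = reg, .command_type = 0 },
		.reg_value = value,
	};

	memcpy(table, &cmd, sizeof(cmd));
	return table + sizeof(cmd) / 4;		/* advance in dwords */
}

/* hypothetical usage: table = append_direct_write(table, reg, vmid); */

The fixed-size records and explicit END command let the MMSCH firmware walk the table without any framing beyond the 4-bit command_type packed into each header.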
100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -31,6 +31,9 @@ #define GFX_CMD_RESERVED_MASK 0x7FF00000 #define GFX_CMD_RESPONSE_MASK 0x80000000 +/* USBC PD FW version retrieval command */ +#define C2PMSG_CMD_GFX_USB_PD_FW_VER 0x2000000 + /* TEE Gfx Command IDs for the register interface. * Command ID must be between 0x00010000 and 0x000F0000. */ diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 8ab3bf3158a9..67dd9d2d4b68 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -65,6 +65,9 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin"); /* memory training timeout define */ #define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000 +/* For large FW files the time to complete can be very long */ +#define USBC_PD_POLLING_LIMIT_S 240 + static int psp_v11_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -1109,6 +1112,82 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); } +static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_addr) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t reg_status; + int ret, i = 0; + + /* Write lower 32-bit address of the PD Controller FW */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, lower_32_bits(dma_addr)); + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret) + return ret; + + /* Fireup interrupt so PSP can pick up the lower address */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x800000); + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret) + return ret; + + reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35); + + if ((reg_status & 0xFFFF) != 0) { + DRM_ERROR("Lower address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %02x...\n", + reg_status & 0xFFFF); + return -EIO; + } + + /* Write upper 32-bit address of the PD Controller FW */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, upper_32_bits(dma_addr)); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret) + return ret; + + /* Fireup interrupt so PSP can pick up the upper address */ + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x4000000); + + /* FW load takes very long time */ + do { + msleep(1000); + reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35); + + if (reg_status & 0x80000000) + goto done; + + } while (++i < USBC_PD_POLLING_LIMIT_S); + + return -ETIME; +done: + + if ((reg_status & 0xFFFF) != 0) { + DRM_ERROR("Upper address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = x%04x\n", + reg_status & 0xFFFF); + return -EIO; + } + + return 0; +} + +static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) +{ + struct amdgpu_device *adev = psp->adev; + int ret; + + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); + + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (!ret) + *fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36); + + return ret; +} + static const struct psp_funcs psp_v11_0_funcs = { .init_microcode = psp_v11_0_init_microcode, .bootloader_load_kdb = psp_v11_0_bootloader_load_kdb, @@ -1133,6 +1212,8 @@ static const struct psp_funcs psp_v11_0_funcs = { .mem_training = psp_v11_0_memory_training, .ring_get_wptr = 
psp_v11_0_ring_get_wptr, .ring_set_wptr = psp_v11_0_ring_set_wptr, + .load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw, + .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e55884d204bd..9159bd46482b 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1801,13 +1801,9 @@ static int sdma_v4_0_late_init(void *handle) struct ras_ih_if ih_info = { .cb = sdma_v4_0_process_ras_data_cb, }; - int i; - /* read back edc counter registers to clear the counters */ - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { - for (i = 0; i < adev->sdma.num_instances; i++) - RREG32_SDMA(i, mmSDMA0_EDC_COUNTER); - } + if (adev->sdma.funcs && adev->sdma.funcs->reset_ras_error_count) + adev->sdma.funcs->reset_ras_error_count(adev); if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init) return adev->sdma.funcs->ras_late_init(adev, &ih_info); @@ -2572,10 +2568,22 @@ static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, return 0; }; +static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev) +{ + int i; + + /* read back edc counter registers to clear the counters */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { + for (i = 0; i < adev->sdma.num_instances; i++) + RREG32_SDMA(i, mmSDMA0_EDC_COUNTER); + } +} + static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = { .ras_late_init = amdgpu_sdma_ras_late_init, .ras_fini = amdgpu_sdma_ras_fini, .query_ras_error_count = sdma_v4_0_query_ras_error_count, + .reset_ras_error_count = sdma_v4_0_reset_ras_error_count, }; static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 2b488dfb2f21..a40499d51c93 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -89,6 +89,13 @@ #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 + +/* for Vega20/arcturus regiter offset change */ +#define mmROM_INDEX_VG20 0x00e4 +#define mmROM_INDEX_VG20_BASE_IDX 0 +#define mmROM_DATA_VG20 0x00e5 +#define mmROM_DATA_VG20_BASE_IDX 0 + /* * Indirect registers accessor */ @@ -309,6 +316,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev, { u32 *dw_ptr; u32 i, length_dw; + uint32_t rom_index_offset; + uint32_t rom_data_offset; if (bios == NULL) return false; @@ -321,11 +330,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev, dw_ptr = (u32 *)bios; length_dw = ALIGN(length_bytes, 4) / 4; + switch (adev->asic_type) { + case CHIP_VEGA20: + case CHIP_ARCTURUS: + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20); + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20); + break; + default: + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX); + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA); + break; + } + /* set rom index to 0 */ - WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); + WREG32(rom_index_offset, 0); /* read out the rom data */ for (i = 0; i < length_dw; i++) - dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); + dw_ptr[i] = RREG32(rom_data_offset); return true; } @@ -831,6 +852,15 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev) /* change this when we implement soft reset */ return true; } + 
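/* A note on the two RAS hooks that bracket this point: the SDMA refactor
 * above and vega20_reset_hdp_ras_error_count() below both rely on the same
 * hardware property — the EDC/error-count registers are clear-on-read, so
 * one discarded read per instance resets the counter. A minimal sketch of
 * the idiom; the function name here is illustrative only, not part of the
 * patch:
 */
static void example_clear_edc_counters(struct amdgpu_device *adev)
{
        int i;

        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA))
                return;

        /* the read value is discarded; the access itself clears the count */
        for (i = 0; i < adev->sdma.num_instances; i++)
                RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
}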
+static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev) +{ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP)) + return; + /*read back hdp ras counter to reset it to 0 */ + RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT); +} + static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, uint64_t *count1) { @@ -998,6 +1028,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .get_config_memsize = &soc15_get_config_memsize, .flush_hdp = &soc15_flush_hdp, .invalidate_hdp = &soc15_invalidate_hdp, + .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count, .need_full_reset = &soc15_need_full_reset, .init_doorbell_index = &vega20_doorbell_index_init, .get_pcie_usage = &vega20_get_pcie_usage, @@ -1243,6 +1274,10 @@ static int soc15_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); + if (adev->asic_funcs && + adev->asic_funcs->reset_hdp_ras_error_count) + adev->asic_funcs->reset_hdp_ras_error_count(adev); + if (adev->nbio.funcs->ras_late_init) r = adev->nbio.funcs->ras_late_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 793bf70e64b1..14d346321a5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -186,6 +186,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, if (rsmu_umc_index_state) umc_v6_1_disable_umc_index_mode(adev); + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + DRM_WARN("Fail to disable DF-Cstate.\n"); + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { umc_reg_offset = get_umc_6_reg_offset(adev, umc_inst, @@ -199,6 +203,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, &(err_data->ue_count)); } + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + DRM_WARN("Fail to enable DF-Cstate\n"); + if (rsmu_umc_index_state) umc_v6_1_enable_umc_index_mode(adev); } @@ -228,7 +236,11 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0); } - /* skip error address process if -ENOMEM */ + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); + + if (mc_umc_status == 0) + return; + if (!err_data->err_addr) { /* clear umc status */ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); @@ -236,7 +248,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, } err_rec = &err_data->err_addr[err_data->err_addr_cnt]; - mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); /* calculate error address if ue/ce error is detected */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && @@ -288,6 +299,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, if (rsmu_umc_index_state) umc_v6_1_disable_umc_index_mode(adev); + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + DRM_WARN("Fail to disable DF-Cstate.\n"); + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { umc_reg_offset = get_umc_6_reg_offset(adev, umc_inst, @@ -300,6 +315,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, umc_inst); } + if ((adev->asic_type == CHIP_ARCTURUS) && + amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + DRM_WARN("Fail to enable DF-Cstate\n"); + if (rsmu_umc_index_state) umc_v6_1_enable_umc_index_mode(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index c387c81f8695..f2745fd1ddb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -29,6 +29,7 @@ #include "soc15d.h" #include "amdgpu_pm.h" #include "amdgpu_psp.h" +#include "mmsch_v2_0.h" #include "vcn/vcn_2_0_0_offset.h" #include "vcn/vcn_2_0_0_sh_mask.h" @@ -54,7 +55,7 @@ static int vcn_v2_0_set_powergating_state(void *handle, enum amd_powergating_state state); static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx, struct dpg_pause_state *new_state); - +static int vcn_v2_0_start_sriov(struct amdgpu_device *adev); /** * vcn_v2_0_early_init - set function pointers * @@ -67,7 +68,10 @@ static int vcn_v2_0_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->vcn.num_vcn_inst = 1; - adev->vcn.num_enc_rings = 2; + if (amdgpu_sriov_vf(adev)) + adev->vcn.num_enc_rings = 1; + else + adev->vcn.num_enc_rings = 2; vcn_v2_0_set_dec_ring_funcs(adev); vcn_v2_0_set_enc_ring_funcs(adev); @@ -154,7 +158,10 @@ static int vcn_v2_0_sw_init(void *handle) for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst->ring_enc[i]; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; + if (!amdgpu_sriov_vf(adev)) + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; + else + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i; sprintf(ring->name, "vcn_enc%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); if (r) @@ -163,6 +170,10 @@ static int vcn_v2_0_sw_init(void *handle) adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode; + r = amdgpu_virt_alloc_mm_table(adev); + if (r) + return r; + return 0; } @@ -178,6 +189,8 @@ static int vcn_v2_0_sw_fini(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_virt_free_mm_table(adev); + r = amdgpu_vcn_suspend(adev); if (r) return r; @@ -203,6 +216,9 @@ static int vcn_v2_0_hw_init(void *handle) adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, ring->doorbell_index, 0); + if (amdgpu_sriov_vf(adev)) + vcn_v2_0_start_sriov(adev); + r = amdgpu_ring_test_helper(ring); if (r) goto done; @@ -304,6 +320,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); uint32_t offset; + if (amdgpu_sriov_vf(adev)) + return; + /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, @@ -448,6 +467,9 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev) { uint32_t data; + if (amdgpu_sriov_vf(adev)) + return; + /* UVD disable CGC */ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) @@ -606,6 +628,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev) { uint32_t data = 0; + if (amdgpu_sriov_vf(adev)) + return; + /* enable UVD CGC */ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) @@ -658,6 +683,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev) uint32_t data = 0; int ret; + if (amdgpu_sriov_vf(adev)) + return; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT @@ -705,6 +733,9 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev) uint32_t data = 0; int ret; 
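/* A pattern worth noting across this file's hunks: under SR-IOV the host
 * driver owns VCN clock gating, power gating and MC programming, so each of
 * those guest paths (mc_resume, disable/enable_clock_gating,
 * disable/enable_static_power_gating, set_clockgating_state) now returns
 * early on a virtual function instead of touching the registers, exactly as
 * in the guard added below. */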
+ if (amdgpu_sriov_vf(adev)) + return; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { /* Before power off, this indicator has to be turned on */ data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS); @@ -1215,6 +1246,9 @@ static int vcn_v2_0_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE); + if (amdgpu_sriov_vf(adev)) + return 0; + if (enable) { /* wait for STATUS to clear */ if (vcn_v2_0_is_idle(handle)) @@ -1631,6 +1665,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; + if (amdgpu_sriov_vf(adev)) + return 0; + WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 4); if (r) @@ -1667,6 +1704,11 @@ static int vcn_v2_0_set_powergating_state(void *handle, int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (amdgpu_sriov_vf(adev)) { + adev->vcn.cur_state = AMD_PG_STATE_UNGATE; + return 0; + } + if (state == adev->vcn.cur_state) return 0; @@ -1680,6 +1722,215 @@ static int vcn_v2_0_set_powergating_state(void *handle, return ret; } +static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev, + struct amdgpu_mm_table *table) +{ + uint32_t data = 0, loop; + uint64_t addr = table->gpu_addr; + struct mmsch_v2_0_init_header *header; + uint32_t size; + int i; + + header = (struct mmsch_v2_0_init_header *)table->cpu_addr; + size = header->header_size + header->vcn_table_size; + + /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr + * of memory descriptor location + */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr)); + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr)); + + /* 2, update vmid of descriptor */ + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID); + data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + /* use domain0 for MM scheduler */ + data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data); + + /* 3, notify mmsch about the size of this descriptor */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size); + + /* 4, set resp to zero */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0); + + adev->vcn.inst->ring_dec.wptr = 0; + adev->vcn.inst->ring_dec.wptr_old = 0; + vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec); + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + adev->vcn.inst->ring_enc[i].wptr = 0; + adev->vcn.inst->ring_enc[i].wptr_old = 0; + vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]); + } + + /* 5, kick off the initialization and wait until + * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero + */ + WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001); + + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); + loop = 1000; + while ((data & 0x10000002) != 0x10000002) { + udelay(10); + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); + loop--; + if (!loop) + break; + } + + if (!loop) { + DRM_ERROR("failed to init MMSCH, " \ + "mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data); + return -EBUSY; + } + + return 0; +} + +static int vcn_v2_0_start_sriov(struct amdgpu_device *adev) +{ + int r; + uint32_t tmp; + struct amdgpu_ring *ring; + uint32_t offset, size; + uint32_t table_size = 0; + struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} }; + struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} }; + struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} }; + struct mmsch_v2_0_cmd_end end = { {0} }; + struct mmsch_v2_0_init_header *header; + uint32_t *init_table = adev->virt.mm_table.cpu_addr; + 
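/* How the SR-IOV start path below works: rather than programming VCN
 * registers directly, the guest serializes its register writes into an
 * MMSCH v2.0 init table in system memory — a header followed by
 * direct-write / read-modify-write / poll / end command packets, built with
 * the MMSCH_V2_0_INSERT_* macros from mmsch_v2_0.h, each of which appends
 * one fixed-size packet and advances init_table and table_size in dwords.
 * vcn_v2_0_start_mmsch() above then hands the table's GPU address to the
 * MMSCH firmware via mmMMSCH_VF_CTX_ADDR_LO/HI and polls
 * mmMMSCH_VF_MAILBOX_RESP until the firmware reports completion. */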
uint8_t i = 0; + + header = (struct mmsch_v2_0_init_header *)init_table; + direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; + direct_rd_mod_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; + direct_poll.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_POLLING; + end.cmd_header.command_type = MMSCH_COMMAND__END; + + if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) { + header->version = MMSCH_VERSION; + header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2; + + header->vcn_table_offset = header->header_size; + + init_table += header->vcn_table_offset; + + size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + + MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), + 0xFFFFFFFF, 0x00000004); + + /* mc resume*/ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + tmp = AMDGPU_UCODE_ID_VCN; + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + adev->firmware.ucode[tmp].tmr_mc_addr_lo); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + adev->firmware.ucode[tmp].tmr_mc_addr_hi); + offset = 0; + } else { + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr)); + offset = size; + } + + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), + 0); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), + size); + + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr + offset)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr + offset)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), + 0); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), + AMDGPU_VCN_STACK_SIZE); + + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst->gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst->gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), + 0); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2), + AMDGPU_VCN_CONTEXT_SIZE); + + for (r = 0; r < adev->vcn.num_enc_rings; ++r) { + ring = &adev->vcn.inst->ring_enc[r]; + ring->wptr = 0; + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), + lower_32_bits(ring->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), + upper_32_bits(ring->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), + ring->ring_size / 4); + } + + ring = &adev->vcn.inst->ring_dec; + ring->wptr = 0; + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), + lower_32_bits(ring->gpu_addr)); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, + mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), + upper_32_bits(ring->gpu_addr)); + /* force RBC into idle state */ + tmp 
= order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + MMSCH_V2_0_INSERT_DIRECT_WT( + SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp); + + /* add end packet */ + tmp = sizeof(struct mmsch_v2_0_cmd_end); + memcpy((void *)init_table, &end, tmp); + table_size += (tmp / 4); + header->vcn_table_size = table_size; + + } + return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table); +} + static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { .name = "vcn_v2_0", .early_init = vcn_v2_0_early_init, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 8d56afd76eb3..0ec5f25adf56 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1169,7 +1169,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep, if (!dev) return -EINVAL; - dev->kfd2kgd->get_tile_config(dev->kgd, &config); + amdgpu_amdkfd_get_tile_config(dev->kgd, &config); args->gb_addr_config = config.gb_addr_config; args->num_banks = config.num_banks; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 692abfd2088a..77ea0f0cb163 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1734,7 +1734,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size, &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), - (void *)&(mem_obj->cpu_ptr), true); + (void *)&(mem_obj->cpu_ptr), false); return retval; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 1f8365575b12..15476fca8fa6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -187,7 +187,7 @@ static int create_signal_event(struct file *devkfd, if (p->signal_mapped_size && p->signal_event_count == p->signal_mapped_size / 8) { if (!p->signal_event_limit_reached) { - pr_warn("Signal event wasn't created because limit was reached\n"); + pr_debug("Signal event wasn't created because limit was reached\n"); p->signal_event_limit_reached = true; } return -ENOSPC; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 436b7f518979..48cda3073b70 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -87,9 +87,21 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, int retval; struct kfd_mem_obj *mqd_mem_obj = NULL; - /* From V9, for CWSR, the control stack is located on the next page - * boundary after the mqd, we will use the gtt allocation function - * instead of sub-allocation function. + /* For V9 only, due to a HW bug, the control stack of a user mode + * compute queue needs to be allocated just behind the page boundary + * of its regular MQD buffer. So we allocate an enlarged MQD buffer: + * the first page of the buffer serves as the regular MQD buffer + * purpose and the remaining is for control stack. 
Although the two + * parts are in the same buffer object, they need different memory + * types: MQD part needs UC (uncached) as usual, while control stack + * needs NC (non coherent), which is different from the UC type which + * is used when control stack is allocated in user space. + * + * Because of all those, we use the gtt allocation function instead + * of sub-allocation function for this enlarged MQD buffer. Moreover, + * in order to achieve two memory types in a single buffer object, we + * pass a special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct + * amdgpu memory functions to do so. */ if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 22abdbc6dfd7..fe0cd49d4ea7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -327,10 +327,10 @@ err_alloc_mem: static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) { struct qcm_process_device *qpd = &pdd->qpd; - uint32_t flags = ALLOC_MEM_FLAGS_GTT | - ALLOC_MEM_FLAGS_NO_SUBSTITUTE | - ALLOC_MEM_FLAGS_WRITABLE | - ALLOC_MEM_FLAGS_EXECUTABLE; + uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT | + KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE | + KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE | + KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; void *kaddr; int ret; @@ -641,6 +641,11 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, /* Indicate to other users that MM is no longer valid */ p->mm = NULL; + /* Signal the eviction fence after user mode queues are + * destroyed. This allows any BOs to be freed without + * triggering pointless evictions or waiting for fences. 
+ */ + dma_fence_signal(p->ef); mutex_unlock(&p->mutex); @@ -692,8 +697,9 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) { struct kfd_dev *dev = pdd->dev; struct qcm_process_device *qpd = &pdd->qpd; - uint32_t flags = ALLOC_MEM_FLAGS_GTT | - ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE; + uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT + | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE + | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; void *kaddr; int ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 5303877c081a..aa0bfa78a667 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -490,7 +490,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.num_sdma_queues_per_engine); sysfs_show_32bit_prop(buffer, "num_cp_queues", dev->node_props.num_cp_queues); - sysfs_show_64bit_prop(buffer, "unique_id", + sysfs_show_64bit_prop(buffer, "unique_id", dev->node_props.unique_id); if (dev->gpu) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 48661b9bb2b8..f7ab9ae58a06 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -902,7 +902,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.asic_id.chip_family = adev->family; - init_data.asic_id.pci_revision_id = adev->rev_id; + init_data.asic_id.pci_revision_id = adev->pdev->revision; init_data.asic_id.hw_internal_rev = adev->external_rev_id; init_data.asic_id.vram_width = adev->gmc.vram_width; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index c4fd148bf6e0..5b70ed3cdb88 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -412,6 +412,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->dig_be = config->link_enc_inst; link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; + link->dp.mst_supported = config->mst_supported; display->adjust.disable = 1; link->adjust.auth_delay = 2; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 318b474ff20e..2f2b0eb4ebcd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -400,8 +400,8 @@ bool dm_helpers_dp_mst_start_top_mgr( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); - return false; + DRM_ERROR("Failed to find connector for link!"); + return false; } if (boot) { @@ -423,8 +423,8 @@ void dm_helpers_dp_mst_stop_top_mgr( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); - return; + DRM_ERROR("Failed to find connector for link!"); + return; } DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n", @@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd( struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); + DRM_ERROR("Failed to find connector for link!"); return false; } @@ -463,7 +463,7 @@ bool dm_helpers_dp_write_dpcd( struct amdgpu_dm_connector *aconnector = 
link->priv; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); + DRM_ERROR("Failed to find connector for link!"); return false; } @@ -483,7 +483,7 @@ bool dm_helpers_submit_i2c( bool result; if (!aconnector) { - DRM_ERROR("Failed to found connector for link!"); + DRM_ERROR("Failed to find connector for link!"); return false; } @@ -538,7 +538,7 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link) struct amdgpu_dm_connector *aconnector = link->priv; if (!aconnector) { - BUG_ON("Failed to found connector for link!"); + BUG_ON("Failed to find connector for link!"); return true; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 00c8627eb60e..a9108e46f517 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -207,7 +207,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, dsc_caps, NULL, - &dc_sink->sink_dsc_caps.dsc_dec_caps)) + &dc_sink->dsc_caps.dsc_dec_caps)) return false; return true; @@ -262,8 +262,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) #if defined(CONFIG_DRM_AMD_DC_DCN) if (!validate_dsc_caps_on_connector(aconnector)) - memset(&aconnector->dc_sink->sink_dsc_caps, - 0, sizeof(aconnector->dc_sink->sink_dsc_caps)); + memset(&aconnector->dc_sink->dsc_caps, + 0, sizeof(aconnector->dc_sink->dsc_caps)); #endif } } @@ -537,7 +537,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); if (vars[i].dsc_enabled && dc_dsc_compute_config( params[i].sink->ctx->dc->res_pool->dscs[0], - ¶ms[i].sink->sink_dsc_caps.dsc_dec_caps, + ¶ms[i].sink->dsc_caps.dsc_dec_caps, params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, 0, params[i].timing, @@ -558,7 +558,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); dc_dsc_compute_config( param.sink->ctx->dc->res_pool->dscs[0], - ¶m.sink->sink_dsc_caps.dsc_dec_caps, + ¶m.sink->dsc_caps.dsc_dec_caps, param.sink->ctx->dc->debug.dsc_min_slice_height_override, (int) kbps, param.timing, &dsc_config); @@ -755,14 +755,14 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].sink = stream->sink; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; params[count].port = aconnector->port; - params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported; + params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy); if (!dc_dsc_compute_bandwidth_range( stream->sink->ctx->dc->res_pool->dscs[0], stream->sink->ctx->dc->debug.dsc_min_slice_height_override, dsc_policy.min_target_bpp, dsc_policy.max_target_bpp, - &stream->sink->sink_dsc_caps.dsc_dec_caps, + &stream->sink->dsc_caps.dsc_dec_caps, &stream->timing, ¶ms[count].bw_range)) params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); @@ -844,7 +844,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (!aconnector || !aconnector->dc_sink) continue; - if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported) + if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) 
continue; if (computed_streams[i]) diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index 5d081c42e81b..2c6db379afae 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -3265,33 +3265,33 @@ bool bw_calcs(struct dc_context *ctx, bw_fixed_to_int(bw_mul(data-> stutter_exit_watermark[9], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[0].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[1].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[2].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_entry_wm_ns[3].b_mark = + calcs_output->stutter_entry_wm_ns[0].b_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].b_mark = + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].b_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_entry_wm_ns[3].b_mark = + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].b_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].b_mark = + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].b_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_entry_wm_ns[5].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[9], bw_int_to_fixed(1000))); + stutter_entry_watermark[9], bw_int_to_fixed(1000))); calcs_output->urgent_wm_ns[0].b_mark = bw_fixed_to_int(bw_mul(data-> diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index f0f07b160152..3960a8db94cb 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -712,6 +712,11 @@ unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_ case PRID_DALI_DF: case PRID_DALI_E3: case PRID_DALI_E4: + case PRID_POLLOCK_94: + case PRID_POLLOCK_95: + case PRID_POLLOCK_E9: + case PRID_POLLOCK_EA: + case PRID_POLLOCK_EB: return 0; default: break; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 64cbd5462c79..ab267ddd4abe 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -46,6 +46,7 @@ /* Constants */ #define 
LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */ +#define SMU_VER_55_51_0 0x373300 /* SMU Version that is able to set DISPCLK below 100MHz */ /* Macros */ @@ -720,6 +721,13 @@ void rn_clk_mgr_construct( } else { struct clk_log_info log_info = {0}; + clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr); + + /* SMU Version 55.51.0 and up no longer have an issue + * that needs to limit minimum dispclk */ + if (clk_mgr->smu_ver >= SMU_VER_55_51_0) + debug->min_disp_clk_khz = 0; + /* TODO: Check we get what we expect during bringup */ clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 6dece1ee30bf..df285f57fe92 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1378,6 +1378,10 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) } dc->hwss.optimize_bandwidth(dc, context); + + dc->clk_optimized_required = false; + dc->wm_optimized_required = false; + return true; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 02e1ad318203..fb603bd46fac 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -585,14 +585,14 @@ static void read_current_link_settings_on_detect(struct dc_link *link) LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; } -static bool detect_dp( - struct dc_link *link, - struct display_sink_capability *sink_caps, - bool *converter_disable_audio, - struct audio_support *audio_support, - enum dc_detect_reason reason) +static bool detect_dp(struct dc_link *link, + struct display_sink_capability *sink_caps, + bool *converter_disable_audio, + struct audio_support *audio_support, + enum dc_detect_reason reason) { bool boot = false; + sink_caps->signal = link_detect_sink(link, reason); sink_caps->transaction_type = get_ddc_transaction_type(sink_caps->signal); @@ -609,9 +609,8 @@ static bool detect_dp( sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST; link->type = dc_connection_mst_branch; - dal_ddc_service_set_transaction_type( - link->ddc, - sink_caps->transaction_type); + dal_ddc_service_set_transaction_type(link->ddc, + sink_caps->transaction_type); /* * This call will initiate MST topology discovery. 
Which @@ -640,13 +639,10 @@ static bool detect_dp( if (reason == DETECT_REASON_BOOT) boot = true; - dm_helpers_dp_update_branch_info( - link->ctx, - link); + dm_helpers_dp_update_branch_info(link->ctx, link); - if (!dm_helpers_dp_mst_start_top_mgr( - link->ctx, - link, boot)) { + if (!dm_helpers_dp_mst_start_top_mgr(link->ctx, + link, boot)) { /* MST not supported */ link->type = dc_connection_single; sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; @@ -654,7 +650,7 @@ static bool detect_dp( } if (link->type != dc_connection_mst_branch && - is_dp_active_dongle(link)) { + is_dp_active_dongle(link)) { /* DP active dongles */ link->type = dc_connection_active_dongle; if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) { @@ -665,14 +661,15 @@ static bool detect_dp( return true; } - if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER) + if (link->dpcd_caps.dongle_type != + DISPLAY_DONGLE_DP_HDMI_CONVERTER) *converter_disable_audio = true; } } else { /* DP passive dongles */ sink_caps->signal = dp_passive_dongle_detection(link->ddc, - sink_caps, - audio_support); + sink_caps, + audio_support); } return true; @@ -973,6 +970,9 @@ static bool dc_link_detect_helper(struct dc_link *link, break; } + if (link->local_sink->edid_caps.panel_patch.disable_fec) + link->ctx->dc->debug.disable_fec = true; + // Check if edid is the same if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK))) same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); @@ -1498,9 +1498,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx) } } -static enum dc_status enable_link_dp( - struct dc_state *state, - struct pipe_ctx *pipe_ctx) +static enum dc_status enable_link_dp(struct dc_state *state, + struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; enum dc_status status; @@ -1532,7 +1531,8 @@ static enum dc_status enable_link_dp( pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; if (state->clk_mgr && !apply_seamless_boot_optimization) - state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); + state->clk_mgr->funcs->update_clocks(state->clk_mgr, + state, false); // during mode switch we do DP_SET_POWER off then on, and OUI is lost dpcd_set_source_specific_data(link); @@ -1540,21 +1540,20 @@ static enum dc_status enable_link_dp( skip_video_pattern = true; if (link_settings.link_rate == LINK_RATE_LOW) - skip_video_pattern = false; - - if (perform_link_training_with_retries( - &link_settings, - skip_video_pattern, - LINK_TRAINING_ATTEMPTS, - pipe_ctx, - pipe_ctx->stream->signal)) { + skip_video_pattern = false; + + if (perform_link_training_with_retries(&link_settings, + skip_video_pattern, + LINK_TRAINING_ATTEMPTS, + pipe_ctx, + pipe_ctx->stream->signal)) { link->cur_link_settings = link_settings; status = DC_OK; - } - else + } else { status = DC_FAIL_DP_LINK_TRAINING; + } - if (link->preferred_training_settings.fec_enable != NULL) + if (link->preferred_training_settings.fec_enable) fec_enable = *link->preferred_training_settings.fec_enable; else fec_enable = true; @@ -1766,8 +1765,7 @@ static void write_i2c_retimer_setting( slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. 
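The retimer hunks in this file all make the same substitution: an ASSERT on a failed I2C write becomes a jump to a single i2c_write_fail label that logs once and returns, so the sequence stops at the first failure instead of asserting and carrying on. A condensed sketch of the resulting shape — example_i2c_write() and the offset/value pairs are placeholders, not the driver's actual helper:

static void example_write_retimer_setting(struct pipe_ctx *pipe_ctx,
                                          uint8_t slave_address)
{
        uint8_t buffer[2];

        buffer[0] = 0x0A;       /* placeholder register offset */
        buffer[1] = 0x17;       /* placeholder value */
        if (!example_i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)))
                goto i2c_write_fail;

        buffer[0] = 0x0B;
        buffer[1] = 0xDA;
        if (!example_i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)))
                goto i2c_write_fail;

        return;

i2c_write_fail:
        DC_LOG_DEBUG("Set retimer failed");
}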
@@ -1785,8 +1783,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } buffer[0] = offset; @@ -1798,8 +1795,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1819,8 +1815,7 @@ static void write_i2c_retimer_setting( slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. @@ -1838,8 +1833,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } buffer[0] = offset; @@ -1851,8 +1845,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1870,8 +1863,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -1882,8 +1874,7 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -1894,10 +1885,14 @@ static void write_i2c_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set retimer failed"); } static void write_i2c_default_retimer_setting( @@ -1922,8 +1917,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1934,8 +1928,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0B to 0xDA or 0xD8 */ buffer[0] = 0x0B; @@ -1946,8 +1939,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1958,8 +1950,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0C to 0x1D or 0x91 */ buffer[0] = 0x0C; @@ 
-1970,8 +1961,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1982,8 +1972,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; if (is_vga_mode) { @@ -1998,8 +1987,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -2010,8 +1998,7 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -2022,9 +2009,13 @@ static void write_i2c_default_retimer_setting( offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n", slave_address, buffer[0], buffer[1], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set default retimer failed"); } static void write_i2c_redriver_setting( @@ -2053,8 +2044,7 @@ static void write_i2c_redriver_setting( slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + DC_LOG_DEBUG("Set redriver failed"); } static void disable_link(struct dc_link *link, enum signal_type signal) @@ -2960,6 +2950,8 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst; config.dpms_off = dpms_off; config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; + config.mst_supported = (pipe_ctx->stream->signal == + SIGNAL_TYPE_DISPLAY_PORT_MST); cp_psp->funcs.update_stream_config(cp_psp->handle, &config); } } @@ -3077,9 +3069,14 @@ void core_link_enable_stream( if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal)) - dp_set_dsc_enable(pipe_ctx, true); + dc_is_virtual_signal(pipe_ctx->stream->signal)) { + /* Here we only need to enable DSC on RX. DSC HW programming + * was done earlier, as part of timing programming. + */ + dp_set_dsc_on_rx(pipe_ctx, true); + } } + dc->hwss.enable_stream(pipe_ctx); /* Set DPS PPS SDP (AKA "info frames") */ @@ -3106,7 +3103,7 @@ void core_link_enable_stream( } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) - dp_set_dsc_enable(pipe_ctx, true); + dp_set_dsc_on_rx(pipe_ctx, true); } } @@ -3410,7 +3407,7 @@ uint32_t dc_link_bandwidth_kbps( link_bw_kbps *= 8; /* 8 bits per byte*/ link_bw_kbps *= link_setting->lane_count; - if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { + if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) { /* Account for FEC overhead. 
* We have to do it based on caps, * and not based on FEC being set ready, @@ -3454,3 +3451,11 @@ void dc_link_overwrite_extended_receiver_cap( dp_overwrite_extended_receiver_cap(link); } +bool dc_link_is_fec_supported(const struct dc_link *link) +{ + return (dc_is_dp_signal(link->connector_signal) && + link->link_enc->features.fec_supported && + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && + !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); +} + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 93127bc90f3c..9553755be286 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1446,11 +1446,15 @@ enum link_training_result dc_link_dp_perform_link_training( &link->preferred_training_settings, <_settings); - /* 1. set link rate, lane count and spread. */ + /* Configure lttpr mode */ + if (!link->is_lttpr_mode_transparent) + configure_lttpr_mode(link); + if (link->ctx->dc->work_arounds.lt_early_cr_pattern) start_clock_recovery_pattern_early(link, <_settings, DPRX); - else - dpcd_set_link_settings(link, <_settings); + + /* 1. set link rate, lane count and spread. */ + dpcd_set_link_settings(link, <_settings); if (link->preferred_training_settings.fec_enable != NULL) fec_enable = *link->preferred_training_settings.fec_enable; @@ -1460,8 +1464,6 @@ enum link_training_result dc_link_dp_perform_link_training( dp_set_fec_ready(link, fec_enable); if (!link->is_lttpr_mode_transparent) { - /* Configure lttpr mode */ - configure_lttpr_mode(link); /* 2. perform link training (set link training done * to false is done as well) @@ -1669,11 +1671,11 @@ enum link_training_result dc_link_dp_sync_lt_attempt( dp_set_panel_mode(link, panel_mode); /* Attempt to train with given link training settings */ - /* Set link rate, lane count and spread. */ if (link->ctx->dc->work_arounds.lt_early_cr_pattern) start_clock_recovery_pattern_early(link, <_settings, DPRX); - else - dpcd_set_link_settings(link, <_settings); + + /* Set link rate, lane count and spread. */ + dpcd_set_link_settings(link, <_settings); /* 2. 
perform link training (set link training done * to false is done as well) @@ -3720,7 +3722,8 @@ static void set_crtc_test_pattern(struct dc_link *link, struct pipe_ctx *odm_pipe; enum controller_dp_color_space controller_color_space; int opp_cnt = 1; - uint16_t count = 0; + int offset = 0; + int dpg_width = width; switch (test_pattern_color_space) { case DP_TEST_PATTERN_COLOR_SPACE_RGB: @@ -3742,33 +3745,30 @@ static void set_crtc_test_pattern(struct dc_link *link, for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; + dpg_width = width / opp_cnt; + offset = dpg_width; - width /= opp_cnt; + opp->funcs->opp_set_disp_pattern_generator(opp, + controller_test_pattern, + controller_color_space, + color_depth, + NULL, + dpg_width, + height, + 0); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; - odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp, controller_test_pattern, controller_color_space, color_depth, NULL, - width, - height); - } - opp->funcs->opp_set_disp_pattern_generator(opp, - controller_test_pattern, - controller_color_space, - color_depth, - NULL, - width, - height); - /* wait for dpg to blank pixel data with test pattern */ - for (count = 0; count < 1000; count++) { - if (opp->funcs->dpg_is_blanked(opp)) - break; - udelay(100); + dpg_width, + height, + offset); + offset += offset; } } } @@ -3786,11 +3786,12 @@ static void set_crtc_test_pattern(struct dc_link *link, else if (opp->funcs->opp_set_disp_pattern_generator) { struct pipe_ctx *odm_pipe; int opp_cnt = 1; + int dpg_width = width; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; - width /= opp_cnt; + dpg_width = width / opp_cnt; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp; @@ -3800,16 +3801,18 @@ static void set_crtc_test_pattern(struct dc_link *link, CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - width, - height); + dpg_width, + height, + 0); } opp->funcs->opp_set_disp_pattern_generator(opp, CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, CONTROLLER_DP_COLOR_SPACE_UDEFINED, color_depth, NULL, - width, - height); + dpg_width, + height, + 0); } } break; @@ -3987,6 +3990,11 @@ bool dc_link_dp_set_test_pattern( default: break; } + + if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) + pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable( + pipe_ctx->stream_res.tg); + pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); /* update MSA to requested color space */ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, &pipe_ctx->stream->timing, @@ -3994,9 +4002,27 @@ bool dc_link_dp_set_test_pattern( pipe_ctx->stream->use_vsc_sdp_for_colorimetry, link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) { + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range + else + pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7); + resource_build_info_frame(pipe_ctx); + link->dc->hwss.update_info_frame(pipe_ctx); + } + /* CRTC Patterns */ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); - + 
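/* Why the added sequence below is shaped this way: the OTG lock taken
 * before the MSA/pattern reprogramming is released first, then the timing
 * generator is stepped through VACTIVE -> VBLANK -> VACTIVE so the
 * double-buffered registers get a full vertical-blank boundary to latch
 * before the double-buffer lock is finally dropped. */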
pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VACTIVE); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VACTIVE); + if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) + pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable( + pipe_ctx->stream_res.tg); /* Set Test Pattern state */ link->test_pattern_enabled = true; } @@ -4126,8 +4152,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready) struct link_encoder *link_enc = link->link_enc; uint8_t fec_config = 0; - if (link->dc->debug.disable_fec || - IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)) + if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec) return; if (link_enc->funcs->fec_set_ready && @@ -4162,8 +4187,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) { struct link_encoder *link_enc = link->link_enc; - if (link->dc->debug.disable_fec || - IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)) + if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec) return; if (link_enc->funcs->fec_set_enable && diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 58634f191a55..ac2103dec9e7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -394,7 +394,7 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc, DC_LOG_DSC("\tslice_width %d", config->slice_width); } -static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) +bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) { struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; @@ -431,6 +431,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; @@ -535,6 +536,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; DC_LOG_DSC(" "); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 572ce3842535..3a1a5aef524d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -2171,10 +2171,10 @@ enum dc_status dc_validate_global_state( if (pipe_ctx->stream != stream) continue; - if (dc->res_pool->funcs->get_default_swizzle_mode && + if (dc->res_pool->funcs->patch_unknown_plane_state && pipe_ctx->plane_state && pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) { - result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state); + result = dc->res_pool->funcs->patch_unknown_plane_state(pipe_ctx->plane_state); if (result != DC_OK) return result; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index bc1220dce3b1..1e6413a79d47 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.74" +#define DC_VER "3.2.76" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -230,6 +230,7 @@ struct dc_config { bool forced_clocks; bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well bool multi_mon_pp_mclk_switch; + bool psr_on_dmub; }; enum visual_confirm { @@ -389,6 +390,7 @@ struct dc_debug_options { int always_scale; bool disable_pplib_clock_request; bool disable_clock_gate; + bool disable_mem_low_power; bool disable_dmcu; bool disable_psr; bool force_abm_enable; @@ -410,7 +412,6 @@ struct dc_debug_options { bool dmub_offload_enabled; bool dmcub_emulation; bool dmub_command_table; /* for testing only */ - bool psr_on_dmub; struct dc_bw_validation_profile bw_val_profile; bool disable_fec; bool disable_48mhz_pwrdwn; @@ -1024,6 +1025,11 @@ struct dc_sink_dsc_caps { struct dsc_dec_dpcd_caps dsc_dec_caps; }; +struct dc_sink_fec_caps { + bool is_rx_fec_supported; + bool is_topology_fec_supported; +}; + /* * The sink structure contains EDID and other display device properties */ @@ -1037,7 +1043,8 @@ struct dc_sink { struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX]; bool converter_disable_audio; - struct dc_sink_dsc_caps sink_dsc_caps; + struct dc_sink_dsc_caps dsc_caps; + struct dc_sink_fec_caps fec_caps; /* private to DC core */ struct dc_link *link; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index c45c7680fa58..00ff5e98278c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -333,4 +333,7 @@ bool dc_submit_i2c_oem( uint32_t dc_bandwidth_in_kbps_from_timing( const struct dc_crtc_timing *timing); + +bool dc_link_is_fec_supported(const struct dc_link *link); + #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 299f6e00f576..0d210104ba0a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -231,6 +231,7 @@ struct dc_panel_patch { unsigned int extra_t7_ms; unsigned int skip_scdc_overwrite; unsigned int delay_ignore_msa; + unsigned int disable_fec; }; struct dc_edid_caps { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 2c932c29f1f9..9c88a92bd96a 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -144,7 +144,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, } } - if (!pipe_ctx || !&pipe_ctx->plane_res || !&pipe_ctx->stream_res) + if (!pipe_ctx) return false; // First, set the psr version @@ -235,6 +235,6 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx) */ void dmub_psr_destroy(struct dmub_psr **dmub) { - kfree(dmub); + kfree(*dmub); *dmub = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 385250e1e3fd..21c7c1b010ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2717,30 +2717,20 @@ void dcn10_optimize_bandwidth( hws->funcs.verify_allow_pstate_change_high(dc); if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - if (context->stream_count == 0) { + if (context->stream_count == 0) context->bw_ctx.bw.dcn.clk.phyclk_khz = 0; - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - context, - true); - } else if (dc->clk_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) { - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - context, - true); - } - } - - if (dc->wm_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) { - hubbub->funcs->program_watermarks(hubbub, - &context->bw_ctx.bw.dcn.watermarks, - dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, true); } - dc->clk_optimized_required = false; - dc->wm_optimized_required = false; + hubbub->funcs->program_watermarks(hubbub, + &context->bw_ctx.bw.dcn.watermarks, + dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, + true); + dcn10_stereo_hw_frame_pack_wa(dc, context); if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index a9a43b397db9..63acb8ff7462 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -299,7 +299,6 @@ void optc1_set_vtg_params(struct timing_generator *optc, uint32_t asic_blank_end; uint32_t v_init; uint32_t v_fp2 = 0; - int32_t vertical_line_start; struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -316,9 +315,8 @@ void optc1_set_vtg_params(struct timing_generator *optc, patched_crtc_timing.v_border_top; /* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */ - vertical_line_start = asic_blank_end - optc1->vstartup_start + 1; - if (vertical_line_start < 0) - v_fp2 = -vertical_line_start; + if (optc1->vstartup_start > asic_blank_end) + v_fp2 = optc1->vstartup_start - asic_blank_end; /* Interlace */ if (REG(OTG_INTERLACE_CONTROL)) { @@ -1195,7 +1193,7 @@ static void optc1_enable_stereo(struct timing_generator *optc, REG_UPDATE_3(OTG_STEREO_CONTROL, OTG_STEREO_EN, stereo_en, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0, - OTG_STEREO_SYNC_OUTPUT_POLARITY, 0); + OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 
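Two small but real bugs are fixed in dmub_psr.c above. The !&pipe_ctx->plane_res checks could never fail (the address of a struct member is never NULL), so the guard collapses to !pipe_ctx; and dmub_psr_destroy() was passing the handle's own address to kfree() rather than the allocation it points to. The corrected destroy pattern, sketched in plain C with free() standing in for kfree():

#include <stdlib.h>

struct dmub_psr;  /* opaque here; the real struct lives in dmub_psr.h */

/* Takes a pointer-to-pointer so it can free the object *and* clear the
 * caller's handle. kfree(dmub) freed the address of the handle itself,
 * leaking the object. */
static void dmub_psr_destroy_sketch(struct dmub_psr **dmub)
{
        free(*dmub);
        *dmub = NULL;
}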
0 : 1); if (flags->PROGRAM_POLARITY) REG_UPDATE(OTG_STEREO_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 3b71898e859e..95fda0b7523e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1233,7 +1233,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont return DC_OK; } -static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state) +static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; @@ -1295,7 +1295,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = { .validate_plane = dcn10_validate_plane, .validate_global = dcn10_validate_global, .add_stream_to_ctx = dcn10_add_stream_to_ctx, - .get_default_swizzle_mode = dcn10_get_default_swizzle_mode, + .patch_unknown_plane_state = dcn10_patch_unknown_plane_state, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index 13e057d7ee93..42bba7c9548b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -369,84 +369,6 @@ void dpp2_set_cursor_attributes( } } -#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) - -bool dpp2_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps) -{ - /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ - if (scl_data->viewport.width != scl_data->h_active && - scl_data->viewport.height != scl_data->v_active && - dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && - scl_data->format == PIXEL_FORMAT_FP16) - return false; - - if (scl_data->viewport.width > scl_data->h_active && - dpp->ctx->dc->debug.max_downscale_src_width != 0 && - scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) - return false; - - /* TODO: add lb check */ - - /* No support for programming ratio of 8, drop to 7.99999.. 
*/ - if (scl_data->ratios.horz.value == (8ll << 32)) - scl_data->ratios.horz.value--; - if (scl_data->ratios.vert.value == (8ll << 32)) - scl_data->ratios.vert.value--; - if (scl_data->ratios.horz_c.value == (8ll << 32)) - scl_data->ratios.horz_c.value--; - if (scl_data->ratios.vert_c.value == (8ll << 32)) - scl_data->ratios.vert_c.value--; - - /* Set default taps if none are provided */ - if (in_taps->h_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz) > 4) - scl_data->taps.h_taps = 8; - else - scl_data->taps.h_taps = 4; - } else - scl_data->taps.h_taps = in_taps->h_taps; - if (in_taps->v_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert) > 4) - scl_data->taps.v_taps = 8; - else - scl_data->taps.v_taps = 4; - } else - scl_data->taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4) - scl_data->taps.v_taps_c = 4; - else - scl_data->taps.v_taps_c = 2; - } else - scl_data->taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4) - scl_data->taps.h_taps_c = 4; - else - scl_data->taps.h_taps_c = 2; - } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - /* Only 1 and even h_taps_c are supported by hw */ - scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; - else - scl_data->taps.h_taps_c = in_taps->h_taps_c; - - if (!dpp->ctx->dc->debug.always_scale) { - if (IDENTITY_RATIO(scl_data->ratios.horz)) - scl_data->taps.h_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert)) - scl_data->taps.v_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.horz_c)) - scl_data->taps.h_taps_c = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert_c)) - scl_data->taps.v_taps_c = 1; - } - - return true; -} - void oppn20_dummy_program_regamma_pwl( struct dpp *dpp, const struct pwl_params *params, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 6bdfee20b6a7..1b1ae9ce2799 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -369,6 +369,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_ dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable; dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth; dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1; + dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0; // TODO: in addition to validating slice height (pic height must be divisible by slice height), // see what happens when the same condition doesn't apply for slice_width/pic_width. @@ -531,7 +532,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, cons reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6; reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size; - reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 
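The dcn20_dsc.c change above moves the ICH (indexed color history) end-of-line reset decision out of dsc_update_from_dsc_parameters() and into dsc_prepare_config(), where the new is_odm flag is visible: the history must reset at every line end whenever the picture is split horizontally, whether by multiple DSC slices or by ODM combine. The rule restated as a sketch (the helper name is illustrative; 0xF sets the per-slice reset bits):

#include <stdbool.h>

static unsigned int dsc_ich_reset_at_eol(bool is_odm, int num_slices_h)
{
        /* Reset the indexed color history at end-of-line whenever the
         * picture is carved up horizontally; keep it running otherwise. */
        return (is_odm || num_slices_h > 1) ? 0xF : 0;
}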
0 : 0xf; } static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 97c0c8ced8e5..03f0c9914520 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -307,7 +307,8 @@ void dcn20_init_blank( COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, - otg_active_height); + otg_active_height, + 0); if (num_opps == 2) { bottom_opp->funcs->opp_set_disp_pattern_generator( @@ -317,7 +318,8 @@ void dcn20_init_blank( COLOR_DEPTH_UNDEFINED, &black_color, otg_active_width, - otg_active_height); + otg_active_height, + 0); } hws->funcs.wait_for_blank_complete(opp); @@ -621,6 +623,13 @@ enum dc_status dcn20_enable_stream_timing( /* TODO check if timing_changed, disable stream if timing changed */ + /* Have to setup DSC here to make sure the bandwidth sent to DIG BE won't be bigger than + * what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag will be automatically + * set at a later time when the video is enabled (DP_VID_STREAM_EN = 1). + */ + if (pipe_ctx->stream->timing.flags.DSC) + dp_set_dsc_on_stream(pipe_ctx, true); + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst; opp_cnt++; @@ -974,7 +983,8 @@ void dcn20_blank_pixel_data( stream->timing.display_color_depth, &black_color, width, - height); + height, + 0); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator( @@ -985,7 +995,8 @@ void dcn20_blank_pixel_data( stream->timing.display_color_depth, &black_color, width, - height); + height, + 0); } if (!blank) @@ -1656,22 +1667,16 @@ void dcn20_optimize_bandwidth( { struct hubbub *hubbub = dc->res_pool->hubbub; - if (dc->wm_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) { - /* program dchubbub watermarks */ - hubbub->funcs->program_watermarks(hubbub, - &context->bw_ctx.bw.dcn.watermarks, - dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, - true); - dc->wm_optimized_required = false; - } + /* program dchubbub watermarks */ + hubbub->funcs->program_watermarks(hubbub, + &context->bw_ctx.bw.dcn.watermarks, + dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, + true); - if (dc->clk_optimized_required || IS_DIAG_DC(dc->ctx->dce_environment)) { - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - context, - true); - dc->wm_optimized_required = false; - } + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + true); } bool dcn20_update_bandwidth( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c index 023cc71fad0f..138321e151eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c @@ -45,7 +45,8 @@ void opp2_set_disp_pattern_generator( enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, - int height) + int height, + int offset) { struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); enum test_pattern_color_format bit_depth; @@ -92,6 +93,11 @@ void opp2_set_disp_pattern_generator( DPG_ACTIVE_WIDTH, width, DPG_ACTIVE_HEIGHT, height); + /* set DPG offset */ + REG_SET_2(DPG_OFFSET_SEGMENT, 0, + DPG_X_OFFSET, offset, + DPG_SEGMENT_WIDTH, 0); + switch (test_pattern) { case 
CONTROLLER_DP_TEST_PATTERN_COLORSQUARES: case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA: diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h index 4093bec172c1..64c5b429c79a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h @@ -36,6 +36,7 @@ #define OPP_DPG_REG_LIST(id) \ SRI(DPG_CONTROL, DPG, id), \ SRI(DPG_DIMENSIONS, DPG, id), \ + SRI(DPG_OFFSET_SEGMENT, DPG, id), \ SRI(DPG_COLOUR_B_CB, DPG, id), \ SRI(DPG_COLOUR_G_Y, DPG, id), \ SRI(DPG_COLOUR_R_CR, DPG, id), \ @@ -53,6 +54,7 @@ uint32_t FMT_422_CONTROL; \ uint32_t DPG_CONTROL; \ uint32_t DPG_DIMENSIONS; \ + uint32_t DPG_OFFSET_SEGMENT; \ uint32_t DPG_COLOUR_B_CB; \ uint32_t DPG_COLOUR_G_Y; \ uint32_t DPG_COLOUR_R_CR; \ @@ -68,6 +70,8 @@ OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \ OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \ OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \ + OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \ + OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \ OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \ OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \ OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \ @@ -97,6 +101,8 @@ type DPG_HRES; \ type DPG_ACTIVE_WIDTH; \ type DPG_ACTIVE_HEIGHT; \ + type DPG_X_OFFSET; \ + type DPG_SEGMENT_WIDTH; \ type DPG_COLOUR0_R_CR; \ type DPG_COLOUR1_R_CR; \ type DPG_COLOUR0_B_CB; \ @@ -144,7 +150,8 @@ void opp2_set_disp_pattern_generator( enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, - int height); + int height, + int offset); bool opp2_dpg_is_blanked(struct output_pixel_processor *opp); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 78971b6b195c..de7b12520d72 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -153,6 +153,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = { .xfc_supported = true, .xfc_fill_bw_overhead_percent = 10.0, .xfc_fill_constant_bytes = 0, + .number_of_cursors = 1, }; struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = { @@ -220,7 +221,8 @@ struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = { .xfc_supported = true, .xfc_fill_bw_overhead_percent = 10.0, .xfc_fill_constant_bytes = 0, - .ptoi_supported = 0 + .ptoi_supported = 0, + .number_of_cursors = 1, }; struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { @@ -335,6 +337,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { .use_urgent_burst_bw = 0 }; +struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = { + .clock_limits = { + { + .state = 0, + .dcfclk_mhz = 560.0, + .fabricclk_mhz = 560.0, + .dispclk_mhz = 513.0, + .dppclk_mhz = 513.0, + .phyclk_mhz = 540.0, + .socclk_mhz = 560.0, + .dscclk_mhz = 171.0, + .dram_speed_mts = 8960.0, + }, + { + .state = 1, + .dcfclk_mhz = 694.0, + .fabricclk_mhz = 694.0, + .dispclk_mhz = 642.0, + .dppclk_mhz = 642.0, + .phyclk_mhz = 600.0, + .socclk_mhz = 694.0, + .dscclk_mhz = 214.0, + .dram_speed_mts = 11104.0, + }, + { + .state = 2, + .dcfclk_mhz = 875.0, + .fabricclk_mhz = 875.0, + .dispclk_mhz = 734.0, + .dppclk_mhz = 734.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 875.0, + .dscclk_mhz = 245.0, + .dram_speed_mts = 14000.0, + }, + { + .state = 3, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 1000.0, + .dispclk_mhz = 1100.0, + .dppclk_mhz = 1100.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1000.0, + 
.dscclk_mhz = 367.0, + .dram_speed_mts = 16000.0, + }, + { + .state = 4, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + /*Extra state, no dispclk ramping*/ + { + .state = 5, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + }, + .num_states = 5, + .sr_exit_time_us = 8.6, + .sr_enter_plus_exit_time_us = 10.9, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 40.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 40.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 2, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 131, + .urgent_out_of_order_return_per_channel_bytes = 256, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 8, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 404.0, + .dummy_pstate_latency_us = 5.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3850, + .xfc_bus_transport_time_us = 20, + .xfc_xbuf_latency_tolerance_us = 4, + .use_urgent_burst_bw = 0 +}; + struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 }; #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL @@ -1143,6 +1256,7 @@ static const struct encoder_feature_support link_enc_feature = { .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, + .fec_supported = true, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, @@ -2041,14 +2155,17 @@ int dcn20_populate_dml_pipes_from_context( /* todo: default max for now, until there is logic reflecting this in dc*/ pipes[pipe_cnt].dout.output_bpc = 12; /* - * Use max cursor settings for calculations to minimize + * For graphic plane, cursor number is 1, nv12 is 0 * bw calculations due to cursor on/off */ - pipes[pipe_cnt].pipe.src.num_cursors = 2; + if (res_ctx->pipe_ctx[i].plane_state && + res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) + pipes[pipe_cnt].pipe.src.num_cursors = 0; + else + pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors; + pipes[pipe_cnt].pipe.src.cur0_src_width = 256; pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit; - pipes[pipe_cnt].pipe.src.cur1_src_width = 256; - pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit; if (!res_ctx->pipe_ctx[i].plane_state) { pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled; @@ -2298,6 +2415,7 @@ bool 
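The DML population change above stops feeding the worst case of two cursors into every pipe's bandwidth calculation. The cursor count now comes from the per-ASIC number_of_cursors IP parameter (1 in the DCN2.x tables in this series), and video-progressive (NV12) planes report zero since they never carry the hardware cursor. A self-contained sketch of the selection; the enum here is a local stand-in for the driver's plane address types:

#include <stdbool.h>

enum pln_addr_type { PLN_ADDR_TYPE_GRAPHICS, PLN_ADDR_TYPE_VIDEO_PROGRESSIVE };

static unsigned int dml_cursor_count(bool has_plane, enum pln_addr_type type,
                                     unsigned int ip_number_of_cursors)
{
        if (has_plane && type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
                return 0;                  /* video (NV12) plane: no HW cursor */
        return ip_number_of_cursors;       /* graphics plane: per-IP count */
}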
dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) + stream->timing.v_border_bottom; dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; @@ -3026,7 +3144,7 @@ static struct dc_cap_funcs cap_funcs = { }; -enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state) +enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; @@ -3052,7 +3170,7 @@ static struct resource_funcs dcn20_res_pool_funcs = { .add_stream_to_ctx = dcn20_add_stream_to_ctx, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, - .get_default_swizzle_mode = dcn20_get_default_swizzle_mode, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .set_mcif_arb_params = dcn20_set_mcif_arb_params, .populate_dml_pipes = dcn20_populate_dml_pipes_from_context, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link @@ -3290,6 +3408,9 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( uint32_t hw_internal_rev) { + if (ASICREV_IS_NAVI14_M(hw_internal_rev)) + return &dcn2_0_nv14_soc; + if (ASICREV_IS_NAVI12_P(hw_internal_rev)) return &dcn2_0_nv12_soc; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index f5893840b79b..5eadca0ae7ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -159,7 +159,7 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream); enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); -enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state); +enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state); void dcn20_patch_bounding_box( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index dce4966eca20..158f7c8b55ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -156,7 +156,8 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = { .xfc_supported = false, .xfc_fill_bw_overhead_percent = 10.0, .xfc_fill_constant_bytes = 0, - .ptoi_supported = 0 + .ptoi_supported = 0, + .number_of_cursors = 1, }; struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { @@ -1589,6 +1590,7 @@ static const struct encoder_feature_support link_enc_feature = { .max_hdmi_pixel_clock = 600000, .hdmi_ycbcr420_supported = true, .dp_ycbcr420_supported = true, + .fec_supported = true, .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, @@ -1729,6 +1731,19 @@ static int dcn21_populate_dml_pipes_from_context( return pipe_cnt; } +enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state 
*plane_state) +{ + enum dc_status result = DC_OK; + + if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) { + plane_state->dcc.enable = 1; + /* align to our worst case block width */ + plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024; + } + result = dcn20_patch_unknown_plane_state(plane_state); + return result; +} + static struct resource_funcs dcn21_res_pool_funcs = { .destroy = dcn21_destroy_resource_pool, .link_enc_create = dcn21_link_encoder_create, @@ -1738,7 +1753,7 @@ static struct resource_funcs dcn21_res_pool_funcs = { .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, - .get_default_swizzle_mode = dcn20_get_default_swizzle_mode, + .patch_unknown_plane_state = dcn21_patch_unknown_plane_state, .set_mcif_arb_params = dcn20_set_mcif_arb_params, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, .update_bw_bounding_box = update_bw_bounding_box @@ -1785,6 +1800,7 @@ static bool dcn21_resource_construct( dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; + dc->caps.is_apu = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; @@ -1848,7 +1864,7 @@ static bool dcn21_resource_construct( goto create_fail; } - if (dc->debug.psr_on_dmub) { + if (dc->config.psr_on_dmub) { pool->base.psr = dmub_psr_create(ctx); if (pool->base.psr == NULL) { diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h index 626d22d437f4..968c46dfb506 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -32,6 +32,7 @@ struct cp_psp_stream_config { uint8_t otg_inst; uint8_t link_enc_inst; uint8_t stream_enc_inst; + uint8_t mst_supported; void *dm_stream_ctx; bool dpms_off; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index a56b611db15e..dfd3be452766 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -69,6 +69,7 @@ struct _vcs_dpi_voltage_scaling_st { struct _vcs_dpi_soc_bounding_box_st { struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES]; + unsigned int num_states; double sr_exit_time_us; double sr_enter_plus_exit_time_us; double urgent_latency_us; @@ -111,7 +112,6 @@ struct _vcs_dpi_soc_bounding_box_st { double xfc_bus_transport_time_us; double xfc_xbuf_latency_tolerance_us; int use_urgent_burst_bw; - unsigned int num_states; double min_dcfclk; bool do_urgent_latency_adjustment; double urgent_latency_adjustment_fabric_clock_component_us; @@ -204,6 +204,7 @@ struct _vcs_dpi_ip_params_st { unsigned int LineBufferFixedBpp; unsigned int can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one; unsigned int bug_forcing_LC_req_same_size_fixed; + unsigned int number_of_cursors; }; struct _vcs_dpi_display_xfc_params_st { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index f285b76888fb..d523fc9547e7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -124,7 +124,7 @@ struct resource_funcs { struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state 
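The new dcn21_patch_unknown_plane_state() above derives a safe DCC metadata pitch by rounding the source width up to the 1024-pixel worst-case block width before deferring to the dcn20 handler. The ((width + 1023) / 1024) * 1024 expression is the standard align-up idiom, sketched generically:

/* Round x up to the next multiple of a (a > 0). For a = 1024 this is
 * exactly the meta_pitch expression in the hunk above. */
static unsigned int align_up(unsigned int x, unsigned int a)
{
        return ((x + a - 1) / a) * a;
}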
*stream); - enum dc_status (*get_default_swizzle_mode)( + enum dc_status (*patch_unknown_plane_state)( struct dc_plane_state *plane_state); struct stream_encoder *(*find_first_free_match_stream_enc_for_link)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index e94e5fbf2aa2..64f401e4db54 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -85,6 +85,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable); void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); +bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable); bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx); #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index c59740084ebc..7c2a3328b208 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -39,6 +39,7 @@ struct dsc_config { uint32_t pic_height; enum dc_pixel_encoding pixel_encoding; enum dc_color_depth color_depth; /* Bits per component */ + bool is_odm; struct dc_dsc_config dc_dsc_cfg; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index fb748f082c56..c2b392a533b1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -68,6 +68,7 @@ struct encoder_feature_support { unsigned int max_hdmi_pixel_clock; bool hdmi_ycbcr420_supported; bool dp_ycbcr420_supported; + bool fec_supported; }; union dpcd_psr_configuration { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 7575564b2265..2717352eb697 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -310,7 +310,8 @@ struct opp_funcs { enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, - int height); + int height, + int offset); bool (*dpg_is_blanked)( struct output_pixel_processor *opp); diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 0cb8967f0c45..10b5fa9d2588 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -50,6 +50,7 @@ enum dmub_cmd_type { DMUB_CMD__REG_REG_WAIT = 4, DMUB_CMD__PLAT_54186_WA = 5, DMUB_CMD__PSR = 64, + DMUB_CMD__ABM = 66, DMUB_CMD__VBIOS = 128, }; @@ -256,6 +257,52 @@ struct dmub_rb_cmd_psr_set_version { struct dmub_cmd_psr_set_version_data psr_set_version_data; }; +struct dmub_cmd_abm_set_pipe_data { + uint32_t ramping_boundary; + uint32_t otg_inst; +}; + +struct dmub_rb_cmd_abm_set_pipe { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_pipe_data abm_set_pipe_data; +}; + +struct dmub_cmd_abm_set_backlight_data { + uint32_t frame_ramp; +}; + +struct dmub_rb_cmd_abm_set_backlight { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_backlight_data abm_set_backlight_data; +}; + +struct dmub_cmd_abm_set_level_data { + uint32_t level; +}; + +struct dmub_rb_cmd_abm_set_level { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_level_data abm_set_level_data; +}; + +struct dmub_cmd_abm_set_ambient_level_data { + uint32_t ambient_lux; +}; + +struct 
dmub_rb_cmd_abm_set_ambient_level { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_ambient_level_data abm_set_ambient_level_data; +}; + +struct dmub_cmd_abm_set_pwm_frac_data { + uint32_t fractional_pwm; +}; + +struct dmub_rb_cmd_abm_set_pwm_frac { + struct dmub_cmd_header header; + struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data; +}; + union dmub_rb_cmd { struct dmub_rb_cmd_read_modify_write read_modify_write; struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq; @@ -272,6 +319,11 @@ union dmub_rb_cmd { struct dmub_rb_cmd_psr_enable psr_enable; struct dmub_rb_cmd_psr_set_level psr_set_level; struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa; + struct dmub_rb_cmd_abm_set_pipe abm_set_pipe; + struct dmub_rb_cmd_abm_set_backlight abm_set_backlight; + struct dmub_rb_cmd_abm_set_level abm_set_level; + struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level; + struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac; }; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h index ce793f47f234..d37535d21928 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h @@ -45,4 +45,13 @@ enum psr_version { PSR_VERSION_2_1 = 0x21, // PSR Version 2, includes Y-coordinate support for SU }; +enum dmub_cmd_abm_type { + DMUB_CMD__ABM_INIT_CONFIG = 0, + DMUB_CMD__ABM_SET_PIPE = 1, + DMUB_CMD__ABM_SET_BACKLIGHT = 2, + DMUB_CMD__ABM_SET_LEVEL = 3, + DMUB_CMD__ABM_SET_AMBIENT_LEVEL = 4, + DMUB_CMD__ABM_SET_PWM_FRAC = 5, +}; + #endif /* _DMUB_CMD_DAL_H_ */ diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index ea7015f869c9..8a87d0ed90ae 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -134,11 +134,6 @@ #define PICASSO_A0 0x41 /* DCN1_01 */ #define RAVEN2_A0 0x81 -#define RAVEN2_15D8_REV_94 0x94 -#define RAVEN2_15D8_REV_95 0x95 -#define RAVEN2_15D8_REV_E9 0xE9 -#define RAVEN2_15D8_REV_EA 0xEA -#define RAVEN2_15D8_REV_EB 0xEB #define RAVEN1_F0 0xF0 #define RAVEN_UNKNOWN 0xFF #ifndef ASICREV_IS_RAVEN @@ -149,16 +144,17 @@ #define PRID_DALI_E3 0xE3 #define PRID_DALI_E4 0xE4 +#define PRID_POLLOCK_94 0x94 +#define PRID_POLLOCK_95 0x95 +#define PRID_POLLOCK_E9 0xE9 +#define PRID_POLLOCK_EA 0xEA +#define PRID_POLLOCK_EB 0xEB + #define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0)) #ifndef ASICREV_IS_RAVEN2 #define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RENOIR_A0)) #endif #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN)) -#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \ - || eChipRev == RAVEN2_15D8_REV_95 \ - || eChipRev == RAVEN2_15D8_REV_E9 \ - || eChipRev == RAVEN2_15D8_REV_EA \ - || eChipRev == RAVEN2_15D8_REV_EB) #define FAMILY_RV 142 /* DCN 1*/ diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 89a709267019..d66f9d8eefb4 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -124,36 +124,37 @@ enum dc_log_type { #define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \ (1 << LOG_DETECTION_EDID_PARSER)) -#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \ - (1 << LOG_WARNING) | \ - (1 << LOG_EVENT_MODE_SET) | \ - (1 << LOG_EVENT_DETECTION) | \ - (1 
<< LOG_EVENT_LINK_TRAINING) | \ - (1 << LOG_EVENT_LINK_LOSS) | \ - (1 << LOG_EVENT_UNDERFLOW) | \ - (1 << LOG_RESOURCE) | \ - (1 << LOG_FEATURE_OVERRIDE) | \ - (1 << LOG_DETECTION_EDID_PARSER) | \ - (1 << LOG_DC) | \ - (1 << LOG_HW_HOTPLUG) | \ - (1 << LOG_HW_SET_MODE) | \ - (1 << LOG_HW_RESUME_S3) | \ - (1 << LOG_HW_HPD_IRQ) | \ - (1 << LOG_SYNC) | \ - (1 << LOG_BANDWIDTH_VALIDATION) | \ - (1 << LOG_MST) | \ - (1 << LOG_DETECTION_DP_CAPS) | \ - (1 << LOG_BACKLIGHT)) | \ - (1 << LOG_I2C_AUX) | \ - (1 << LOG_IF_TRACE) | \ - (1 << LOG_DTN) /* | \ - (1 << LOG_DEBUG) | \ - (1 << LOG_BIOS) | \ - (1 << LOG_SURFACE) | \ - (1 << LOG_SCALER) | \ - (1 << LOG_DML) | \ - (1 << LOG_HW_LINK_TRAINING) | \ - (1 << LOG_HW_AUDIO)| \ - (1 << LOG_BANDWIDTH_CALCS)*/ +#define DC_DEFAULT_LOG_MASK ((1ULL << LOG_ERROR) | \ + (1ULL << LOG_WARNING) | \ + (1ULL << LOG_EVENT_MODE_SET) | \ + (1ULL << LOG_EVENT_DETECTION) | \ + (1ULL << LOG_EVENT_LINK_TRAINING) | \ + (1ULL << LOG_EVENT_LINK_LOSS) | \ + (1ULL << LOG_EVENT_UNDERFLOW) | \ + (1ULL << LOG_RESOURCE) | \ + (1ULL << LOG_FEATURE_OVERRIDE) | \ + (1ULL << LOG_DETECTION_EDID_PARSER) | \ + (1ULL << LOG_DC) | \ + (1ULL << LOG_HW_HOTPLUG) | \ + (1ULL << LOG_HW_SET_MODE) | \ + (1ULL << LOG_HW_RESUME_S3) | \ + (1ULL << LOG_HW_HPD_IRQ) | \ + (1ULL << LOG_SYNC) | \ + (1ULL << LOG_BANDWIDTH_VALIDATION) | \ + (1ULL << LOG_MST) | \ + (1ULL << LOG_DETECTION_DP_CAPS) | \ + (1ULL << LOG_BACKLIGHT)) | \ + (1ULL << LOG_I2C_AUX) | \ + (1ULL << LOG_IF_TRACE) | \ + (1ULL << LOG_HDMI_FRL) | \ + (1ULL << LOG_DTN) /* | \ + (1ULL << LOG_DEBUG) | \ + (1ULL << LOG_BIOS) | \ + (1ULL << LOG_SURFACE) | \ + (1ULL << LOG_SCALER) | \ + (1ULL << LOG_DML) | \ + (1ULL << LOG_HW_LINK_TRAINING) | \ + (1ULL << LOG_HW_AUDIO)| \ + (1ULL << LOG_BANDWIDTH_CALCS)*/ #endif /* __DAL_LOGGER_TYPES_H__ */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index 83eaec4c6ad7..cc1d3f470b99 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -52,8 +52,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp) * hdcp is not desired */ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && - !hdcp->connection.displays[i].adjust.disable) { + if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && + !hdcp->displays[i].adjust.disable) { is_auth_needed = 1; break; } @@ -73,8 +73,8 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp) * hdcp is not desired */ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && - !hdcp->connection.displays[i].adjust.disable) { + if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && + !hdcp->displays[i].adjust.disable) { is_auth_needed = 1; break; } @@ -114,6 +114,9 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, } else if (is_in_hdcp2_dp_states(hdcp)) { status = mod_hdcp_hdcp2_dp_execution(hdcp, event_ctx, &input->hdcp2); + } else { + event_ctx->unexpected_event = 1; + goto out; } out: return status; @@ -373,8 +376,8 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, goto out; display->state = MOD_HDCP_DISPLAY_INACTIVE; - /* request authentication for remaining displays*/ - if (get_active_display_count(hdcp) > 0) + /* request authentication when connection is not reset */ + if (current_state(hdcp) != HDCP_UNINITIALIZED) callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, 
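The logger_types.h rewrite above is a correctness fix hiding inside what looks like churn: with plain int constants, 1 << n invokes undefined behaviour once a log type's index reaches bit 31, and any type added past that point would silently vanish from DC_DEFAULT_LOG_MASK. Promoting every term to 1ULL keeps the whole OR-mask in 64-bit arithmetic; the hunk also quietly adds LOG_HDMI_FRL to the default set, while the stray double parenthesis after the LOG_BACKLIGHT term is carried over from the old mask unchanged. A minimal illustration, with a hypothetical LOG_FOO placed past bit 31:

#include <stdint.h>

enum { LOG_FOO = 33 };  /* hypothetical log type past bit 31 */

/* (1 << LOG_FOO) would shift a 32-bit int by 33: undefined behaviour,
 * and in practice a mask with the bit lost. The 64-bit literal is safe. */
static const uint64_t log_mask = (1ULL << LOG_FOO);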
output); out: @@ -481,10 +484,8 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode( break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_DISPLAY_PORT: - mode = MOD_HDCP_MODE_DP; - break; case SIGNAL_TYPE_DISPLAY_PORT_MST: - mode = MOD_HDCP_MODE_DP_MST; + mode = MOD_HDCP_MODE_DP; break; default: break; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index b09d2f5502b3..5cb4546be0ef 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -165,7 +165,6 @@ struct mod_hdcp_auth_counters { /* contains values per connection */ struct mod_hdcp_connection { struct mod_hdcp_link link; - struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS]; uint8_t is_repeater; uint8_t is_km_stored; uint8_t is_hdcp1_revoked; @@ -201,6 +200,8 @@ struct mod_hdcp { struct mod_hdcp_config config; /* per connection */ struct mod_hdcp_connection connection; + /* per displays */ + struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS]; /* per authentication attempt */ struct mod_hdcp_authentication auth; /* per state in an authentication */ @@ -391,13 +392,13 @@ enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp); /* hdcp version helpers */ static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp) { - return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP || - hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP); } static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp) { - return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP && + hdcp->connection.link.dp.mst_supported); } static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp) @@ -518,7 +519,7 @@ static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp) uint8_t i; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_active(&hdcp->connection.displays[i])) + if (is_display_active(&hdcp->displays[i])) added_count++; return added_count; } @@ -529,7 +530,7 @@ static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp) uint8_t i; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_added(&hdcp->connection.displays[i])) + if (is_display_added(&hdcp->displays[i])) added_count++; return added_count; } @@ -541,8 +542,8 @@ static inline struct mod_hdcp_display *get_first_added_display( struct mod_hdcp_display *display = NULL; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (is_display_added(&hdcp->connection.displays[i])) { - display = &hdcp->connection.displays[i]; + if (is_display_added(&hdcp->displays[i])) { + display = &hdcp->displays[i]; break; } return display; @@ -555,9 +556,9 @@ static inline struct mod_hdcp_display *get_active_display_at_index( struct mod_hdcp_display *display = NULL; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (hdcp->connection.displays[i].index == index && - is_display_active(&hdcp->connection.displays[i])) { - display = &hdcp->connection.displays[i]; + if (hdcp->displays[i].index == index && + is_display_active(&hdcp->displays[i])) { + display = &hdcp->displays[i]; break; } return display; @@ -570,8 +571,8 @@ static inline struct mod_hdcp_display *get_empty_display_container( struct mod_hdcp_display *display = NULL; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) - if (!is_display_active(&hdcp->connection.displays[i])) { - display = &hdcp->connection.displays[i]; + if (!is_display_active(&hdcp->displays[i])) { + display = 
&hdcp->displays[i]; break; } return display; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c index 5bc6706d2af7..f3711914364e 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c @@ -230,6 +230,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, (!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { fail_and_restart_in_ms(0, &status, output); break; + } else if (conn->hdcp1_retry_count < conn->link.adjust.hdcp1.min_auth_retries_wa) { + fail_and_restart_in_ms(0, &status, output); + break; } if (conn->is_repeater) { set_watchdog_in_ms(hdcp, 5000, output); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c index ff9d54812e62..816759d10cbc 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -65,6 +65,7 @@ enum mod_hdcp_ddc_message_id { MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, @@ -101,6 +102,7 @@ static const uint8_t hdcp_i2c_offsets[] = { [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80, [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60, [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x80, [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60, [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60, [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80, @@ -135,6 +137,7 @@ static const uint32_t hdcp_dpcd_addrs[] = { [MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8, [MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318, [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330, + [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x69340, [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0, [MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0, [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473, @@ -474,14 +477,27 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp) enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp) { - enum mod_hdcp_status status; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; if (is_dp_hdcp(hdcp)) { + uint32_t device_count = 0; + uint32_t rx_id_list_size = 0; + uint32_t bytes_read = 0; + hdcp->auth.msg.hdcp2.rx_id_list[0] = 12; status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, - hdcp->auth.msg.hdcp2.rx_id_list+1, - sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1); - + hdcp->auth.msg.hdcp2.rx_id_list+1, + HDCP_MAX_AUX_TRANSACTION_SIZE); + if (status == MOD_HDCP_STATUS_SUCCESS) { + bytes_read = HDCP_MAX_AUX_TRANSACTION_SIZE; + device_count = HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) + + (HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4); + rx_id_list_size = MIN((21 + 5 * device_count), + (sizeof(hdcp->auth.msg.hdcp2.rx_id_list) - 1)); + status = read(hdcp, 
MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2, + hdcp->auth.msg.hdcp2.rx_id_list + 1 + bytes_read, + (rx_id_list_size - 1) / HDCP_MAX_AUX_TRANSACTION_SIZE * HDCP_MAX_AUX_TRANSACTION_SIZE); + } } else { status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, hdcp->auth.msg.hdcp2.rx_id_list, diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index 6e844825ad23..d3192b9d0c3d 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -37,10 +37,11 @@ /* default logs */ #define HDCP_ERROR_TRACE(hdcp, status) \ HDCP_LOG_ERR(hdcp, \ - "[Link %d] WARNING %s IN STATE %s", \ + "[Link %d] WARNING %s IN STATE %s STAY COUNT %d", \ hdcp->config.index, \ mod_hdcp_status_to_str(status), \ - mod_hdcp_state_id_to_str(hdcp->state.id)) + mod_hdcp_state_id_to_str(hdcp->state.id), \ + hdcp->state.stay_count) #define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \ HDCP_LOG_VER(hdcp, \ "[Link %d] HDCP 1.4 enabled on display %d", \ @@ -111,6 +112,9 @@ sizeof(hdcp->auth.msg.hdcp1.bksv)); \ HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \ sizeof(hdcp->auth.msg.hdcp1.bcaps)); \ + HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \ + sizeof(hdcp->auth.msg.hdcp1.bstatus)); \ HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \ sizeof(hdcp->auth.msg.hdcp1.an)); \ HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index d9cb2383d6de..b87e9d2862bc 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -177,11 +177,11 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp) HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp); for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) if (is_display_encryption_enabled( - &hdcp->connection.displays[i])) { - hdcp->connection.displays[i].state = + &hdcp->displays[i])) { + hdcp->displays[i].state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; HDCP_HDCP1_DISABLED_TRACE(hdcp, - hdcp->connection.displays[i].index); + hdcp->displays[i].index); } return MOD_HDCP_STATUS_SUCCESS; @@ -301,14 +301,14 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || - hdcp->connection.displays[i].adjust.disable) + if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || + hdcp->displays[i].adjust.disable) continue; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id; - hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index; + hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index; hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION; psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); @@ -316,8 +316,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE; - hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; - HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index); + 
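mod_hdcp_read_rx_id_list() above now sizes the DP read from the repeater's own RxInfo instead of always pulling the maximum buffer: one AUX burst fetches the head of the RepeaterAuth_Send_ReceiverID_List message, the 5-bit device count is assembled from the two RxInfo bytes, and the remainder is read from the new PART2 DPCD offset in whole bursts. A sketch of the size computation, assuming the usual 16-byte AUX transaction limit and the HDCP 2.2 message layout (2-byte RxInfo, 3-byte seq_num_V, 16-byte V', then 5 bytes per receiver ID):

#include <stdint.h>

#define AUX_BURST 16  /* assumed value of HDCP_MAX_AUX_TRANSACTION_SIZE */

/* rxinfo[0]/rxinfo[1] are the two RxInfo bytes; the 5-bit DEVICE_COUNT
 * spans them (high bit in byte 0, low nibble in byte 1), matching the
 * HDCP_2_2_DEV_COUNT_HI/LO split used in the hunk. */
static uint32_t rx_id_list_size(const uint8_t *rxinfo, uint32_t buf_size)
{
        uint32_t device_count = ((rxinfo[0] & 0x1) << 4) |
                                ((rxinfo[1] & 0xf0) >> 4);
        uint32_t size = 21 + 5 * device_count;  /* header + receiver IDs */

        return size < buf_size ? size : buf_size;
}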
hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index); } return MOD_HDCP_STATUS_SUCCESS; @@ -421,11 +421,11 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp) HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp); for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) if (is_display_encryption_enabled( - &hdcp->connection.displays[i])) { - hdcp->connection.displays[i].state = + &hdcp->displays[i])) { + hdcp->displays[i].state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; HDCP_HDCP2_DISABLED_TRACE(hdcp, - hdcp->connection.displays[i].index); + hdcp->displays[i].index); } return MOD_HDCP_STATUS_SUCCESS; @@ -747,10 +747,10 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || - hdcp->connection.displays[i].adjust.disable) + if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || + hdcp->displays[i].adjust.disable) continue; - hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index; + hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index; hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id; hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION; @@ -759,8 +759,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) break; - hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; - HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index); + hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index); } return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index 891bca555e17..c088602bc1a0 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -102,6 +102,7 @@ enum mod_hdcp_status { struct mod_hdcp_displayport { uint8_t rev; uint8_t assr_supported; + uint8_t mst_supported; }; struct mod_hdcp_hdmi { @@ -110,8 +111,7 @@ struct mod_hdcp_hdmi { enum mod_hdcp_operation_mode { MOD_HDCP_MODE_OFF, MOD_HDCP_MODE_DEFAULT, - MOD_HDCP_MODE_DP, - MOD_HDCP_MODE_DP_MST + MOD_HDCP_MODE_DP }; enum mod_hdcp_display_state { @@ -157,7 +157,8 @@ struct mod_hdcp_display_adjustment { struct mod_hdcp_link_adjustment_hdcp1 { uint8_t disable : 1; uint8_t postpone_encryption : 1; - uint8_t reserved : 6; + uint8_t min_auth_retries_wa : 1; + uint8_t reserved : 5; }; enum mod_hdcp_force_hdcp_type { diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h new file mode 100644 index 000000000000..82b6cc25205e --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h @@ -0,0 +1,69 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _wafl2_4_0_0_SH_MASK_HEADER +#define _wafl2_4_0_0_SH_MASK_HEADER + +//PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18 +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L +#define 
PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L +#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h new file mode 100644 index 000000000000..4a51a90c611a --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h @@ -0,0 +1,29 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _wafl2_4_0_0_SMN_HEADER +#define _wafl2_4_0_0_SMN_HEADER + +#define smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS 0x11cf0210 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h new file mode 100644 index 000000000000..f37712f05b03 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h @@ -0,0 +1,69 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _xgmi_4_0_0_SH_MASK_HEADER +#define _xgmi_4_0_0_SH_MASK_HEADER + +//PCS_GOPX16_PCS_ERROR_STATUS +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18 +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L +#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L 
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h new file mode 100644 index 000000000000..6ccbac4ce87e --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h @@ -0,0 +1,29 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _xgmi_4_0_0_SMN_HEADER +#define _xgmi_4_0_0_SMN_HEADER + +#define smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS 0x11af0210 + +#endif diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index abc0eb4ac493..a3c238c39ef5 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -167,27 +167,6 @@ struct tile_config { #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 -/* - * Allocation flag domains - * NOTE: This must match the corresponding definitions in kfd_ioctl.h. - */ -#define ALLOC_MEM_FLAGS_VRAM (1 << 0) -#define ALLOC_MEM_FLAGS_GTT (1 << 1) -#define ALLOC_MEM_FLAGS_USERPTR (1 << 2) -#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3) -#define ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4) - -/* - * Allocation flags attributes/access options. - * NOTE: This must match the corresponding definitions in kfd_ioctl.h. - */ -#define ALLOC_MEM_FLAGS_WRITABLE (1 << 31) -#define ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30) -#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29) -#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) /* TODO */ -#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27) -#define ALLOC_MEM_FLAGS_COHERENT (1 << 26) /* For GFXv9 or later */ - /** * struct kfd2kgd_calls * @@ -223,8 +202,6 @@ struct tile_config { * @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID. 
* Only used for no cp scheduling mode * - * @get_tile_config: Returns GPU-specific tiling mode information - * * @set_vm_context_page_table_base: Program page table base for a VMID * * @invalidate_tlbs: Invalidate TLBs for a specific PASID @@ -310,8 +287,6 @@ struct kfd2kgd_calls { void (*set_scratch_backing_va)(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); - int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config); - void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base); uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd); diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 7a4f377005a1..f18e3fadbc26 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -121,20 +121,20 @@ static int smu_feature_update_enable_state(struct smu_context *smu, if (enabled) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, - feature_low); + feature_low, NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, - feature_high); + feature_high, NULL); if (ret) return ret; } else { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, - feature_low); + feature_low, NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, - feature_high); + feature_high, NULL); if (ret) return ret; } @@ -195,21 +195,13 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t return -EINVAL; if (if_version) { - ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion); - if (ret) - return ret; - - ret = smu_read_smc_arg(smu, if_version); + ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version); if (ret) return ret; } if (smu_version) { - ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion); - if (ret) - return ret; - - ret = smu_read_smc_arg(smu, smu_version); + ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version); if (ret) return ret; } @@ -218,17 +210,19 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t } int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max) + uint32_t min, uint32_t max, bool lock_needed) { int ret = 0; - if (min < 0 && max < 0) - return -EINVAL; - if (!smu_clk_dpm_is_enabled(smu, clk_type)) return 0; + if (lock_needed) + mutex_lock(&smu->mutex); ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max); + if (lock_needed) + mutex_unlock(&smu->mutex); + return ret; } @@ -251,7 +245,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, if (max > 0) { param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, - param); + param, NULL); if (ret) return ret; } @@ -259,7 +253,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, if (min > 0) { param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - param); + param, NULL); if (ret) return ret; } @@ -335,12 +329,8 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff)); - ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex, - param); - if (ret) - return ret; - - ret = smu_read_smc_arg(smu, ¶m); + ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_GetDpmFreqByIndex, + param, ¶m); if (ret) return ret; @@ -542,7 +532,8 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int ret = smu_send_smc_msg_with_param(smu, drv2smu ? SMU_MSG_TransferTableDram2Smu : SMU_MSG_TransferTableSmu2Dram, - table_id | ((argument & 0xFFFF) << 16)); + table_id | ((argument & 0xFFFF) << 16), + NULL); if (ret) return ret; @@ -900,6 +891,7 @@ static int smu_sw_init(void *handle) mutex_init(&smu->sensor_lock); mutex_init(&smu->metrics_lock); + mutex_init(&smu->message_lock); smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; @@ -1992,7 +1984,7 @@ int smu_set_mp1_state(struct smu_context *smu, return 0; } - ret = smu_send_smc_msg(smu, msg); + ret = smu_send_smc_msg(smu, msg, NULL); if (ret) pr_err("[PrepareMp1] Failed!\n"); @@ -2056,8 +2048,11 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { smu_set_watermarks_table(smu, table, clock_ranges); - smu->watermarks_bitmap |= WATERMARKS_EXIST; - smu->watermarks_bitmap &= ~WATERMARKS_LOADED; + + if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) { + smu->watermarks_bitmap |= WATERMARKS_EXIST; + smu->watermarks_bitmap &= ~WATERMARKS_LOADED; + } } mutex_unlock(&smu->mutex); @@ -2667,12 +2662,3 @@ uint32_t smu_get_pptable_power_limit(struct smu_context *smu) return ret; } - -int smu_send_smc_msg(struct smu_context *smu, - enum smu_message_type msg) -{ - int ret; - - ret = smu_send_smc_msg_with_param(smu, msg, 0); - return ret; -} diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index d3c4e7a8c1b1..cc4427ebf169 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -374,13 +374,13 @@ arcturus_set_single_dpm_table(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, - (clk_id << 16 | 0xFF)); + (clk_id << 16 | 0xFF), + &num_of_levels); if (ret) { pr_err("[%s] failed to get dpm levels!\n", __func__); return ret; } - smu_read_smc_arg(smu, &num_of_levels); if (!num_of_levels) { pr_err("[%s] number of clk levels is invalid!\n", __func__); return -EINVAL; @@ -390,12 +390,12 @@ arcturus_set_single_dpm_table(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, - (clk_id << 16 | i)); + (clk_id << 16 | i), + &clk); if (ret) { pr_err("[%s] failed to get dpm freq by index!\n", __func__); return ret; } - smu_read_smc_arg(smu, &clk); if (!clk) { pr_err("[%s] clk value is invalid!\n", __func__); return -EINVAL; @@ -553,13 +553,13 @@ static int arcturus_run_btc(struct smu_context *smu) { int ret = 0; - ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); + ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL); if (ret) { pr_err("RunAfllBtc failed!\n"); return ret; } - return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc); + return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); } static int arcturus_populate_umd_state_clk(struct smu_context *smu) @@ -744,7 +744,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? 
SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_GFXCLK << 16) | (freq & 0xffff)); + (PPCLK_GFXCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s gfxclk !\n", max ? "max" : "min"); @@ -759,7 +760,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_UCLK << 16) | (freq & 0xffff)); + (PPCLK_UCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s memclk !\n", max ? "max" : "min"); @@ -774,7 +776,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_SOCCLK << 16) | (freq & 0xffff)); + (PPCLK_SOCCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s socclk !\n", max ? "max" : "min"); @@ -1289,12 +1292,11 @@ static int arcturus_get_power_limit(struct smu_context *smu, return -EINVAL; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit, - power_src << 16); + power_src << 16, &asic_default_power_limit); if (ret) { pr_err("[%s] get PPT limit failed!", __func__); return ret; } - smu_read_smc_arg(smu, &asic_default_power_limit); } else { /* the last hope to figure out the ppt limit */ if (!pptable) { @@ -1498,7 +1500,8 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type); + 1 << workload_type, + NULL); if (ret) { pr_err("Fail to set workload type %d\n", workload_type); return ret; @@ -2233,7 +2236,7 @@ static int arcturus_set_df_cstate(struct smu_context *smu, return -EINVAL; } - return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state); + return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); } static const struct pptable_funcs arcturus_ppt_funcs = { @@ -2299,7 +2302,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, - .read_smc_arg = smu_v11_0_read_arg, .init_display_count = smu_v11_0_init_display_count, .set_allowed_mask = smu_v11_0_set_allowed_mask, .get_enabled_mask = smu_v11_0_get_enabled_mask, diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 97b6714e83e6..657a6f17e91f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -362,6 +362,7 @@ struct smu_context struct mutex mutex; struct mutex sensor_lock; struct mutex metrics_lock; + struct mutex message_lock; uint64_t pool_size; struct smu_table_context smu_table; @@ -371,6 +372,9 @@ struct smu_context struct amd_pp_display_configuration *display_config; struct smu_baco_context smu_baco; void *od_settings; +#if defined(CONFIG_DEBUG_FS) + struct dentry *debugfs_sclk; +#endif uint32_t pstate_sclk; uint32_t pstate_mclk; @@ -514,8 +518,7 @@ struct pptable_funcs { int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu); int (*system_features_control)(struct smu_context *smu, bool en); int (*send_smc_msg_with_param)(struct smu_context *smu, - enum smu_message_type msg, uint32_t param); - int (*read_smc_arg)(struct smu_context *smu, 
uint32_t *arg); + enum smu_message_type msg, uint32_t param, uint32_t *read_arg); int (*init_display_count)(struct smu_context *smu, uint32_t count); int (*set_allowed_mask)(struct smu_context *smu); int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); @@ -707,7 +710,7 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max, bool lock_needed); int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); + uint32_t min, uint32_t max, bool lock_needed); int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type, diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index acccdf621b4e..1c88219fe403 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -29,6 +29,7 @@ #define SMU11_DRIVER_IF_VERSION_VG20 0x13 #define SMU11_DRIVER_IF_VERSION_ARCT 0x12 #define SMU11_DRIVER_IF_VERSION_NV10 0x35 +#define SMU11_DRIVER_IF_VERSION_NV12 0x33 #define SMU11_DRIVER_IF_VERSION_NV14 0x36 /* MP Apertures */ @@ -182,9 +183,8 @@ int smu_v11_0_system_features_control(struct smu_context *smu, int smu_v11_0_send_msg_with_param(struct smu_context *smu, enum smu_message_type msg, - uint32_t param); - -int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg); + uint32_t param, + uint32_t *read_arg); int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h index d79e54b5ebf6..7fbebc1979cf 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h @@ -40,14 +40,13 @@ struct smu_12_0_cmn2aisc_mapping { int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, uint16_t msg); -int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg); - int smu_v12_0_wait_for_response(struct smu_context *smu); int smu_v12_0_send_msg_with_param(struct smu_context *smu, enum smu_message_type msg, - uint32_t param); + uint32_t param, + uint32_t *read_arg); int smu_v12_0_check_fw_status(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 0d73a49166af..6e41f3c9ff1b 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -661,14 +661,14 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) if (enable) { /* vcn dpm on is a prerequisite for vcn power gate messages */ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL); if (ret) return ret; } power_gate->vcn_gated = false; } else { if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); if (ret) return ret; } @@ -686,14 +686,14 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) if (enable) { if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg); + ret = 
smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL); if (ret) return ret; } power_gate->jpeg_gated = false; } else { if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg); + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL); if (ret) return ret; } @@ -970,7 +970,7 @@ static int navi10_force_clk_levels(struct smu_context *smu, if (ret) return size; - ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false); if (ret) return size; break; @@ -1042,7 +1042,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu) int ret = 0; uint32_t max_freq = 0; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL); if (ret) return ret; @@ -1063,19 +1063,11 @@ static int navi10_display_config_changed(struct smu_context *smu) int ret = 0; if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && - !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { - ret = smu_write_watermarks_table(smu); - if (ret) - return ret; - - smu->watermarks_bitmap |= WATERMARKS_LOADED; - } - - if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, - smu->display_config->num_display); + smu->display_config->num_display, + NULL); if (ret) return ret; } @@ -1102,7 +1094,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest) return ret; force_freq = highest ? max_freq : min_freq; - ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq); + ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret; } @@ -1128,7 +1120,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu) if (ret) return ret; - ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret; } @@ -1400,7 +1392,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u if (workload_type < 0) return -EINVAL; smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type); + 1 << workload_type, NULL); return ret; } @@ -1465,7 +1457,8 @@ static int navi10_notify_smc_display_config(struct smu_context *smu) if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetMinDeepSleepDcefclk, - min_clocks.dcef_clock_in_sr/100); + min_clocks.dcef_clock_in_sr/100, + NULL); if (ret) { pr_err("Attempt to set divider for DCEFCLK Failed!"); return ret; @@ -1493,6 +1486,7 @@ static int navi10_set_watermarks_table(struct smu_context *smu, *clock_ranges) { int i; + int ret = 0; Watermarks_t *table = watermarks; if (!table || !clock_ranges) @@ -1544,6 +1538,18 @@ static int navi10_set_watermarks_table(struct smu_context *smu, clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; } + smu->watermarks_bitmap |= WATERMARKS_EXIST; + + /* pass data to smu controller */ + if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) { + ret = smu_write_watermarks_table(smu); + if (ret) { + pr_err("Failed to update WMTABLE!"); + return ret; + } + smu->watermarks_bitmap |= WATERMARKS_LOADED; + } + return 0; } @@ -1674,10 +1680,10 @@ static int navi10_set_standard_performance_level(struct smu_context *smu) return 
navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO); } - ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret; @@ -1742,10 +1748,10 @@ static int navi10_set_peak_performance_level(struct smu_context *smu) if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret; @@ -1855,12 +1861,11 @@ static int navi10_get_power_limit(struct smu_context *smu, return -EINVAL; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit, - power_src << 16); + power_src << 16, &asic_default_power_limit); if (ret) { pr_err("[%s] get PPT limit failed!", __func__); return ret; } - smu_read_smc_arg(smu, &asic_default_power_limit); } else { /* the last hope to figure out the ppt limit */ if (!pptable) { @@ -1900,7 +1905,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu, pptable->PcieLaneCount[i] : pcie_width_cap); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, - smu_pcie_arg); + smu_pcie_arg, + NULL); if (ret) return ret; @@ -1946,13 +1952,13 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetVoltageByDpm, - param); + param, + &value); if (ret) { pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!"); return ret; } - smu_read_smc_arg(smu, &value); *voltage = (uint16_t)value; return 0; @@ -2209,7 +2215,7 @@ static int navi10_run_btc(struct smu_context *smu) { int ret = 0; - ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc); + ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL); if (ret) pr_err("RunBtc failed!\n"); @@ -2221,9 +2227,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable) int result = 0; if (!enable) - result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE); + result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL); else - result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE); + result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL); return result; } @@ -2332,7 +2338,6 @@ static const struct pptable_funcs navi10_ppt_funcs = { .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, - .read_smc_arg = smu_v11_0_read_arg, .init_display_count = smu_v11_0_init_display_count, .set_allowed_mask = smu_v11_0_set_allowed_mask, .get_enabled_mask = smu_v11_0_get_enabled_mask, diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 6024139d5a29..653faadaafb3 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -342,14 +342,14 @@ static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable) if (enable) { /* vcn dpm on is a prerequisite for vcn power gate messages */ if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { - ret = 
smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); if (ret) return ret; } power_gate->vcn_gated = false; } else { if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); if (ret) return ret; } @@ -367,14 +367,14 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) if (enable) { if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); if (ret) return ret; } power_gate->jpeg_gated = false; } else { if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); if (ret) return ret; } @@ -423,7 +423,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) return ret; force_freq = highest ? max_freq : min_freq; - ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq); + ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false); if (ret) return ret; } @@ -456,7 +456,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) { if (ret) return ret; - ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false); if (ret) return ret; } @@ -622,22 +622,24 @@ static int renoir_force_clk_levels(struct smu_context *smu, return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, soft_max_level == 0 ? min_freq : - soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq); + soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq, + NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, soft_min_level == 2 ? max_freq : - soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq); + soft_min_level == 1 ? 
RENOIR_UMD_PSTATE_GFXCLK : min_freq, + NULL); if (ret) return ret; break; case SMU_SOCCLK: GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq); GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL); if (ret) return ret; break; @@ -645,10 +647,10 @@ static int renoir_force_clk_levels(struct smu_context *smu, case SMU_FCLK: GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq); GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL); if (ret) return ret; break; @@ -681,7 +683,8 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u } ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type); + 1 << workload_type, + NULL); if (ret) { pr_err_once("Fail to set workload type %d\n", workload_type); return ret; @@ -701,7 +704,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false); if (ret) return ret; @@ -709,7 +712,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu) if (ret) return ret; - ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false); if (ret) return ret; @@ -810,9 +813,10 @@ static int renoir_set_watermarks_table( clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; } + smu->watermarks_bitmap |= WATERMARKS_EXIST; + /* pass data to smu controller */ - if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && - !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { + if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) { ret = smu_write_watermarks_table(smu); if (ret) { pr_err("Failed to update WMTABLE!"); @@ -913,7 +917,6 @@ static const struct pptable_funcs renoir_ppt_funcs = { .powergate_vcn = smu_v12_0_powergate_vcn, .powergate_jpeg = smu_v12_0_powergate_jpeg, .send_smc_msg_with_param = smu_v12_0_send_msg_with_param, - .read_smc_arg = smu_v12_0_read_arg, .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, .gfx_off_control = smu_v12_0_gfx_off_control, .init_smc_tables = smu_v12_0_init_smc_tables, diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h index 7bd200ffcda8..6900877de845 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -79,12 +79,13 @@ #define smu_set_default_od_settings(smu, initialize) \ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) -int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg); +#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) \ + ((smu)->ppt_funcs->send_smc_msg_with_param? 
(smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param), (read_arg)) : 0) + +static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t *read_arg) { + return smu_send_smc_msg_with_param(smu, msg, 0, read_arg); +} -#define smu_send_smc_msg_with_param(smu, msg, param) \ - ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0) -#define smu_read_smc_arg(smu, arg) \ - ((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0) #define smu_alloc_dpm_context(smu) \ ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0) #define smu_init_display_count(smu, count) \ diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index c9e5ce135fd4..3a5d00573d2c 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -64,7 +64,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu, return 0; } -int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg) +static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg) { struct amdgpu_device *adev = smu->adev; @@ -92,7 +92,8 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu) int smu_v11_0_send_msg_with_param(struct smu_context *smu, enum smu_message_type msg, - uint32_t param) + uint32_t param, + uint32_t *read_arg) { struct amdgpu_device *adev = smu->adev; int ret = 0, index = 0; @@ -101,11 +102,12 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, if (index < 0) return index; + mutex_lock(&smu->message_lock); ret = smu_v11_0_wait_for_response(smu); if (ret) { pr_err("Msg issuing pre-check failed and " "SMU may be not in the right state!\n"); - return ret; + goto out; } WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); @@ -115,10 +117,21 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index); ret = smu_v11_0_wait_for_response(smu); - if (ret) + if (ret) { pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n", smu_get_message_name(smu, msg), index, param, ret); - + goto out; + } + if (read_arg) { + ret = smu_v11_0_read_arg(smu, read_arg); + if (ret) { + pr_err("failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n", + smu_get_message_name(smu, msg), index, param, ret); + goto out; + } + } +out: + mutex_unlock(&smu->message_lock); return ret; } @@ -262,6 +275,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu) case CHIP_NAVI10: smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10; break; + case CHIP_NAVI12: + smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12; + break; case CHIP_NAVI14: smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14; break; @@ -671,12 +687,14 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSystemVirtualDramAddrHigh, - address_high); + address_high, + NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSystemVirtualDramAddrLow, - address_low); + address_low, + NULL); if (ret) return ret; @@ -685,15 +703,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) address_low = (uint32_t)lower_32_bits(address); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh, - address_high); + address_high, NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow, - address_low); + address_low, 
NULL); if (ret) return ret; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize, - (uint32_t)memory_pool->size); + (uint32_t)memory_pool->size, NULL); if (ret) return ret; @@ -757,7 +775,7 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) int ret; ret = smu_send_smc_msg_with_param(smu, - SMU_MSG_SetMinDeepSleepDcefclk, clk); + SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL); if (ret) pr_err("SMU11 attempt to set divider for DCEFCLK Failed!"); @@ -784,11 +802,13 @@ int smu_v11_0_set_driver_table_location(struct smu_context *smu) if (driver_table->mc_address) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh, - upper_32_bits(driver_table->mc_address)); + upper_32_bits(driver_table->mc_address), + NULL); if (!ret) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow, - lower_32_bits(driver_table->mc_address)); + lower_32_bits(driver_table->mc_address), + NULL); } return ret; @@ -802,11 +822,13 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu) if (tool_table->mc_address) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetToolsDramAddrHigh, - upper_32_bits(tool_table->mc_address)); + upper_32_bits(tool_table->mc_address), + NULL); if (!ret) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetToolsDramAddrLow, - lower_32_bits(tool_table->mc_address)); + lower_32_bits(tool_table->mc_address), + NULL); } return ret; @@ -819,7 +841,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) if (!smu->pm_enabled) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL); return ret; } @@ -837,12 +859,12 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu) bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, - feature_mask[1]); + feature_mask[1], NULL); if (ret) goto failed; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow, - feature_mask[0]); + feature_mask[0], NULL); if (ret) goto failed; @@ -862,17 +884,11 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu, return -EINVAL; if (bitmap_empty(feature->enabled, feature->feature_num)) { - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_high); + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high); if (ret) return ret; - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_low); + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low); if (ret) return ret; @@ -894,7 +910,7 @@ int smu_v11_0_system_features_control(struct smu_context *smu, int ret = 0; ret = smu_send_smc_msg(smu, (en ? 
SMU_MSG_EnableAllSmuFeatures : - SMU_MSG_DisableAllSmuFeatures)); + SMU_MSG_DisableAllSmuFeatures), NULL); if (ret) return ret; @@ -923,7 +939,7 @@ int smu_v11_0_notify_display_change(struct smu_context *smu) return ret; if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL); return ret; } @@ -947,30 +963,24 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, return -EINVAL; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq, - clk_id << 16); + clk_id << 16, clock); if (ret) { pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!"); return ret; } - ret = smu_read_smc_arg(smu, clock); - if (ret) - return ret; - if (*clock != 0) return 0; /* if DC limit is zero, return AC limit */ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, - clk_id << 16); + clk_id << 16, clock); if (ret) { pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!"); return ret; } - ret = smu_read_smc_arg(smu, clock); - - return ret; + return 0; } int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) @@ -1106,7 +1116,7 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) return -EOPNOTSUPP; } - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL); if (ret) { pr_err("[%s] Set power limit Failed!\n", __func__); return ret; @@ -1136,11 +1146,7 @@ int smu_v11_0_get_current_clk_freq(struct smu_context *smu, ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq); else { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq, - (asic_clk_id << 16)); - if (ret) - return ret; - - ret = smu_read_smc_arg(smu, &freq); + (asic_clk_id << 16), &freq); if (ret) return ret; } @@ -1375,9 +1381,9 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) - ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); + ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); else - ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); + ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); break; default: break; @@ -1515,7 +1521,8 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, int ret = 0; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetXgmiMode, - pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3); + pstate ? 
XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, + NULL); return ret; } @@ -1628,14 +1635,14 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) { int ret = 0; - ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME); + ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL); return ret; } static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq) { - return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq); + return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL); } bool smu_v11_0_baco_is_support(struct smu_context *smu) @@ -1704,12 +1711,12 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) data |= 0x80000000; WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL); } else { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL); } } else { - ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco); + ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL); if (ret) goto out; @@ -1777,19 +1784,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c param = (clk_id & 0xffff) << 16; if (max) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param); - if (ret) - goto failed; - ret = smu_read_smc_arg(smu, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max); if (ret) goto failed; } if (min) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param); - if (ret) - goto failed; - ret = smu_read_smc_arg(smu, min); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min); if (ret) goto failed; } @@ -1811,7 +1812,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ if (max > 0) { param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, - param); + param, NULL); if (ret) return ret; } @@ -1819,7 +1820,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ if (min > 0) { param = (uint32_t)((clk_id << 16) | (min & 0xffff)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, - param); + param, NULL); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 518e6597bf2d..d52e624f16d3 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -50,7 +50,7 @@ int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, return 0; } -int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) +static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) { struct amdgpu_device *adev = smu->adev; @@ -78,7 +78,8 @@ int smu_v12_0_wait_for_response(struct smu_context *smu) int smu_v12_0_send_msg_with_param(struct smu_context *smu, enum smu_message_type msg, - uint32_t param) + uint32_t param, + uint32_t *read_arg) { struct amdgpu_device *adev = smu->adev; int ret = 0, index = 0; @@ -87,11 +88,12 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, if (index < 0) return index; + mutex_lock(&smu->message_lock); ret = smu_v12_0_wait_for_response(smu); if (ret) { pr_err("Msg issuing pre-check failed and " "SMU may be not in the right state!\n"); - return ret; + goto out; } WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); @@ -101,10 +103,21 @@ 
smu_v12_0_send_msg_with_param(struct smu_context *smu, smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index); ret = smu_v12_0_wait_for_response(smu); - if (ret) + if (ret) { pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n", index, ret, param); - + goto out; + } + if (read_arg) { + ret = smu_v12_0_read_arg(smu, read_arg); + if (ret) { + pr_err("Failed to read message arg 0x%x, response 0x%x param 0x%x\n", + index, ret, param); + goto out; + } + } +out: + mutex_unlock(&smu->message_lock); return ret; } @@ -163,9 +176,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) return 0; if (gate) - return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma); + return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL); else - return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma); + return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL); } int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) @@ -174,9 +187,9 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) return 0; if (gate) - return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); else - return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn); + return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn, NULL); } int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate) @@ -185,9 +198,9 @@ int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate) return 0; if (gate) - return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0); + return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); else - return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0); + return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL); } int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) @@ -196,7 +209,9 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) return 0; return smu_v12_0_send_msg_with_param(smu, - SMU_MSG_SetGfxCGPG, enable ? 1 : 0); + SMU_MSG_SetGfxCGPG, + enable ? 
1 : 0, + NULL); } int smu_v12_0_read_sensor(struct smu_context *smu, @@ -262,10 +277,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) int ret = 0, timeout = 500; if (enable) { - ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); + ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); } else { - ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); + ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); /* confirm gfx is back to "on" state, timeout is 0.5 second */ while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) { @@ -331,17 +346,11 @@ int smu_v12_0_get_enabled_mask(struct smu_context *smu, if (!feature_mask || num < 2) return -EINVAL; - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_high); + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high); if (ret) return ret; - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow); - if (ret) - return ret; - ret = smu_read_smc_arg(smu, &feature_mask_low); + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low); if (ret) return ret; @@ -388,14 +397,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: - ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency); + ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max); if (ret) { pr_err("Attempt to get max GX frequency from SMC Failed !\n"); goto failed; } - ret = smu_read_smc_arg(smu, max); - if (ret) - goto failed; break; case SMU_UCLK: case SMU_FCLK: @@ -419,14 +425,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: - ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency); + ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min); if (ret) { pr_err("Attempt to get min GX frequency from SMC Failed !\n"); goto failed; } - ret = smu_read_smc_arg(smu, min); - if (ret) - goto failed; break; case SMU_UCLK: case SMU_FCLK: @@ -450,7 +453,7 @@ failed: } int smu_v12_0_mode2_reset(struct smu_context *smu){ - return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2); + return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL); } int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -461,39 +464,39 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_ switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL); if (ret) return ret; break; case SMU_FCLK: case SMU_MCLK: - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL); if (ret) return ret; break; case SMU_SOCCLK: - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min); + ret = smu_send_smc_msg_with_param(smu, 
SMU_MSG_SetHardMinSocclkByFreq, min, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL); if (ret) return ret; break; case SMU_VCLK: - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL); if (ret) return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL); if (ret) return ret; break; @@ -512,11 +515,13 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu) if (driver_table->mc_address) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh, - upper_32_bits(driver_table->mc_address)); + upper_32_bits(driver_table->mc_address), + NULL); if (!ret) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow, - lower_32_bits(driver_table->mc_address)); + lower_32_bits(driver_table->mc_address), + NULL); } return ret; diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 4ad8d6c14ee5..d7fa8c02c166 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -587,7 +587,7 @@ static int vega20_check_powerplay_table(struct smu_context *smu) static int vega20_run_btc_afll(struct smu_context *smu) { - return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); + return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL); } #define FEATURE_MASK(feature) (1ULL << feature) @@ -670,13 +670,13 @@ vega20_set_single_dpm_table(struct smu_context *smu, ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, - (clk_id << 16 | 0xFF)); + (clk_id << 16 | 0xFF), + &num_of_levels); if (ret) { pr_err("[GetNumOfDpmLevel] failed to get dpm levels!"); return ret; } - smu_read_smc_arg(smu, &num_of_levels); if (!num_of_levels) { pr_err("[GetNumOfDpmLevel] number of clk levels is invalid!"); return -EINVAL; @@ -687,12 +687,12 @@ vega20_set_single_dpm_table(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex, - (clk_id << 16 | i)); + (clk_id << 16 | i), + &clk); if (ret) { pr_err("[GetDpmFreqByIndex] failed to get dpm freq by index!"); return ret; } - smu_read_smc_arg(smu, &clk); if (!clk) { pr_err("[GetDpmFreqByIndex] clk value is invalid!"); return -EINVAL; @@ -1200,7 +1200,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_GFXCLK << 16) | (freq & 0xffff)); + (PPCLK_GFXCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s gfxclk !\n", max ? "max" : "min"); @@ -1215,7 +1216,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), - (PPCLK_UCLK << 16) | (freq & 0xffff)); + (PPCLK_UCLK << 16) | (freq & 0xffff), + NULL); if (ret) { pr_err("Failed to set soft %s memclk !\n", max ? "max" : "min"); @@ -1230,7 +1232,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max, single_dpm_table->dpm_state.soft_min_level; ret = smu_send_smc_msg_with_param(smu, (max ? 
@@ -1230,7 +1232,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 				single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_SOCCLK << 16) | (freq & 0xffff));
+			(PPCLK_SOCCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s socclk !\n",
 						max ? "max" : "min");
@@ -1245,7 +1248,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 				single_dpm_table->dpm_state.soft_min_level;
 		ret = smu_send_smc_msg_with_param(smu,
 			(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
-			(PPCLK_FCLK << 16) | (freq & 0xffff));
+			(PPCLK_FCLK << 16) | (freq & 0xffff),
+			NULL);
 		if (ret) {
 			pr_err("Failed to set soft %s fclk !\n",
 						max ? "max" : "min");
@@ -1260,7 +1264,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
 		if (!max) {
 			ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetHardMinByFreq,
-				(PPCLK_DCEFCLK << 16) | (freq & 0xffff));
+				(PPCLK_DCEFCLK << 16) | (freq & 0xffff),
+				NULL);
 			if (ret) {
 				pr_err("Failed to set hard min dcefclk !\n");
 				return ret;
@@ -1421,7 +1426,9 @@ static int vega20_force_clk_levels(struct smu_context *smu,
 		}
 
 		ret = smu_send_smc_msg_with_param(smu,
-			SMU_MSG_SetMinLinkDpmByIndex, soft_min_level);
+				SMU_MSG_SetMinLinkDpmByIndex,
+				soft_min_level,
+				NULL);
 		if (ret)
 			pr_err("Failed to set min link dpm level!\n");
 
@@ -1477,13 +1484,13 @@ static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
 
 	ret = smu_send_smc_msg_with_param(smu,
 			SMU_MSG_GetAVFSVoltageByDpm,
-			((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+			((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
+			voltage);
 	if (ret) {
 		pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
 		return ret;
 	}
 
-	smu_read_smc_arg(smu, voltage);
 	*voltage = *voltage / VOLTAGE_SCALE;
 
 	return 0;
@@ -1956,8 +1963,10 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
 	workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
 	if (workload_type < 0)
 		return -EINVAL;
-	smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
-				    1 << workload_type);
+	smu_send_smc_msg_with_param(smu,
+				    SMU_MSG_SetWorkloadMask,
+				    1 << workload_type,
+				    NULL);
 
 	return ret;
 }
@@ -2029,7 +2038,8 @@ vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu,
 		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 		ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetHardMinByFreq,
-				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level);
+				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
+				NULL);
 		if (ret) {
 			pr_err("[%s] Set hard min uclk failed!", __func__);
 			return ret;
@@ -2047,7 +2057,7 @@ static int vega20_pre_display_config_changed(struct smu_context *smu)
 	if (!smu->smu_dpm.dpm_context)
 		return -EINVAL;
 
-	smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+	smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
 
 	ret = vega20_set_uclk_to_highest_dpm_level(smu,
 						   &dpm_table->mem_table);
 	if (ret)
@@ -2074,7 +2084,8 @@ static int vega20_display_config_changed(struct smu_context *smu)
 	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
 		smu_send_smc_msg_with_param(smu,
 					    SMU_MSG_NumOfDisplays,
-					    smu->display_config->num_display);
+					    smu->display_config->num_display,
+					    NULL);
 	}
 
 	return ret;
@@ -2247,7 +2258,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
 		if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
 			ret = smu_send_smc_msg_with_param(smu,
 							  SMU_MSG_SetMinDeepSleepDcefclk,
-							  min_clocks.dcef_clock_in_sr/100);
+							  min_clocks.dcef_clock_in_sr/100,
+							  NULL);
 			if (ret) {
 				pr_err("Attempt to set divider for DCEFCLK Failed!");
 				return ret;
@@ -2262,7 +2274,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
 		memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100;
 		ret = smu_send_smc_msg_with_param(smu,
 						  SMU_MSG_SetHardMinByFreq,
-						  (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level);
+						  (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level,
+						  NULL);
 		if (ret) {
 			pr_err("[%s] Set hard min uclk failed!", __func__);
 			return ret;
@@ -2853,8 +2866,10 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu)
 	struct smu_table_context *table_context = &smu->smu_table;
 	PPTable_t *pptable = table_context->driver_pptable;
 
-	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
-			(uint32_t)pptable->FanTargetTemperature);
+	ret = smu_send_smc_msg_with_param(smu,
+					  SMU_MSG_SetFanTemperatureTarget,
+					  (uint32_t)pptable->FanTargetTemperature,
+					  NULL);
 
 	return ret;
 }
@@ -2864,15 +2879,13 @@ static int vega20_get_fan_speed_rpm(struct smu_context *smu,
 {
 	int ret;
 
-	ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+	ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, speed);
 
 	if (ret) {
 		pr_err("Attempt to get current RPM from SMC Failed!\n");
 		return ret;
 	}
 
-	smu_read_smc_arg(smu, speed);
-
 	return 0;
 }
 
@@ -3137,7 +3150,7 @@ static int vega20_set_df_cstate(struct smu_context *smu,
 		return -EINVAL;
 	}
 
-	return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+	return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
 
 static int vega20_update_pcie_parameters(struct smu_context *smu,
@@ -3155,7 +3168,8 @@ static int vega20_update_pcie_parameters(struct smu_context *smu,
 					pptable->PcieLaneCount[i] : pcie_width_cap);
 		ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_OverridePcieParameters,
-					  smu_pcie_arg);
+					  smu_pcie_arg,
+					  NULL);
 	}
 
 	return ret;
@@ -3229,7 +3243,6 @@ static const struct pptable_funcs vega20_ppt_funcs = {
 	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
 	.system_features_control = smu_v11_0_system_features_control,
 	.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
-	.read_smc_arg = smu_v11_0_read_arg,
 	.init_display_count = smu_v11_0_init_display_count,
 	.set_allowed_mask = smu_v11_0_set_allowed_mask,
 	.get_enabled_mask = smu_v11_0_get_enabled_mask,
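Several of the vega20 messages above pack two values into the single 32-bit message argument: the clock ID in the high 16 bits and a level index or frequency in the low 16 bits, where an index of 0xFF evidently asks for the number of levels rather than a specific one (that is what the [GetNumOfDpmLevel] error path implies). A fragment showing the packing as the patch uses it (clk_id and i are illustrative locals):

	uint32_t num_of_levels, clk;
	int ret;

	/* Index 0xFF in the low half: query how many DPM levels
	 * this clock exposes.
	 */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  (clk_id << 16 | 0xFF),
					  &num_of_levels);

	/* Index i in the low half: query the frequency of level i
	 * for the same clock.
	 */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  (clk_id << 16 | i),
					  &clk);

With the reply pointer folded into send_smc_msg_with_param, the separate .read_smc_arg callback has no remaining users, which is why the last hunk deletes its entry from pptable_funcs.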
pr_err("Attempt to set divider for DCEFCLK Failed!"); return ret; @@ -2262,7 +2274,8 @@ vega20_notify_smc_display_config(struct smu_context *smu) memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100; ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level); + (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level, + NULL); if (ret) { pr_err("[%s] Set hard min uclk failed!", __func__); return ret; @@ -2853,8 +2866,10 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu) struct smu_table_context *table_context = &smu->smu_table; PPTable_t *pptable = table_context->driver_pptable; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget, - (uint32_t)pptable->FanTargetTemperature); + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_SetFanTemperatureTarget, + (uint32_t)pptable->FanTargetTemperature, + NULL); return ret; } @@ -2864,15 +2879,13 @@ static int vega20_get_fan_speed_rpm(struct smu_context *smu, { int ret; - ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm); + ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, speed); if (ret) { pr_err("Attempt to get current RPM from SMC Failed!\n"); return ret; } - smu_read_smc_arg(smu, speed); - return 0; } @@ -3137,7 +3150,7 @@ static int vega20_set_df_cstate(struct smu_context *smu, return -EINVAL; } - return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state); + return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); } static int vega20_update_pcie_parameters(struct smu_context *smu, @@ -3155,7 +3168,8 @@ static int vega20_update_pcie_parameters(struct smu_context *smu, pptable->PcieLaneCount[i] : pcie_width_cap); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, - smu_pcie_arg); + smu_pcie_arg, + NULL); } return ret; @@ -3229,7 +3243,6 @@ static const struct pptable_funcs vega20_ppt_funcs = { .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .system_features_control = smu_v11_0_system_features_control, .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, - .read_smc_arg = smu_v11_0_read_arg, .init_display_count = smu_v11_0_init_display_count, .set_allowed_mask = smu_v11_0_set_allowed_mask, .get_enabled_mask = smu_v11_0_get_enabled_mask, diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 63bccd201b97..90fd9c30ae5a 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -84,6 +84,24 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, EXPORT_SYMBOL(drm_sched_entity_init); /** + * drm_sched_entity_modify_sched - Modify sched of an entity + * @entity: scheduler entity to init + * @sched_list: the list of new drm scheds which will replace + * existing entity->sched_list + * @num_sched_list: number of drm sched in sched_list + */ +void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, + struct drm_gpu_scheduler **sched_list, + unsigned int num_sched_list) +{ + WARN_ON(!num_sched_list || !sched_list); + + entity->sched_list = sched_list; + entity->num_sched_list = num_sched_list; +} +EXPORT_SYMBOL(drm_sched_entity_modify_sched); + +/** * drm_sched_entity_is_idle - Check if entity is idle * * @entity: scheduler entity |