| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2023-08-20 14:29:37 +0200 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2023-08-20 14:29:37 +0200 |
| commit | 642073c306e66daca108cb630d169129e50a6ba3 (patch) | |
| tree | fe2edb842a30b67d369e5934602560aa3719b02e | /drivers/gpu/drm |
| parent | tty: gdm724x: simplify gdm_tty_write() (diff) | |
| parent | Merge tag 'tty-6.5-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/greg... (diff) | |
| download | linux-642073c306e66daca108cb630d169129e50a6ba3.tar.xz | linux-642073c306e66daca108cb630d169129e50a6ba3.zip |
Merge commit b320441c04c9 ("Merge tag 'tty-6.5-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty") into tty-next
We need the serial-core fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/gpu/drm')
57 files changed, 562 insertions, 308 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a3b86b86dc47..6dc950c1b689 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1296,6 +1296,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
+bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
 bool amdgpu_device_pcie_dynamic_switching_supported(void);
 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
 bool amdgpu_device_aspm_support_quirk(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 040f4cb6ab2d..fb78a8f47587 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -295,7 +295,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 
 	if (!p->gang_size) {
 		ret = -EINVAL;
-		goto free_partial_kdata;
+		goto free_all_kdata;
 	}
 
 	for (i = 0; i < p->gang_size; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a2cdde0ca0a7..6238701cde23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1459,6 +1459,32 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
 }
 
 /*
+ * On APUs with >= 64GB white flickering has been observed w/ SG enabled.
+ * Disable S/G on such systems until we have a proper fix.
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
+ */
+bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
+{
+	switch (amdgpu_sg_display) {
+	case -1:
+		break;
+	case 0:
+		return false;
+	case 1:
+		return true;
+	default:
+		return false;
+	}
+	if ((totalram_pages() << (PAGE_SHIFT - 10)) +
+	    (adev->gmc.real_vram_size / 1024) >= 64000000) {
+		DRM_WARN("Disabling S/G due to >=64GB RAM\n");
+		return false;
+	}
+	return true;
+}
+
+/*
  * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
  * speed switching. Until we have confirmation from Intel that a specific host
  * supports it, it's safer that we keep it disabled for all.
@@ -3696,10 +3722,11 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
 {
 	if (amdgpu_mcbp == 1)
 		adev->gfx.mcbp = true;
-
-	if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
-	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
-	    adev->gfx.num_gfx_rings)
+	else if (amdgpu_mcbp == 0)
+		adev->gfx.mcbp = false;
+	else if ((adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 0)) &&
+		 (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 0, 0)) &&
+		 adev->gfx.num_gfx_rings)
 		adev->gfx.mcbp = true;
 
 	if (amdgpu_sriov_vf(adev))
@@ -4367,6 +4394,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
 
 	cancel_delayed_work_sync(&adev->delayed_init_work);
+	flush_delayed_work(&adev->gfx.gfx_off_delay_work);
 
 	amdgpu_ras_suspend(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index c694b41f6461..7537f5aa76f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -552,6 +552,41 @@ int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
 }
 
 /**
+ * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
+ * fence driver interrupts need to be restored.
+ *
+ * @ring: ring that to be checked
+ *
+ * Interrupts for rings that belong to GFX IP don't need to be restored
+ * when the target power state is s0ix.
+ *
+ * Return true if need to restore interrupts, false otherwise.
+ */
+static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	bool is_gfx_power_domain = false;
+
+	switch (ring->funcs->type) {
+	case AMDGPU_RING_TYPE_SDMA:
+		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
+		if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0))
+			is_gfx_power_domain = true;
+		break;
+	case AMDGPU_RING_TYPE_GFX:
+	case AMDGPU_RING_TYPE_COMPUTE:
+	case AMDGPU_RING_TYPE_KIQ:
+	case AMDGPU_RING_TYPE_MES:
+		is_gfx_power_domain = true;
+		break;
+	default:
+		break;
+	}
+
+	return !(adev->in_s0ix && is_gfx_power_domain);
+}
+
+/**
  * amdgpu_fence_driver_hw_fini - tear down the fence driver
  * for all possible rings.
 *
@@ -579,7 +614,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 		amdgpu_fence_driver_force_completion(ring);
 
 		if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
-		    ring->fence_drv.irq_src)
+		    ring->fence_drv.irq_src &&
+		    amdgpu_fence_need_ring_interrupt_restore(ring))
 			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 				       ring->fence_drv.irq_type);
 
@@ -655,7 +691,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
 			continue;
 
 		/* enable the interrupt */
-		if (ring->fence_drv.irq_src)
+		if (ring->fence_drv.irq_src &&
+		    amdgpu_fence_need_ring_interrupt_restore(ring))
 			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
 				       ring->fence_drv.irq_type);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index a33d4bc34cee..fd81b04559d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -692,15 +692,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
 		if (adev->gfx.gfx_off_req_count == 0 &&
 		    !adev->gfx.gfx_off_state) {
-			/* If going to s2idle, no need to wait */
-			if (adev->in_s0ix) {
-				if (!amdgpu_dpm_set_powergating_by_smu(adev,
-						AMD_IP_BLOCK_TYPE_GFX, true))
-					adev->gfx.gfx_off_state = true;
-			} else {
-				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+			schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
 					      delay);
-			}
 		}
 	} else {
 		if (adev->gfx.gfx_off_req_count == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
index b779ee4bbaa7..e1ee1c7117fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -397,7 +397,7 @@ void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
 	struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
 
 	WARN_ON(!ring->is_sw_ring);
-	if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
+	if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
 		if (amdgpu_mcbp_scan(mux) > 0)
 			amdgpu_mcbp_trigger_preempt(mux);
 		return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index 9c9cca129498..565a1fa436d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -239,8 +239,13 @@ static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
 
 	for (i = 1; i < MAX_XCP; i++) {
 		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
-		if (ret)
+		if (ret == -ENOSPC) {
+			dev_warn(adev->dev,
+			"Skip xcp node #%d when out of drm node resource.", i);
+			return 0;
+		} else if (ret) {
 			return ret;
+		}
 
 		/* Redirect all IOCTLs to the primary device */
 		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
@@ -328,6 +333,9 @@ int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
 		return 0;
 
 	for (i = 1; i < MAX_XCP; i++) {
+		if (!adev->xcp_mgr->xcp[i].ddev)
+			break;
+
 		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
 		if (ret)
 			return ret;
@@ -345,6 +353,9 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
 		return;
 
 	for (i = 1; i < MAX_XCP; i++) {
+		if (!adev->xcp_mgr->xcp[i].ddev)
+			break;
+
 		p_ddev = adev->xcp_mgr->xcp[i].ddev;
 		drm_dev_unplug(p_ddev);
 		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 3a7af59e83ca..0451533ddde4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -471,8 +471,12 @@ static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
 	case IP_VERSION(11, 0, 3):
 		if ((adev->gfx.me_fw_version >= 1505) &&
 		    (adev->gfx.pfp_fw_version >= 1600) &&
-		    (adev->gfx.mec_fw_version >= 512))
-			adev->gfx.cp_gfx_shadow = true;
+		    (adev->gfx.mec_fw_version >= 512)) {
+			if (amdgpu_sriov_vf(adev))
+				adev->gfx.cp_gfx_shadow = true;
+			else
+				adev->gfx.cp_gfx_shadow = false;
+		}
 		break;
 	default:
 		adev->gfx.cp_gfx_shadow = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index e1a392bcea70..af5685f4cb34 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -137,14 +137,15 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
 	int ret;
 	int retry_loop;
 
+	/* Wait for bootloader to signify that it is ready having bit 31 of
+	 * C2PMSG_35 set to 1. All other bits are expected to be cleared.
+	 * If there is an error in processing command, bits[7:0] will be set.
+	 * This is applicable for PSP v13.0.6 and newer.
+	 */
 	for (retry_loop = 0; retry_loop < 10; retry_loop++) {
-		/* Wait for bootloader to signify that is
-		   ready having bit 31 of C2PMSG_35 set to 1 */
-		ret = psp_wait_for(psp,
-				   SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
-				   0x80000000,
-				   0x80000000,
-				   false);
+		ret = psp_wait_for(
+			psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+			0x80000000, 0xffffffff, false);
 
 		if (ret == 0)
 			return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 49f40d9f16e8..f5a6f562e2a8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1543,11 +1543,7 @@ static bool kfd_ignore_crat(void)
 	if (ignore_crat)
 		return true;
 
-#ifndef KFD_SUPPORT_IOMMU_V2
 	ret = true;
-#else
-	ret = false;
-#endif
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 0b3dc754e06b..a53e0757fe64 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -194,11 +194,6 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
 
 		kfd_device_info_set_event_interrupt_class(kfd);
 
-		/* Raven */
-		if (gc_version == IP_VERSION(9, 1, 0) ||
-		    gc_version == IP_VERSION(9, 2, 2))
-			kfd->device_info.needs_iommu_device = true;
-
 		if (gc_version < IP_VERSION(11, 0, 0)) {
 			/* Navi2x+, Navi1x+ */
 			if (gc_version == IP_VERSION(10, 3, 6))
@@ -233,10 +228,6 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
 		    asic_type != CHIP_TONGA)
 			kfd->device_info.supports_cwsr = true;
 
-		if (asic_type == CHIP_KAVERI ||
-		    asic_type == CHIP_CARRIZO)
-			kfd->device_info.needs_iommu_device = true;
-
 		if (asic_type != CHIP_HAWAII && !vf)
 			kfd->device_info.needs_pci_atomics = true;
 	}
@@ -249,7 +240,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 	uint32_t gfx_target_version = 0;
 
 	switch (adev->asic_type) {
-#ifdef KFD_SUPPORT_IOMMU_V2
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_KAVERI:
 		gfx_target_version = 70000;
@@ -262,7 +252,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 		if (!vf)
 			f2g = &gfx_v8_kfd2kgd;
 		break;
-#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_HAWAII:
 		gfx_target_version = 70001;
@@ -298,7 +287,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 		gfx_target_version = 90000;
 		f2g = &gfx_v9_kfd2kgd;
 		break;
-#ifdef KFD_SUPPORT_IOMMU_V2
 	/* Raven */
 	case IP_VERSION(9, 1, 0):
 	case IP_VERSION(9, 2, 2):
@@ -306,7 +294,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 		if (!vf)
 			f2g = &gfx_v9_kfd2kgd;
 		break;
-#endif
 	/* Vega12 */
 	case IP_VERSION(9, 2, 1):
 		gfx_target_version = 90004;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 2df153828ff4..01192f5abe46 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2538,18 +2538,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
 	}
 
 	switch (dev->adev->asic_type) {
-	case CHIP_CARRIZO:
-		device_queue_manager_init_vi(&dqm->asic_ops);
-		break;
-
 	case CHIP_KAVERI:
-		device_queue_manager_init_cik(&dqm->asic_ops);
-		break;
-
 	case CHIP_HAWAII:
 		device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
 		break;
 
+	case CHIP_CARRIZO:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 61fc62f3e003..4a17bb7c7b27 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1965,7 +1965,14 @@ int kfd_topology_add_device(struct kfd_node *gpu)
 	const char *asic_name = amdgpu_asic_name[gpu->adev->asic_type];
 
 	gpu_id = kfd_generate_gpu_id(gpu);
-	pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+	if (gpu->xcp && !gpu->xcp->ddev) {
+		dev_warn(gpu->adev->dev,
+		"Won't add GPU (ID: 0x%x) to topology since it has no drm node assigned.",
+		gpu_id);
+		return 0;
+	} else {
+		pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
+	}
 
 	/* Check to see if this gpu device exists in the topology_device_list.
 	 * If so, assign the gpu to that device,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0fa739fd6a9c..e5554a36e8c8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1638,9 +1638,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		}
 		break;
 	}
-	if (init_data.flags.gpu_vm_support &&
-	    (amdgpu_sg_display == 0))
-		init_data.flags.gpu_vm_support = false;
+	if (init_data.flags.gpu_vm_support)
+		init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
 
 	if (init_data.flags.gpu_vm_support)
 		adev->mode_info.gpu_vm_support = true;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 9bc86deac9e8..b885c39bd16b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1320,7 +1320,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 		if (computed_streams[i])
 			continue;
 
-		if (!res_pool->funcs->remove_stream_from_ctx ||
+		if (res_pool->funcs->remove_stream_from_ctx &&
 		    res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
 			return -EINVAL;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 20d4d08a6a2f..6966420dfbac 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -777,7 +777,8 @@ void dce110_edp_wait_for_hpd_ready(
 	dal_gpio_destroy_irq(&hpd);
 
 	/* ensure that the panel is detected */
-	ASSERT(edp_hpd_high);
+	if (!edp_hpd_high)
+		DC_LOG_DC("%s: wait timed out!\n", __func__);
 }
 
 void dce110_edp_power_control(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 4cc8de2627ce..9f2e24398cd7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -712,7 +712,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 		.timing_trace = false,
 		.clock_trace = true,
 		.disable_pplib_clock_request = true,
-		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
+		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
 		.force_single_disp_pipe_split = false,
 		.disable_dcc = DCC_ENABLE,
 		.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index e5b7ef7422b8..50dc83404644 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -357,8 +357,11 @@ void dpp3_set_cursor_attributes(
 	int cur_rom_en = 0;
 
 	if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
-		color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
-		cur_rom_en = 1;
+		color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+		if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+			cur_rom_en = 1;
+		}
+	}
 
 	REG_UPDATE_3(CURSOR0_CONTROL,
 			CUR0_MODE, color_format,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ce41a8309582..222af2fae745 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1581,9 +1581,9 @@ static int smu_disable_dpms(struct smu_context *smu)
 
 	/*
 	 * For SMU 13.0.4/11, PMFW will handle the features disablement properly
-	 * for gpu reset case. Driver involvement is unnecessary.
+	 * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
 	 */
-	if (amdgpu_in_reset(adev)) {
+	if (amdgpu_in_reset(adev) || adev->in_s0ix) {
 		switch (adev->ip_versions[MP1_HWIP][0]) {
 		case IP_VERSION(13, 0, 4):
 		case IP_VERSION(13, 0, 11):
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 0cda3b276f61..f0800c0c5168 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -588,7 +588,9 @@ err0_out:
 	return -ENOMEM;
 }
 
-static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
+static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu,
+							    bool use_metrics_v3,
+							    bool use_metrics_v2)
 {
 	struct smu_table_context *smu_table= &smu->smu_table;
 	SmuMetricsExternal_t *metrics_ext =
@@ -596,13 +598,11 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
 	uint32_t throttler_status = 0;
 	int i;
 
-	if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
-	    (smu->smc_fw_version >= 0x3A4900)) {
+	if (use_metrics_v3) {
 		for (i = 0; i < THROTTLER_COUNT; i++)
 			throttler_status |=
 				(metrics_ext->SmuMetrics_V3.ThrottlingPercentage[i] ? 1U << i : 0);
-	} else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
-	    (smu->smc_fw_version >= 0x3A4300)) {
+	} else if (use_metrics_v2) {
 		for (i = 0; i < THROTTLER_COUNT; i++)
 			throttler_status |=
 				(metrics_ext->SmuMetrics_V2.ThrottlingPercentage[i] ? 1U << i : 0);
@@ -864,7 +864,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
 					metrics->TemperatureVrSoc) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 		break;
 	case METRICS_THROTTLER_STATUS:
-		*value = sienna_cichlid_get_throttler_status_locked(smu);
+		*value = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
 		break;
 	case METRICS_CURR_FANSPEED:
 		*value = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
@@ -4017,7 +4017,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
 	gpu_metrics->current_dclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] :
 		use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] : metrics->CurrClock[PPCLK_DCLK_1];
 
-	gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu);
+	gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
 	gpu_metrics->indep_throttle_status =
 			smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
 							   sienna_cichlid_throttler_map);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 3d188616ba24..0fb6be11a0cc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -332,10 +332,13 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
 		table_context->power_play_table;
 	struct smu_baco_context *smu_baco = &smu->smu_baco;
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
+#if 0
+	PPTable_t *pptable = smu->smu_table.driver_pptable;
 	const OverDriveLimits_t * const overdrive_upperlimits =
 				&pptable->SkuTable.OverDriveLimitsBasicMax;
 	const OverDriveLimits_t * const overdrive_lowerlimits =
 				&pptable->SkuTable.OverDriveLimitsMin;
+#endif
 
 	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
 		smu->dc_controlled_by_gpio = true;
@@ -347,18 +350,30 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
 	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
 		smu_baco->maco_support = true;
 
+	/*
+	 * We are in the transition to a new OD mechanism.
+	 * Disable the OD feature support for SMU13 temporarily.
+	 * TODO: get this reverted when new OD mechanism online
+	 */
+#if 0
 	if (!overdrive_lowerlimits->FeatureCtrlMask ||
 	    !overdrive_upperlimits->FeatureCtrlMask)
 		smu->od_enabled = false;
 
-	table_context->thermal_controller_type =
-		powerplay_table->thermal_controller_type;
-
 	/*
 	 * Instead of having its own buffer space and get overdrive_table copied,
 	 * smu->od_settings just points to the actual overdrive_table
 	 */
 	smu->od_settings = &powerplay_table->overdrive_table;
+#else
+	smu->od_enabled = false;
+#endif
+
+	table_context->thermal_controller_type =
+		powerplay_table->thermal_controller_type;
+
+	smu->adev->pm.no_fan =
+		!(pptable->SkuTable.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
 
 	return 0;
 }
@@ -1140,7 +1155,6 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
 		(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
 	struct smu_13_0_dpm_table *single_dpm_table;
 	struct smu_13_0_pcie_table *pcie_table;
-	const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
 	uint32_t gen_speed, lane_width;
 	int i, curr_freq, size = 0;
 	int32_t min_value, max_value;
@@ -1256,7 +1270,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
"x16" : "", pcie_table->clk_freq[i], (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) && - (lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ? + (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ? "*" : ""); break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 1ac552142763..dc6104a04dce 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -81,9 +81,10 @@ #define EPSILON 1 #define smnPCIE_ESM_CTRL 0x193D0 -#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1ab40288 +#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288 #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L #define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 +#define MAX_LINK_WIDTH 6 static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), @@ -708,16 +709,19 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu, *value = SMUQ10_TO_UINT(metrics->SocketPower) << 8; break; case METRICS_TEMPERATURE_HOTSPOT: - *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature); + *value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; case METRICS_TEMPERATURE_MEM: - *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature); + *value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; /* This is the max of all VRs and not just SOC VR. * No need to define another data type for the same. */ case METRICS_TEMPERATURE_VRSOC: - *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature); + *value = SMUQ10_TO_UINT(metrics->MaxVrTemperature) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; break; default: *value = UINT_MAX; @@ -1966,6 +1970,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table struct amdgpu_device *adev = smu->adev; int ret = 0, inst0, xcc0; MetricsTable_t *metrics; + u16 link_width_level; inst0 = adev->sdma.instance[0].aid_id; xcc0 = GET_INST(GC, 0); @@ -1993,9 +1998,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->average_socket_power = SMUQ10_TO_UINT(metrics->SocketPower); - /* Energy is reported in 15.625mJ units */ - gpu_metrics->energy_accumulator = - SMUQ10_TO_UINT(metrics->SocketEnergyAcc); + /* Energy counter reported in 15.259uJ (2^-16) units */ + gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc; gpu_metrics->current_gfxclk = SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]); @@ -2017,8 +2021,12 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->throttle_status = 0; if (!(adev->flags & AMD_IS_APU)) { + link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu); + if (link_width_level > MAX_LINK_WIDTH) + link_width_level = 0; + gpu_metrics->pcie_link_width = - smu_v13_0_6_get_current_pcie_link_width_level(smu); + DECODE_LANE_WIDTH(link_width_level); gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index b1f0937ccade..62f2886ab4df 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -323,10 +323,12 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu) struct smu_baco_context *smu_baco = 
 	struct smu_baco_context *smu_baco = &smu->smu_baco;
 	PPTable_t *smc_pptable = table_context->driver_pptable;
 	BoardTable_t *BoardTable = &smc_pptable->BoardTable;
+#if 0
 	const OverDriveLimits_t * const overdrive_upperlimits =
 				&smc_pptable->SkuTable.OverDriveLimitsBasicMax;
 	const OverDriveLimits_t * const overdrive_lowerlimits =
 				&smc_pptable->SkuTable.OverDriveLimitsMin;
+#endif
 
 	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
 		smu->dc_controlled_by_gpio = true;
@@ -338,18 +340,22 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
 	if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
 		smu_baco->maco_support = true;
 
+#if 0
 	if (!overdrive_lowerlimits->FeatureCtrlMask ||
 	    !overdrive_upperlimits->FeatureCtrlMask)
 		smu->od_enabled = false;
 
-	table_context->thermal_controller_type =
-		powerplay_table->thermal_controller_type;
-
 	/*
 	 * Instead of having its own buffer space and get overdrive_table copied,
 	 * smu->od_settings just points to the actual overdrive_table
 	 */
 	smu->od_settings = &powerplay_table->overdrive_table;
+#else
+	smu->od_enabled = false;
+#endif
+
+	table_context->thermal_controller_type =
+		powerplay_table->thermal_controller_type;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 504d51c42f79..aadb396508c5 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -2517,9 +2517,11 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
 	};
 	int int_status[3], i;
 
-	if (it6505->enable_drv_hold || pm_runtime_get_if_in_use(dev) <= 0)
+	if (it6505->enable_drv_hold || !it6505->powered)
 		return IRQ_HANDLED;
 
+	pm_runtime_get_sync(dev);
+
 	int_status[0] = it6505_read(it6505, INT_STATUS_01);
 	int_status[1] = it6505_read(it6505, INT_STATUS_02);
 	int_status[2] = it6505_read(it6505, INT_STATUS_03);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 5163e5224aad..9663601ce098 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -774,9 +774,7 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
 	dsi->lanes = 4;
 	dsi->format = MIPI_DSI_FMT_RGB888;
 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
-			  MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO_NO_HSA |
-			  MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
-			  MIPI_DSI_MODE_NO_EOT_PACKET;
+			  MIPI_DSI_MODE_VIDEO_HSE;
 
 	ret = devm_mipi_dsi_attach(dev, dsi);
 	if (ret < 0) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e0dbd9140726..1f470968ed14 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3456,6 +3456,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connector,
 			    connector->base.id, connector->name);
 		return NULL;
 	}
+	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
+			    connector->base.id, connector->name);
+	}
 
 	/* it is incorrect if hsync/vsync width is zero */
 	if (!hsync_pulse_width || !vsync_pulse_width) {
@@ -3502,27 +3506,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connector,
 	if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
 	} else {
-		switch (pt->misc & DRM_EDID_PT_SYNC_MASK) {
-		case DRM_EDID_PT_ANALOG_CSYNC:
-		case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC:
-			drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n",
-				    connector->base.id, connector->name);
-			mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC;
-			break;
-		case DRM_EDID_PT_DIGITAL_CSYNC:
-			drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n",
-				    connector->base.id, connector->name);
-			mode->flags |= DRM_MODE_FLAG_CSYNC;
-			mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
-				DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC;
-			break;
-		case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC:
-			mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
-				DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
-			mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
-				DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
-			break;
-		}
+		mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+			DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+		mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+			DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 	}
 
 set_size:
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 4ea6507a77e5..baaf0e0feb06 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -623,7 +623,13 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
 	int ret;
 
 	if (obj->import_attach) {
+		/* Reset both vm_ops and vm_private_data, so we don't end up with
+		 * vm_ops pointing to our implementation if the dma-buf backend
+		 * doesn't set those fields.
+		 */
 		vma->vm_private_data = NULL;
+		vma->vm_ops = NULL;
+
 		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
 
 		/* Drop the reference drm_gem_mmap_obj() acquired.*/
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index f0ee9bcf661d..b0c6a2a86f2f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -662,10 +662,24 @@ static const struct intel_display_device_info xe_lpdp_display = {
 		BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
 };
 
+/*
+ * Separate detection for no display cases to keep the display id array simple.
+ *
+ * IVB Q requires subvendor and subdevice matching to differentiate from IVB D
+ * GT2 server.
+ */
+static bool has_no_display(struct pci_dev *pdev)
+{
+	static const struct pci_device_id ids[] = {
+		INTEL_IVB_Q_IDS(0),
+		{}
+	};
+
+	return pci_match_id(ids, pdev);
+}
+
 #undef INTEL_VGA_DEVICE
-#undef INTEL_QUANTA_VGA_DEVICE
 #define INTEL_VGA_DEVICE(id, info) { id, info }
-#define INTEL_QUANTA_VGA_DEVICE(info) { 0x16a, info }
 
 static const struct {
 	u32 devid;
@@ -690,7 +704,6 @@ static const struct {
 	INTEL_IRONLAKE_M_IDS(&ilk_m_display),
 	INTEL_SNB_D_IDS(&snb_display),
 	INTEL_SNB_M_IDS(&snb_display),
-	INTEL_IVB_Q_IDS(NULL), /* must be first IVB in list */
 	INTEL_IVB_M_IDS(&ivb_display),
 	INTEL_IVB_D_IDS(&ivb_display),
 	INTEL_HSW_IDS(&hsw_display),
@@ -775,6 +788,11 @@ intel_display_device_probe(struct drm_i915_private *i915, bool has_gmdid,
 	if (has_gmdid)
 		return probe_gmdid_display(i915, gmdid_ver, gmdid_rel, gmdid_step);
 
+	if (has_no_display(pdev)) {
+		drm_dbg_kms(&i915->drm, "Device doesn't have display\n");
+		return &no_display;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(intel_display_ids); i++) {
 		if (intel_display_ids[i].devid == pdev->device)
 			return intel_display_ids[i].info;
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 21f92123c844..67e3aaf9b432 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -2752,7 +2752,7 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
 	__drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
 					    &conn_state->base.base);
 
-	INIT_LIST_HEAD(&sdvo_connector->base.panel.fixed_modes);
+	intel_panel_init_alloc(&sdvo_connector->base);
 
 	return sdvo_connector;
 }
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 23857cc08eca..2702ad4c26c8 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -165,14 +165,60 @@ static u32 preparser_disable(bool state)
 	return MI_ARB_CHECK | 1 << 8 | state;
 }
 
-u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
+static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
 {
-	u32 gsi_offset = gt->uncore->gsi_offset;
+	switch (engine->id) {
+	case RCS0:
+		return GEN12_CCS_AUX_INV;
+	case BCS0:
+		return GEN12_BCS0_AUX_INV;
+	case VCS0:
+		return GEN12_VD0_AUX_INV;
+	case VCS2:
+		return GEN12_VD2_AUX_INV;
+	case VECS0:
+		return GEN12_VE0_AUX_INV;
+	case CCS0:
+		return GEN12_CCS0_AUX_INV;
+	default:
+		return INVALID_MMIO_REG;
+	}
+}
+
+static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
+{
+	i915_reg_t reg = gen12_get_aux_inv_reg(engine);
+
+	if (IS_PONTEVECCHIO(engine->i915))
+		return false;
+
+	/*
+	 * So far platforms supported by i915 having flat ccs do not require
+	 * AUX invalidation. Check also whether the engine requires it.
+	 */
+	return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
+}
+
+u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
+{
+	i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
+	u32 gsi_offset = engine->gt->uncore->gsi_offset;
+
+	if (!gen12_needs_ccs_aux_inv(engine))
+		return cs;
 
 	*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
 	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
 	*cs++ = AUX_INV;
-	*cs++ = MI_NOOP;
+
+	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
+		MI_SEMAPHORE_REGISTER_POLL |
+		MI_SEMAPHORE_POLL |
+		MI_SEMAPHORE_SAD_EQ_SDD;
+	*cs++ = 0;
+	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
+	*cs++ = 0;
+	*cs++ = 0;
 
 	return cs;
 }
@@ -202,8 +248,13 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 {
 	struct intel_engine_cs *engine = rq->engine;
 
-	if (mode & EMIT_FLUSH) {
-		u32 flags = 0;
+	/*
+	 * On Aux CCS platforms the invalidation of the Aux
+	 * table requires quiescing memory traffic beforehand
+	 */
+	if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
+		u32 bit_group_0 = 0;
+		u32 bit_group_1 = 0;
 		int err;
 		u32 *cs;
 
@@ -211,32 +262,40 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 		if (err)
 			return err;
 
-		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
-		flags |= PIPE_CONTROL_FLUSH_L3;
-		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
-		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+		bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH;
+
+		/*
+		 * When required, in MTL and beyond platforms we
+		 * need to set the CCS_FLUSH bit in the pipe control
+		 */
+		if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
+			bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;
+
+		bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+		bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
+		bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+		bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		/* Wa_1409600907:tgl,adl-p */
-		flags |= PIPE_CONTROL_DEPTH_STALL;
-		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
-		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+		bit_group_1 |= PIPE_CONTROL_DEPTH_STALL;
+		bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE;
+		bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE;
 
-		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-		flags |= PIPE_CONTROL_QW_WRITE;
+		bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX;
+		bit_group_1 |= PIPE_CONTROL_QW_WRITE;
 
-		flags |= PIPE_CONTROL_CS_STALL;
+		bit_group_1 |= PIPE_CONTROL_CS_STALL;
 
 		if (!HAS_3D_PIPELINE(engine->i915))
-			flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
+			bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
 		else if (engine->class == COMPUTE_CLASS)
-			flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
+			bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
 
 		cs = intel_ring_begin(rq, 6);
 		if (IS_ERR(cs))
 			return PTR_ERR(cs);
 
-		cs = gen12_emit_pipe_control(cs,
-					     PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
-					     flags, LRC_PPHWSP_SCRATCH_ADDR);
+		cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1,
+					     LRC_PPHWSP_SCRATCH_ADDR);
 		intel_ring_advance(rq, cs);
 	}
@@ -267,10 +326,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 		else if (engine->class == COMPUTE_CLASS)
 			flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;
 
-		if (!HAS_FLAT_CCS(rq->engine->i915))
-			count = 8 + 4;
-		else
-			count = 8;
+		count = 8;
+		if (gen12_needs_ccs_aux_inv(rq->engine))
+			count += 8;
 
 		cs = intel_ring_begin(rq, count);
 		if (IS_ERR(cs))
@@ -285,11 +343,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 
 		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
 
-		if (!HAS_FLAT_CCS(rq->engine->i915)) {
-			/* hsdes: 1809175790 */
-			cs = gen12_emit_aux_table_inv(rq->engine->gt,
-						      cs, GEN12_GFX_CCS_AUX_NV);
-		}
+		cs = gen12_emit_aux_table_inv(engine, cs);
 
 		*cs++ = preparser_disable(false);
 		intel_ring_advance(rq, cs);
@@ -300,21 +354,14 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
 
 int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
 {
-	intel_engine_mask_t aux_inv = 0;
-	u32 cmd, *cs;
+	u32 cmd = 4;
+	u32 *cs;
 
-	cmd = 4;
 	if (mode & EMIT_INVALIDATE) {
 		cmd += 2;
 
-		if (!HAS_FLAT_CCS(rq->engine->i915) &&
-		    (rq->engine->class == VIDEO_DECODE_CLASS ||
-		     rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
-			aux_inv = rq->engine->mask &
-				~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0);
-			if (aux_inv)
-				cmd += 4;
-		}
+		if (gen12_needs_ccs_aux_inv(rq->engine))
+			cmd += 8;
 	}
 
 	cs = intel_ring_begin(rq, cmd);
@@ -338,6 +385,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
 			cmd |= MI_INVALIDATE_TLB;
 		if (rq->engine->class == VIDEO_DECODE_CLASS)
 			cmd |= MI_INVALIDATE_BSD;
+
+		if (gen12_needs_ccs_aux_inv(rq->engine) &&
+		    rq->engine->class == COPY_ENGINE_CLASS)
+			cmd |= MI_FLUSH_DW_CCS;
 	}
 
 	*cs++ = cmd;
@@ -345,14 +396,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
 	*cs++ = 0; /* upper addr */
 	*cs++ = 0; /* value */
 
-	if (aux_inv) { /* hsdes: 1809175790 */
-		if (rq->engine->class == VIDEO_DECODE_CLASS)
-			cs = gen12_emit_aux_table_inv(rq->engine->gt,
-						      cs, GEN12_VD0_AUX_NV);
-		else
-			cs = gen12_emit_aux_table_inv(rq->engine->gt,
-						      cs, GEN12_VE0_AUX_NV);
-	}
+	cs = gen12_emit_aux_table_inv(rq->engine, cs);
 
 	if (mode & EMIT_INVALIDATE)
 		*cs++ = preparser_disable(false);
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
index 655e5c00ddc2..867ba697aceb 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -13,6 +13,7 @@
 #include "intel_gt_regs.h"
 #include "intel_gpu_commands.h"
 
+struct intel_engine_cs;
 struct intel_gt;
 struct i915_request;
 
@@ -46,28 +47,32 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
 u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
 u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
 
-u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
+u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs);
 
 static inline u32 *
-__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+__gen8_emit_pipe_control(u32 *batch, u32 bit_group_0,
+			 u32 bit_group_1, u32 offset)
 {
 	memset(batch, 0, 6 * sizeof(u32));
 
-	batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
-	batch[1] = flags1;
+	batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;
+	batch[1] = bit_group_1;
 	batch[2] = offset;
 
 	return batch + 6;
 }
 
-static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
+static inline u32 *gen8_emit_pipe_control(u32 *batch,
+					  u32 bit_group_1, u32 offset)
 {
-	return __gen8_emit_pipe_control(batch, 0, flags, offset);
+	return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset);
 }
 
-static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0,
+					   u32 bit_group_1, u32 offset)
 {
-	return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
+	return __gen8_emit_pipe_control(batch, bit_group_0,
+					bit_group_1, offset);
 }
 
 static inline u32 *
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 5d143e2a8db0..2bd8d98d2110 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -121,6 +121,7 @@
 #define   MI_SEMAPHORE_TARGET(engine)	((engine)<<15)
 #define MI_SEMAPHORE_WAIT	MI_INSTR(0x1c, 2) /* GEN8+ */
 #define MI_SEMAPHORE_WAIT_TOKEN	MI_INSTR(0x1c, 3) /* GEN12+ */
+#define   MI_SEMAPHORE_REGISTER_POLL	(1 << 16)
 #define   MI_SEMAPHORE_POLL		(1 << 15)
 #define   MI_SEMAPHORE_SAD_GT_SDD	(0 << 12)
 #define   MI_SEMAPHORE_SAD_GTE_SDD	(1 << 12)
@@ -299,6 +300,7 @@
 #define PIPE_CONTROL_QW_WRITE				(1<<14)
 #define PIPE_CONTROL_POST_SYNC_OP_MASK			(3<<14)
 #define PIPE_CONTROL_DEPTH_STALL			(1<<13)
+#define PIPE_CONTROL_CCS_FLUSH				(1<<13) /* MTL+ */
 #define PIPE_CONTROL_WRITE_FLUSH			(1<<12)
 #define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH		(1<<12) /* gen6+ */
 #define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE	(1<<11) /* MBZ on ILK */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 718cb2c80f79..2cdfb2f713d0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -332,9 +332,11 @@
 #define GEN8_PRIVATE_PAT_HI			_MMIO(0x40e0 + 4)
 #define GEN10_PAT_INDEX(index)			_MMIO(0x40e0 + (index) * 4)
 #define BSD_HWS_PGA_GEN7			_MMIO(0x4180)
-#define GEN12_GFX_CCS_AUX_NV			_MMIO(0x4208)
-#define GEN12_VD0_AUX_NV			_MMIO(0x4218)
-#define GEN12_VD1_AUX_NV			_MMIO(0x4228)
+
+#define GEN12_CCS_AUX_INV			_MMIO(0x4208)
+#define GEN12_VD0_AUX_INV			_MMIO(0x4218)
+#define GEN12_VE0_AUX_INV			_MMIO(0x4238)
+#define GEN12_BCS0_AUX_INV			_MMIO(0x4248)
 
 #define GEN8_RTCR				_MMIO(0x4260)
 #define GEN8_M1TCR				_MMIO(0x4264)
@@ -342,14 +344,12 @@
 #define GEN8_BTCR				_MMIO(0x426c)
 #define GEN8_VTCR				_MMIO(0x4270)
 
-#define GEN12_VD2_AUX_NV			_MMIO(0x4298)
-#define GEN12_VD3_AUX_NV			_MMIO(0x42a8)
-#define GEN12_VE0_AUX_NV			_MMIO(0x4238)
-
 #define BLT_HWS_PGA_GEN7			_MMIO(0x4280)
-#define GEN12_VE1_AUX_NV			_MMIO(0x42b8)
+#define GEN12_VD2_AUX_INV			_MMIO(0x4298)
+#define GEN12_CCS0_AUX_INV			_MMIO(0x42c8)
 
 #define AUX_INV					REG_BIT(0)
+
 #define VEBOX_HWS_PGA_GEN7			_MMIO(0x4380)
 
 #define GEN12_AUX_ERR_DBG			_MMIO(0x43f4)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index a4ec20aaafe2..9477c2422321 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1364,10 +1364,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
 	    IS_DG2_G11(ce->engine->i915))
 		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
 
-	/* hsdes: 1809175790 */
-	if (!HAS_FLAT_CCS(ce->engine->i915))
-		cs = gen12_emit_aux_table_inv(ce->engine->gt,
-					      cs, GEN12_GFX_CCS_AUX_NV);
+	cs = gen12_emit_aux_table_inv(ce->engine, cs);
 
 	/* Wa_16014892111 */
 	if (IS_MTL_GRAPHICS_STEP(ce->engine->i915, M, STEP_A0, STEP_B0) ||
@@ -1392,17 +1389,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
 						    PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
 						    0);
 
-	/* hsdes: 1809175790 */
-	if (!HAS_FLAT_CCS(ce->engine->i915)) {
-		if (ce->engine->class == VIDEO_DECODE_CLASS)
-			cs = gen12_emit_aux_table_inv(ce->engine->gt,
-						      cs, GEN12_VD0_AUX_NV);
-		else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
-			cs = gen12_emit_aux_table_inv(ce->engine->gt,
-						      cs, GEN12_VE0_AUX_NV);
-	}
-
-	return cs;
+	return gen12_emit_aux_table_inv(ce->engine, cs);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index ee9f83af7cf6..477df260ae3a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -470,12 +470,19 @@ int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
 	ret = slpc_set_param(slpc,
 			     SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
 			     val);
-	if (ret)
+	if (ret) {
 		guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
 				val, ERR_PTR(ret));
-	else
+	} else {
 		slpc->ignore_eff_freq = val;
 
+		/* Set min to RPn when we disable efficient freq */
+		if (val)
+			ret = slpc_set_param(slpc,
+					     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+					     slpc->min_freq);
+	}
+
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&slpc->lock);
 	return ret;
@@ -602,9 +609,8 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
 		return ret;
 
 	if (!slpc->min_freq_softlimit) {
-		ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
-		if (unlikely(ret))
-			return ret;
+		/* Min softlimit is initialized to RPn */
+		slpc->min_freq_softlimit = slpc->min_freq;
 		slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
 	} else {
 		return intel_guc_slpc_set_min_freq(slpc,
@@ -755,6 +761,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
 		return ret;
 	}
 
+	/* Set cached value of ignore efficient freq */
+	intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
+
 	/* Revert SLPC min/max to softlimits if necessary */
 	ret = slpc_set_softlimits(slpc);
 	if (unlikely(ret)) {
@@ -765,9 +774,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
 	/* Set cached media freq ratio mode */
 	intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
 
-	/* Set cached value of ignore efficient freq */
-	intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 2a0438f12a14..af9afdb53c7f 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -491,7 +491,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
 		return;
 	}
 
-	msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, reg);
+	msg_length = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, value);
 
 	// check the msg in DATA register.
 	msg = vgpu_vreg(vgpu, offset + 4);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 8ef93889061a..5ec293011d99 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -449,8 +449,11 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
 		}
 	} while (unlikely(is_barrier(active)));
 
-	if (!__i915_active_fence_set(active, fence))
+	fence = __i915_active_fence_set(active, fence);
+	if (!fence)
 		__i915_active_acquire(ref);
+	else
+		dma_fence_put(fence);
 
 out:
 	i915_active_release(ref);
@@ -469,13 +472,9 @@ __i915_active_set_fence(struct i915_active *ref,
 		return NULL;
 	}
 
-	rcu_read_lock();
 	prev = __i915_active_fence_set(active, fence);
-	if (prev)
-		prev = dma_fence_get_rcu(prev);
-	else
+	if (!prev)
 		__i915_active_acquire(ref);
-	rcu_read_unlock();
 
 	return prev;
 }
@@ -1019,10 +1018,11 @@ void i915_request_add_active_barriers(struct i915_request *rq)
  *
  * Records the new @fence as the last active fence along its timeline in
  * this active tracker, moving the tracking callbacks from the previous
- * fence onto this one. Returns the previous fence (if not already completed),
- * which the caller must ensure is executed before the new fence. To ensure
- * that the order of fences within the timeline of the i915_active_fence is
- * understood, it should be locked by the caller.
+ * fence onto this one. Gets and returns a reference to the previous fence
+ * (if not already completed), which the caller must put after making sure
+ * that it is executed before the new fence. To ensure that the order of
+ * fences within the timeline of the i915_active_fence is understood, it
+ * should be locked by the caller.
  */
 struct dma_fence *
 __i915_active_fence_set(struct i915_active_fence *active,
@@ -1031,7 +1031,23 @@ __i915_active_fence_set(struct i915_active_fence *active,
 	struct dma_fence *prev;
 	unsigned long flags;
 
-	if (fence == rcu_access_pointer(active->fence))
+	/*
+	 * In case of fences embedded in i915_requests, their memory is
+	 * SLAB_FAILSAFE_BY_RCU, then it can be reused right after release
+	 * by new requests. Then, there is a risk of passing back a pointer
+	 * to a new, completely unrelated fence that reuses the same memory
+	 * while tracked under a different active tracker. Combined with i915
+	 * perf open/close operations that build await dependencies between
+	 * engine kernel context requests and user requests from different
+	 * timelines, this can lead to dependency loops and infinite waits.
+	 *
+	 * As a countermeasure, we try to get a reference to the active->fence
+	 * first, so if we succeed and pass it back to our user then it is not
+	 * released and potentially reused by an unrelated request before the
+	 * user has a chance to set up an await dependency on it.
+	 */
+	prev = i915_active_fence_get(active);
+	if (fence == prev)
 		return fence;
 
 	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
@@ -1040,27 +1056,56 @@ __i915_active_fence_set(struct i915_active_fence *active,
 	 * Consider that we have two threads arriving (A and B), with
 	 * C already resident as the active->fence.
 	 *
-	 * A does the xchg first, and so it sees C or NULL depending
-	 * on the timing of the interrupt handler. If it is NULL, the
-	 * previous fence must have been signaled and we know that
-	 * we are first on the timeline. If it is still present,
-	 * we acquire the lock on that fence and serialise with the interrupt
-	 * handler, in the process removing it from any future interrupt
-	 * callback. A will then wait on C before executing (if present).
-	 *
-	 * As B is second, it sees A as the previous fence and so waits for
-	 * it to complete its transition and takes over the occupancy for
-	 * itself -- remembering that it needs to wait on A before executing.
+	 * Both A and B have got a reference to C or NULL, depending on the
+	 * timing of the interrupt handler. Let's assume that if A has got C
+	 * then it has locked C first (before B).
 	 *
 	 * Note the strong ordering of the timeline also provides consistent
 	 * nesting rules for the fence->lock; the inner lock is always the
 	 * older lock.
 	 */
 	spin_lock_irqsave(fence->lock, flags);
-	prev = xchg(__active_fence_slot(active), fence);
-	if (prev) {
-		GEM_BUG_ON(prev == fence);
+	if (prev)
 		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+
+	/*
+	 * A does the cmpxchg first, and so it sees C or NULL, as before, or
+	 * something else, depending on the timing of other threads and/or
+	 * interrupt handler. If not the same as before then A unlocks C if
+	 * applicable and retries, starting from an attempt to get a new
+	 * active->fence. Meanwhile, B follows the same path as A.
+	 * Once A succeeds with cmpxch, B fails again, retires, gets A from
+	 * active->fence, locks it as soon as A completes, and possibly
+	 * succeeds with cmpxchg.
+	 */
+	while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
+		if (prev) {
+			spin_unlock(prev->lock);
+			dma_fence_put(prev);
+		}
+		spin_unlock_irqrestore(fence->lock, flags);
+
+		prev = i915_active_fence_get(active);
+		GEM_BUG_ON(prev == fence);
+
+		spin_lock_irqsave(fence->lock, flags);
+		if (prev)
+			spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+	}
+
+	/*
+	 * If prev is NULL then the previous fence must have been signaled
+	 * and we know that we are first on the timeline. If it is still
+	 * present then, having the lock on that fence already acquired, we
+	 * serialise with the interrupt handler, in the process of removing it
+	 * from any future interrupt callback. A will then wait on C before
+	 * executing (if present).
+	 *
+	 * As B is second, it sees A as the previous fence and so waits for
+	 * it to complete its transition and takes over the occupancy for
+	 * itself -- remembering that it needs to wait on A before executing.
+	 */
+	if (prev) {
 		__list_del_entry(&active->cb.node);
 		spin_unlock(prev->lock); /* serialise with prev->cb_list */
 	}
@@ -1077,11 +1122,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
 	int err = 0;
 
 	/* Must maintain timeline ordering wrt previous active requests */
-	rcu_read_lock();
 	fence = __i915_active_fence_set(active, &rq->fence);
-	if (fence) /* but the previous fence may not belong to that timeline! */
-		fence = dma_fence_get_rcu(fence);
-	rcu_read_unlock();
 	if (fence) {
 		err = i915_request_await_dma_fence(rq, fence);
 		dma_fence_put(fence);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 894068bb37b6..833b73edefdb 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1661,6 +1661,11 @@ __i915_request_ensure_parallel_ordering(struct i915_request *rq,
 
 	request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
 
+	/*
+	 * Users have to put a reference potentially got by
+	 * __i915_active_fence_set() to the returned request
+	 * when no longer needed
+	 */
 	return to_request(__i915_active_fence_set(&timeline->last_request,
 						  &rq->fence));
 }
@@ -1707,6 +1712,10 @@ __i915_request_ensure_ordering(struct i915_request *rq,
 							 0);
 	}
 
+	/*
+	 * Users have to put the reference to prev potentially got
+	 * by __i915_active_fence_set() when no longer needed
+	 */
 	return prev;
 }
 
@@ -1760,6 +1769,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 		prev = __i915_request_ensure_ordering(rq, timeline);
 	else
 		prev = __i915_request_ensure_parallel_ordering(rq, timeline);
+	if (prev)
+		i915_request_put(prev);
 
 	/*
 	 * Make sure that no request gazumped us - if it was allocated after
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
index 5f26090b0c98..89585b31b985 100644
--- a/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
@@ -310,7 +310,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 		dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n",
 			 sig_cfg.mode.hactive, new_hactive);
 
-		sig_cfg.mode.hfront_porch = new_hactive - sig_cfg.mode.hactive;
+		sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive;
 		sig_cfg.mode.hactive = new_hactive;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index f75c6f09dd2a..622f6eb9a8bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -967,7 +967,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
 	/* Determine display colour depth for everything except LVDS now,
 	 * DP requires this before mode_valid() is called.
 	 */
-	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
+	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
 		nouveau_connector_detect_depth(connector);
 
 	/* Find the native mode if this is a digital panel, if we didn't
@@ -1408,8 +1408,7 @@ nouveau_connector_create(struct drm_device *dev,
 	ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
 			     &nv_connector->conn);
 	if (ret) {
-		kfree(nv_connector);
-		return ERR_PTR(ret);
+		goto drm_conn_err;
 	}
 
 	ret = nvif_conn_event_ctor(&nv_connector->conn, "kmsHotplug",
@@ -1426,8 +1425,7 @@ nouveau_connector_create(struct drm_device *dev,
 			if (ret) {
 				nvif_event_dtor(&nv_connector->hpd);
 				nvif_conn_dtor(&nv_connector->conn);
-				kfree(nv_connector);
-				return ERR_PTR(ret);
+				goto drm_conn_err;
 			}
 		}
 	}
@@ -1475,4 +1473,9 @@ nouveau_connector_create(struct drm_device *dev,
 
 	drm_connector_register(connector);
 	return connector;
+
+drm_conn_err:
+	drm_connector_cleanup(connector);
+	kfree(nv_connector);
+	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 40c8ea43c42f..b8ac66b4a2c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -26,6 +26,8 @@
 #include "head.h"
 #include "ior.h"
 
+#include <drm/display/drm_dp.h>
+
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
 #include <subdev/gpio.h>
@@ -634,6 +636,50 @@ nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
 	return outp->dp.rates != 0;
 }
 
+/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
+ * converted to work inside nvkm. This is a temporary holdover until we start
+ * passing the drm_dp_aux device through NVKM
+ */
+static int
+nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
+{
+	struct nvkm_i2c_aux *aux = outp->dp.aux;
+	u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
+	int ret;
+
+	ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Prior to DP1.3 the bit represented by
+	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
+	 * If it is set DP_DPCD_REV at 0000h could be at a value less than
+	 * the true capability of the panel. The only way to check is to
+	 * then compare 0000h and 2200h.
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 40c8ea43c42f..b8ac66b4a2c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -26,6 +26,8 @@
 #include "head.h"
 #include "ior.h"
 
+#include <drm/display/drm_dp.h>
+
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
 #include <subdev/gpio.h>
@@ -634,6 +636,50 @@ nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
 	return outp->dp.rates != 0;
 }
 
+/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
+ * converted to work inside nvkm. This is a temporary holdover until we start
+ * passing the drm_dp_aux device through NVKM
+ */
+static int
+nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
+{
+	struct nvkm_i2c_aux *aux = outp->dp.aux;
+	u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
+	int ret;
+
+	ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Prior to DP1.3 the bit represented by
+	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
+	 * If it is set DP_DPCD_REV at 0000h could be at a value less than
+	 * the true capability of the panel. The only way to check is to
+	 * then compare 0000h and 2200h.
+	 */
+	if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
+		return 0;
+
+	ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
+	if (ret < 0)
+		return ret;
+
+	if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
+		OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
+			 outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
+		return 0;
+	}
+
+	if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
+		return 0;
+
+	memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));
+
+	return 0;
+}
+
 void
 nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
 {
@@ -689,7 +735,7 @@ nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
 			memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
 	}
 
-	if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, sizeof(outp->dp.dpcd))) {
+	if (!nvkm_dp_read_dpcd_caps(outp)) {
 		const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
 		const u8 *rate;
 		int rate_max;
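Annotation (not part of the patch): nvkm_dp_read_dpcd_caps() above mirrors drm_dp_read_dpcd_caps(): read the base caps at 0000h, and only trust the extended block at 2200h when the sink advertises it and does not report a lower DPCD revision there. A standalone sketch of just that decision, assuming the 15-byte cap block and register offsets as defined in <drm/display/drm_dp.h>:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DP_RECEIVER_CAP_SIZE			0xf
#define DP_DPCD_REV				0x000
#define DP_TRAINING_AUX_RD_INTERVAL		0x00e
#define DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT	(1 << 7)

/* Return true when the extended caps at 2200h should replace the base caps. */
static bool prefer_extended_caps(const uint8_t base[DP_RECEIVER_CAP_SIZE],
				 const uint8_t ext[DP_RECEIVER_CAP_SIZE])
{
	/* Pre-DP1.3 sinks: the "extended caps present" bit was reserved. */
	if (!(base[DP_TRAINING_AUX_RD_INTERVAL] &
	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
		return false;

	/* An extended block claiming an older revision is not trustworthy. */
	if (base[DP_DPCD_REV] > ext[DP_DPCD_REV])
		return false;

	return memcmp(base, ext, DP_RECEIVER_CAP_SIZE) != 0;
}

int main(void)
{
	uint8_t base[DP_RECEIVER_CAP_SIZE] = {
		[DP_DPCD_REV] = 0x12,
		[DP_TRAINING_AUX_RD_INTERVAL] = DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT,
	};
	uint8_t ext[DP_RECEIVER_CAP_SIZE] = { [DP_DPCD_REV] = 0x14 };

	printf("use extended caps: %d\n", prefer_extended_caps(base, ext));
	return 0;
}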
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 00dbeda7e346..de161e7a04aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -117,6 +117,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gk110_grctx;
 void gk110_grctx_generate_r419eb0(struct gf100_gr *);
+void gk110_grctx_generate_r419f78(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gk110b_grctx;
 extern const struct gf100_grctx_func gk208_grctx;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 94233d0119df..52a234b1ef01 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -906,7 +906,9 @@ static void
 gk104_grctx_generate_r419f78(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
+
+	/* bit 3 set disables loads in fp helper invocations, we need it enabled */
+	nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
index 4391458e1fb2..3acdd9eeb74a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c
@@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr)
 	nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000);
 }
 
+void
+gk110_grctx_generate_r419f78(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	/* bit 3 set disables loads in fp helper invocations, we need it enabled */
+	nvkm_mask(device, 0x419f78, 0x00000008, 0x00000000);
+}
+
 const struct gf100_grctx_func
 gk110_grctx = {
 	.main = gf100_grctx_generate_main,
@@ -854,4 +863,5 @@ gk110_grctx = {
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r418800 = gk104_grctx_generate_r418800,
 	.r419eb0 = gk110_grctx_generate_r419eb0,
+	.r419f78 = gk110_grctx_generate_r419f78,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
index 7b9a34f9ec3c..5597e87624ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c
@@ -103,4 +103,5 @@ gk110b_grctx = {
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r418800 = gk104_grctx_generate_r418800,
 	.r419eb0 = gk110_grctx_generate_r419eb0,
+	.r419f78 = gk110_grctx_generate_r419f78,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
index c78d07a8bb7d..612656496541 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c
@@ -568,4 +568,5 @@ gk208_grctx = {
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r418800 = gk104_grctx_generate_r418800,
+	.r419f78 = gk110_grctx_generate_r419f78,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index beac66eb2a80..9906974ac3f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -988,4 +988,5 @@ gm107_grctx = {
 	.r406500 = gm107_grctx_generate_r406500,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r419e00 = gm107_grctx_generate_r419e00,
+	.r419f78 = gk110_grctx_generate_r419f78,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index 3b6c8100a242..a7775aa18541 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -206,19 +206,6 @@ tu102_gr_av_to_init_veid(struct nvkm_blob *blob, struct gf100_gr_pack **ppack)
 	return gk20a_gr_av_to_init_(blob, 64, 0x00100000, ppack);
 }
 
-int
-tu102_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
-{
-	int ret;
-
-	ret = gm200_gr_load(gr, ver, fwif);
-	if (ret)
-		return ret;
-
-	return gk20a_gr_load_net(gr, "gr/", "sw_veid_bundle_init", ver, tu102_gr_av_to_init_veid,
-				 &gr->bundle_veid);
-}
-
 static const struct gf100_gr_fwif
 tu102_gr_fwif[] = {
 	{ 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
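Annotation (not part of the patch): the grctx hunks differ only in the mask passed to nvkm_mask(): the gk104 path clears bits 0 and 3 (0x9) while the new gk110 helper clears only bit 3 (0x8), keeping loads in FP helper invocations enabled. A userspace stand-in for that read-modify-write, with a fake register variable; the semantics (clear @mask, then OR in @data, return the old value) match nvkm_mask() as used above:

#include <stdint.h>
#include <stdio.h>

static uint32_t r419f78 = 0x00000009;	/* pretend bits 0 and 3 start set */

/* Clear @mask bits, then OR in @data; return the previous value. */
static uint32_t reg_mask(uint32_t *reg, uint32_t mask, uint32_t data)
{
	uint32_t old = *reg;

	*reg = (old & ~mask) | data;
	return old;
}

int main(void)
{
	/* gk110 variant: clear only bit 3, leaving bit 0 untouched */
	reg_mask(&r419f78, 0x00000008, 0x00000000);
	printf("0x%08x\n", r419f78);	/* prints 0x00000001 */
	return 0;
}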
backlight %d\n", ret); - return ret; - } + if (IS_ERR(jdi->backlight)) + return dev_err_probe(dev, PTR_ERR(jdi->backlight), + "failed to register backlight %d\n", ret); drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c index 102e1fc7ee38..be4ec5bb5223 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c @@ -569,6 +569,7 @@ static const struct of_device_id s6d7aa0_of_match[] = { }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, s6d7aa0_of_match); static struct mipi_dsi_driver s6d7aa0_driver = { .probe = s6d7aa0_probe, diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index aaba36b3a674..b38d0e95cd54 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -999,21 +999,21 @@ static const struct panel_desc auo_g104sn02 = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; -static const struct drm_display_mode auo_g121ean01_mode = { - .clock = 66700, - .hdisplay = 1280, - .hsync_start = 1280 + 58, - .hsync_end = 1280 + 58 + 8, - .htotal = 1280 + 58 + 8 + 70, - .vdisplay = 800, - .vsync_start = 800 + 6, - .vsync_end = 800 + 6 + 4, - .vtotal = 800 + 6 + 4 + 10, +static const struct display_timing auo_g121ean01_timing = { + .pixelclock = { 60000000, 74400000, 90000000 }, + .hactive = { 1280, 1280, 1280 }, + .hfront_porch = { 20, 50, 100 }, + .hback_porch = { 20, 50, 100 }, + .hsync_len = { 30, 100, 200 }, + .vactive = { 800, 800, 800 }, + .vfront_porch = { 2, 10, 25 }, + .vback_porch = { 2, 10, 25 }, + .vsync_len = { 4, 18, 50 }, }; static const struct panel_desc auo_g121ean01 = { - .modes = &auo_g121ean01_mode, - .num_modes = 1, + .timings = &auo_g121ean01_timing, + .num_timings = 1, .bpc = 8, .size = { .width = 261, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index ea993d7162e8..307a890fde13 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -310,7 +310,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev, u32 domain, size_t size, struct qxl_surface *surf, - struct qxl_bo **qobj, + struct drm_gem_object **gobj, uint32_t *handle); void qxl_gem_object_free(struct drm_gem_object *gobj); int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c index d636ba685451..17df5c7ccf69 100644 --- a/drivers/gpu/drm/qxl/qxl_dumb.c +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, { struct qxl_device *qdev = to_qxl(dev); struct qxl_bo *qobj; + struct drm_gem_object *gobj; uint32_t handle; int r; struct qxl_surface surf; @@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, r = qxl_gem_object_create_with_handle(qdev, file_priv, QXL_GEM_DOMAIN_CPU, - args->size, &surf, &qobj, + args->size, &surf, &gobj, &handle); if (r) return r; + qobj = gem_to_qxl_bo(gobj); qobj->is_dumb = true; + drm_gem_object_put(gobj); args->pitch = pitch; args->handle = handle; return 0; diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index a08da0bd9098..fc5e3763c359 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size, return 0; } +/* + * If the caller passed a valid gobj pointer, it is 
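Annotation (not part of the patch): switching auo_g121ean01 from a fixed drm_display_mode to a display_timing hands the LVDS stack min/typical/max ranges instead of one pinned mode. A compilable sketch of that triple convention, mirroring (not reproducing) struct timing_entry from the kernel's <video/display_timing.h>; the helper and struct names here are invented:

#include <stdio.h>

/* min/typical/max triple, in the spirit of struct timing_entry */
struct timing_entry { unsigned int min, typ, max; };

struct display_timing_sketch {
	struct timing_entry pixelclock;	/* Hz */
	struct timing_entry hactive, hfront_porch, hback_porch, hsync_len;
	struct timing_entry vactive, vfront_porch, vback_porch, vsync_len;
};

/* Derive the typical horizontal total from the typical entries. */
static unsigned int htotal_typ(const struct display_timing_sketch *t)
{
	return t->hactive.typ + t->hfront_porch.typ +
	       t->hback_porch.typ + t->hsync_len.typ;
}

int main(void)
{
	struct display_timing_sketch g121ean01 = {
		.pixelclock   = { 60000000, 74400000, 90000000 },
		.hactive      = { 1280, 1280, 1280 },
		.hfront_porch = { 20, 50, 100 },
		.hback_porch  = { 20, 50, 100 },
		.hsync_len    = { 30, 100, 200 },
	};

	printf("typical htotal: %u\n", htotal_typ(&g121ean01));	/* 1480 */
	return 0;
}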
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index ea993d7162e8..307a890fde13 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -310,7 +310,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
 				      u32 domain,
 				      size_t size,
 				      struct qxl_surface *surf,
-				      struct qxl_bo **qobj,
+				      struct drm_gem_object **gobj,
 				      uint32_t *handle);
 void qxl_gem_object_free(struct drm_gem_object *gobj);
 int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index d636ba685451..17df5c7ccf69 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
 {
 	struct qxl_device *qdev = to_qxl(dev);
 	struct qxl_bo *qobj;
+	struct drm_gem_object *gobj;
 	uint32_t handle;
 	int r;
 	struct qxl_surface surf;
@@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
 
 	r = qxl_gem_object_create_with_handle(qdev, file_priv,
 					      QXL_GEM_DOMAIN_CPU,
-					      args->size, &surf, &qobj,
+					      args->size, &surf, &gobj,
 					      &handle);
 	if (r)
 		return r;
+	qobj = gem_to_qxl_bo(gobj);
 	qobj->is_dumb = true;
+	drm_gem_object_put(gobj);
 	args->pitch = pitch;
 	args->handle = handle;
 	return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index a08da0bd9098..fc5e3763c359 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
 	return 0;
 }
 
+/*
+ * If the caller passed a valid gobj pointer, it is responsible to call
+ * drm_gem_object_put() when it no longer needs to access the object.
+ *
+ * If gobj is NULL, it is handled internally.
+ */
 int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
 				      struct drm_file *file_priv,
 				      u32 domain,
 				      size_t size,
 				      struct qxl_surface *surf,
-				      struct qxl_bo **qobj,
+				      struct drm_gem_object **gobj,
 				      uint32_t *handle)
 {
-	struct drm_gem_object *gobj;
 	int r;
+	struct drm_gem_object *local_gobj;
 
-	BUG_ON(!qobj);
 	BUG_ON(!handle);
 
 	r = qxl_gem_object_create(qdev, size, 0,
 				  domain,
 				  false, false, surf,
-				  &gobj);
+				  &local_gobj);
 	if (r)
 		return -ENOMEM;
-	r = drm_gem_handle_create(file_priv, gobj, handle);
+	r = drm_gem_handle_create(file_priv, local_gobj, handle);
 	if (r)
 		return r;
-	/* drop reference from allocate - handle holds it now */
-	*qobj = gem_to_qxl_bo(gobj);
-	drm_gem_object_put(gobj);
+
+	if (gobj)
+		*gobj = local_gobj;
+	else
+		/* drop reference from allocate - handle holds it now */
+		drm_gem_object_put(local_gobj);
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 30f58b21372a..dd0f834d881c 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -38,7 +38,6 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
 	struct qxl_device *qdev = to_qxl(dev);
 	struct drm_qxl_alloc *qxl_alloc = data;
 	int ret;
-	struct qxl_bo *qobj;
 	uint32_t handle;
 	u32 domain = QXL_GEM_DOMAIN_VRAM;
 
@@ -50,7 +49,7 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
 					     domain,
 					     qxl_alloc->size,
 					     NULL,
-					     &qobj, &handle);
+					     NULL, &handle);
 	if (ret) {
 		DRM_ERROR("%s: failed to create gem ret=%d\n",
 			  __func__, ret);
@@ -386,7 +385,6 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 {
 	struct qxl_device *qdev = to_qxl(dev);
 	struct drm_qxl_alloc_surf *param = data;
-	struct qxl_bo *qobj;
 	int handle;
 	int ret;
 	int size, actual_stride;
@@ -406,7 +404,7 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 					     QXL_GEM_DOMAIN_SURFACE,
 					     size,
 					     &surf,
-					     &qobj, &handle);
+					     NULL, &handle);
 	if (ret) {
 		DRM_ERROR("%s: failed to create gem ret=%d\n",
 			  __func__, ret);
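Annotation (not part of the patch): the qxl rework above closes a use-after-free: the old code dropped the creation reference and handed back a qxl_bo kept alive only by the handle, which userspace could close at any time. Now the caller either receives the creation reference explicitly or passes NULL and never touches the object. A toy refcount sketch of that ownership rule, with invented names standing in for the drm_gem_object machinery:

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		o->refs = 1;	/* creation reference */
	return o;
}

static void obj_get(struct obj *o) { o->refs++; }

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		free(o);
}

/*
 * If @out is non-NULL the caller inherits the creation reference and must
 * obj_put() it; otherwise only the handle table keeps the object alive.
 */
static int create_with_handle(struct obj **handle_slot, struct obj **out)
{
	struct obj *o = obj_new();

	if (!o)
		return -1;
	obj_get(o);		/* reference owned by the handle */
	*handle_slot = o;
	if (out)
		*out = o;	/* creation reference handed to the caller */
	else
		obj_put(o);	/* drop it; the handle now holds the only ref */
	return 0;
}

int main(void)
{
	struct obj *slot = NULL, *mine = NULL;

	create_with_handle(&slot, &mine);
	printf("refs while caller holds one: %d\n", mine->refs);	/* 2 */
	obj_put(mine);	/* caller's put, mirroring qxl_mode_dumb_create() */
	obj_put(slot);	/* handle teardown */
	return 0;
}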
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index a530ecc4d207..bf34498c1b6d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -833,12 +833,12 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 	 * need align with 2 pixel.
 	 */
 	if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) {
-		DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
+		DRM_DEBUG_KMS("Invalid Source: Yuv format not support odd xpos\n");
 		return -EINVAL;
 	}
 
 	if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) {
-		DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n");
+		DRM_DEBUG_KMS("Invalid Source: Yuv format does not support this rotation\n");
 		return -EINVAL;
 	}
 
@@ -846,7 +846,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 		struct vop *vop = to_vop(crtc);
 
 		if (!vop->data->afbc) {
-			DRM_ERROR("vop does not support AFBC\n");
+			DRM_DEBUG_KMS("vop does not support AFBC\n");
 			return -EINVAL;
 		}
 
@@ -855,15 +855,16 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 			return ret;
 
 		if (new_plane_state->src.x1 || new_plane_state->src.y1) {
-			DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n",
-				  new_plane_state->src.x1,
-				  new_plane_state->src.y1, fb->offsets[0]);
+			DRM_DEBUG_KMS("AFBC does not support offset display, " \
+				      "xpos=%d, ypos=%d, offset=%d\n",
+				      new_plane_state->src.x1, new_plane_state->src.y1,
+				      fb->offsets[0]);
 			return -EINVAL;
 		}
 
 		if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) {
-			DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
-				  new_plane_state->rotation);
+			DRM_DEBUG_KMS("No rotation support in AFBC, rotation=%d\n",
+				      new_plane_state->rotation);
 			return -EINVAL;
 		}
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 7139a522b2f3..54e3083076b7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -519,7 +519,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
 
 	if (bo->pin_count) {
 		*locked = false;
-		*busy = false;
+		if (busy)
+			*busy = false;
 		return false;
 	}
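Annotation (not part of the patch): the ttm_bo hunk guards the optional busy out-parameter, which some callers legitimately pass as NULL. A compilable sketch of that shape, with the locking context elided and names shortened; locked is assumed mandatory here, as in the TTM caller:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool evict_allowable(int pin_count, bool *locked, bool *busy)
{
	if (pin_count) {
		*locked = false;
		if (busy)	/* the fix: tolerate callers passing busy == NULL */
			*busy = false;
		return false;
	}
	return true;
}

int main(void)
{
	bool locked = true;

	/* Before the fix, this call pattern dereferenced a NULL busy pointer. */
	printf("allowable: %d\n", evict_allowable(1, &locked, NULL));
	return 0;
}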