Diffstat (limited to 'drivers/gpu'): 192 files changed, 2398 insertions, 2156 deletions
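Several hunks in this diff (amdgpu_psp.c, psp_v10_0.c, psp_v3_1.c) replace open-coded casts and shifts with the kernel's lower_32_bits()/upper_32_bits() helpers when programming a 64-bit MC address into a pair of 32-bit register fields. A minimal sketch of that idiom, assuming a hypothetical addr_pair struct and split_mc_addr() helper that are not part of the driver:

#include <linux/kernel.h>	/* lower_32_bits(), upper_32_bits() */
#include <linux/types.h>

/* Hypothetical stand-in for the lo/hi register pair written by the PSP
 * command setup; the real driver writes into psp_gfx_cmd_resp fields.
 */
struct addr_pair {
	u32 addr_lo;
	u32 addr_hi;
};

static inline void split_mc_addr(u64 mc_addr, struct addr_pair *out)
{
	/* The helpers make the intent explicit and avoid the truncation
	 * that a bare (uint32_t) cast of the high half can hide.
	 */
	out->addr_lo = lower_32_bits(mc_addr);
	out->addr_hi = upper_32_bits(mc_addr);
}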
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 12d61edb3597..ff7bf1a9f967 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1028,12 +1028,15 @@ struct amdgpu_gfx_config { }; struct amdgpu_cu_info { - uint32_t number; /* total active CU number */ - uint32_t ao_cu_mask; uint32_t max_waves_per_simd; uint32_t wave_front_size; uint32_t max_scratch_slots_per_cu; uint32_t lds_size; + + /* total active CU number */ + uint32_t number; + uint32_t ao_cu_mask; + uint32_t ao_cu_bitmap[4][4]; uint32_t bitmap[4][4]; }; @@ -1924,7 +1927,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_need_post(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); -int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes); void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 5f8ada1d872b..37971d9402e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -101,7 +101,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) if (adev->kfd) { struct kgd2kfd_shared_resources gpu_resources = { .compute_vmid_bitmap = 0xFF00, - .num_mec = adev->gfx.mec.num_mec, .num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec, .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe }; @@ -122,7 +121,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) /* According to linux/bitmap.h we shouldn't use bitmap_clear if * nbits is not compile time constant */ - last_valid_bit = adev->gfx.mec.num_mec + last_valid_bit = 1 /* only first MEC can have compute queues */ * adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1cf78f4dd339..1e8e1123ddf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", adev->clock.default_dispclk / 100); adev->clock.default_dispclk = 60000; + } else if (adev->clock.default_dispclk <= 60000) { + DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n", + adev->clock.default_dispclk / 100); + adev->clock.default_dispclk = 62500; } adev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index f621ee115c98..5e771bc11b00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -198,12 +198,16 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id) result = idr_find(&fpriv->bo_list_handles, id); if (result) { - if (kref_get_unless_zero(&result->refcount)) + if (kref_get_unless_zero(&result->refcount)) { + rcu_read_unlock(); mutex_lock(&result->lock); - else + } else { + rcu_read_unlock(); result = NULL; + } + } else { + rcu_read_unlock(); } - rcu_read_unlock(); return result; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 
c6dba1eaefbd..c0a806280257 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -838,6 +838,12 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device, return -EINVAL; mode_info = info->mode_info; + if (mode_info) { + /* if the displays are off, vblank time is max */ + mode_info->vblank_time_us = 0xffffffff; + /* always set the reference clock */ + mode_info->ref_clock = adev->clock.spll.reference_freq; + } if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { list_for_each_entry(crtc, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index aeee6840e82b..5599c01b265d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -64,7 +64,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, return 0; } -int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) +static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; @@ -497,7 +497,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, &e->user_invalidated) && e->user_pages) { /* We acquired a page array, but somebody - * invalidated it. Free it an try again + * invalidated it. Free it and try again */ release_pages(e->user_pages, e->robj->tbo.ttm->num_pages, @@ -1069,10 +1069,8 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) { int i; - for (i = 0; i < p->num_post_dep_syncobjs; ++i) { - drm_syncobj_replace_fence(p->filp, p->post_dep_syncobjs[i], - p->fence); - } + for (i = 0; i < p->num_post_dep_syncobjs; ++i) + drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence); } static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b2c960b2ea82..4a8fc15467cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1162,16 +1162,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero return; if (state == VGA_SWITCHEROO_ON) { - unsigned d3_delay = dev->pdev->d3_delay; - pr_info("amdgpu: switched on\n"); /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; amdgpu_device_resume(dev, true, true); - dev->pdev->d3_delay = d3_delay; - dev->switch_power_state = DRM_SWITCH_POWER_ON; drm_kms_helper_poll_enable(dev); } else { @@ -3804,7 +3800,7 @@ int amdgpu_debugfs_init(struct drm_minor *minor) return 0; } #else -static int amdgpu_debugfs_test_ib_init(struct amdgpu_device *adev) +static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev) { return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 4c7c2628ace4..b59f37c83fa6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -67,9 +67,10 @@ * - 3.15.0 - Export more gpu info for gfx9 * - 3.16.0 - Add reserved vmid support * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS. 
+ * - 3.18.0 - Export gpu always on cu bitmap */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 17 +#define KMS_DRIVER_MINOR 18 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; @@ -247,14 +248,28 @@ MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = module_param_named(lbpw, amdgpu_lbpw, int, 0444); #ifdef CONFIG_DRM_AMDGPU_SI + +#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) int amdgpu_si_support = 0; MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))"); +#else +int amdgpu_si_support = 1; +MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)"); +#endif + module_param_named(si_support, amdgpu_si_support, int, 0444); #endif #ifdef CONFIG_DRM_AMDGPU_CIK + +#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) int amdgpu_cik_support = 0; MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))"); +#else +int amdgpu_cik_support = 1; +MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)"); +#endif + module_param_named(cik_support, amdgpu_cik_support, int, 0444); #endif @@ -475,6 +490,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, + {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, /* Vega 10 */ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 12497a40ef92..b0b23101d1c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -594,6 +594,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.cu_active_number = adev->gfx.cu_info.number; dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; dev_info.ce_ram_size = adev->gfx.ce_ram_size; + memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0], + sizeof(adev->gfx.cu_info.ao_cu_bitmap)); memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0], sizeof(adev->gfx.cu_info.bitmap)); dev_info.vram_type = adev->mc.vram_type; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 38f739fb727b..6558a3ed57a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -359,7 +359,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) head = bo->mn_list.next; bo->mn = NULL; - list_del(&bo->mn_list); + list_del_init(&bo->mn_list); if (list_empty(head)) { struct amdgpu_mn_node *node; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 72c03c744594..b7e1c026c0c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -188,6 +188,9 @@ static int amdgpu_pp_hw_fini(void *handle) int ret = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->pp_enabled && adev->pm.dpm_enabled) + amdgpu_pm_sysfs_fini(adev); + if (adev->powerplay.ip_funcs->hw_fini) ret = adev->powerplay.ip_funcs->hw_fini( adev->powerplay.pp_handle); @@ -206,10 +209,9 @@ static void amdgpu_pp_late_fini(void *handle) adev->powerplay.ip_funcs->late_fini( adev->powerplay.pp_handle); - if (adev->pp_enabled 
&& adev->pm.dpm_enabled) - amdgpu_pm_sysfs_fini(adev); - amd_powerplay_destroy(adev->powerplay.pp_handle); + if (adev->pp_enabled) + amd_powerplay_destroy(adev->powerplay.pp_handle); } static int amdgpu_pp_suspend(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c224c5caba5b..4083be61b328 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -152,8 +152,8 @@ static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd, uint64_t tmr_mc, uint32_t size) { cmd->cmd_id = GFX_CMD_ID_SETUP_TMR; - cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = (uint32_t)tmr_mc; - cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = (uint32_t)(tmr_mc >> 32); + cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); + cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); cmd->cmd.cmd_setup_tmr.buf_size = size; } @@ -333,14 +333,11 @@ static int psp_load_fw(struct amdgpu_device *adev) { int ret; struct psp_context *psp = &adev->psp; - struct psp_gfx_cmd_resp *cmd; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) + psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!psp->cmd) return -ENOMEM; - psp->cmd = cmd; - ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, AMDGPU_GEM_DOMAIN_GTT, &psp->fw_pri_bo, @@ -379,8 +376,6 @@ static int psp_load_fw(struct amdgpu_device *adev) if (ret) goto failed_mem; - kfree(cmd); - return 0; failed_mem: @@ -390,7 +385,8 @@ failed_mem1: amdgpu_bo_free_kernel(&psp->fw_pri_bo, &psp->fw_pri_mc_addr, &psp->fw_pri_buf); failed: - kfree(cmd); + kfree(psp->cmd); + psp->cmd = NULL; return ret; } @@ -450,6 +446,9 @@ static int psp_hw_fini(void *handle) amdgpu_bo_free_kernel(&psp->fence_buf_bo, &psp->fence_buf_mc_addr, &psp->fence_buf); + kfree(psp->cmd); + psp->cmd = NULL; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 8c9bc75a9c2d..8a0818b23ea4 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -165,7 +165,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); @@ -178,7 +178,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) { int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h index 18fd01f3e4b2..003a131bad47 100644 --- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h +++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h @@ -1,24 +1,25 @@ - /* -*************************************************************************************************** -* -* Trade secret of Advanced Micro Devices, Inc. -* Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished) -* -* All rights reserved. This notice is intended as a precaution against inadvertent publication and -* does not imply publication or any waiver of confidentiality. 
The year included in the foregoing -* notice is the year of creation of the work. -* -*************************************************************************************************** -*/ -/** -*************************************************************************************************** -* @brief gfx9 Clearstate Definitions -*************************************************************************************************** -* -* Do not edit! This is a machine-generated file! -* -*/ + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ static const unsigned int gfx9_SECT_CONTEXT_def_1[] = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 3c62c45f43a1..9f78c03a2e31 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index c8ed0facddcd..4bcf01dc567a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 786b5d02f44e..fd134a4629d7 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -991,8 +991,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, fixed20_12 a, b, c; if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); priority_a_cnt = 0; priority_b_cnt = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 3e90c19b9c7f..a9e869554627 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 7b0b3cf16334..5173ca1fd159 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -3535,7 +3535,9 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) mask <<= 1; } active_cu_number += counter; - ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); + if (i < 2 && j < 2) + ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); + cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index ec754288f146..37b45e4403d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -5427,7 +5427,9 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) mask <<= 1; } active_cu_number += counter; - ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); + if (i < 2 && j < 2) + ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); + cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 142924212b43..aa5a50f5eac8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -40,7 +40,6 @@ #include "bif/bif_5_0_d.h" #include "bif/bif_5_0_sh_mask.h" - #include "gca/gfx_8_0_d.h" #include "gca/gfx_8_0_enum.h" #include "gca/gfx_8_0_sh_mask.h" @@ -2100,7 +2099,7 @@ static int gfx_v8_0_sw_init(void *handle) return r; /* create MQD for all compute queues as well as KIQ for SRIOV case */ - r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct vi_mqd)); + r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation)); if (r) return r; @@ -4637,56 +4636,6 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) return r; } -static int gfx_v8_0_kiq_kcq_disable(struct amdgpu_device *adev) -{ - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; - uint32_t scratch, tmp = 0; - int r, 
i; - - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("Failed to get scratch reg (%d).\n", r); - return r; - } - WREG32(scratch, 0xCAFEDEAD); - - r = amdgpu_ring_alloc(kiq_ring, 6 + 3); - if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); - return r; - } - /* unmap queues */ - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); - amdgpu_ring_write(kiq_ring, - PACKET3_UNMAP_QUEUES_ACTION(1)| /* RESET_QUEUES */ - PACKET3_UNMAP_QUEUES_QUEUE_SEL(2)); /* select all queues */ - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - /* write to scratch for completion */ - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); - amdgpu_ring_write(kiq_ring, 0xDEADBEEF); - amdgpu_ring_commit(kiq_ring); - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i >= adev->usec_timeout) { - DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", - scratch, tmp); - r = -EINVAL; - } - amdgpu_gfx_scratch_free(adev, scratch); - - return r; -} - static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req) { int i, r = 0; @@ -4715,9 +4664,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; uint32_t tmp; - /* init the mqd struct */ - memset(mqd, 0, sizeof(struct vi_mqd)); - mqd->header = 0xC0310800; mqd->compute_pipelinestat_enable = 0x00000001; mqd->compute_static_thread_mgmt_se0 = 0xffffffff; @@ -4725,7 +4671,12 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) mqd->compute_static_thread_mgmt_se2 = 0xffffffff; mqd->compute_static_thread_mgmt_se3 = 0xffffffff; mqd->compute_misc_reserved = 0x00000003; - + if (!(adev->flags & AMD_IS_APU)) { + mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr + + offsetof(struct vi_mqd_allocation, dyamic_cu_mask)); + mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr + + offsetof(struct vi_mqd_allocation, dyamic_cu_mask)); + } eop_base_addr = ring->eop_gpu_addr >> 8; mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); @@ -4890,7 +4841,6 @@ int gfx_v8_0_mqd_commit(struct amdgpu_device *adev, static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) { - int r = 0; struct amdgpu_device *adev = ring->adev; struct vi_mqd *mqd = ring->mqd_ptr; int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; @@ -4900,44 +4850,32 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) if (adev->gfx.in_reset) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; amdgpu_ring_clear_ring(ring); mutex_lock(&adev->srbm_mutex); vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); - r = gfx_v8_0_deactivate_hqd(adev, 1); - if (r) { - dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name); - goto out_unlock; - } gfx_v8_0_mqd_commit(adev, mqd); vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } else { + memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); + ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation 
*)mqd)->dyamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); gfx_v8_0_mqd_init(ring); - r = gfx_v8_0_deactivate_hqd(adev, 1); - if (r) { - dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name); - goto out_unlock; - } gfx_v8_0_mqd_commit(adev, mqd); vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation)); } - return r; - -out_unlock: - vi_srbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); - return r; + return 0; } static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) @@ -4947,6 +4885,9 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) int mqd_idx = ring - &adev->gfx.compute_ring[0]; if (!adev->gfx.in_reset && !adev->gfx.in_suspend) { + memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); + ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF; + ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); gfx_v8_0_mqd_init(ring); @@ -4954,11 +4895,11 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); + memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation)); } else if (adev->gfx.in_reset) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) - memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd)); + memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; amdgpu_ring_clear_ring(ring); @@ -5138,7 +5079,6 @@ static int gfx_v8_0_hw_fini(void *handle) pr_debug("For SRIOV client, shouldn't do anything.\n"); return 0; } - gfx_v8_0_kiq_kcq_disable(adev); gfx_v8_0_cp_enable(adev, false); gfx_v8_0_rlc_stop(adev); @@ -7080,7 +7020,9 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) mask <<= 1; } active_cu_number += counter; - ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); + if (i < 2 && j < 2) + ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); + cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index ba228f613027..c9b9c88231aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1475,21 +1475,23 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev) static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) { - u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + u32 data; - if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (se_num == 0xffffffff) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); + + if (se_num == 0xffffffff) data = REG_SET_FIELD(data, GRBM_GFX_INDEX, 
SE_BROADCAST_WRITES, 1); - } else if (sh_num == 0xffffffff) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } else { + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); } @@ -1964,8 +1966,8 @@ static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev, data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL)); } -void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, - bool enable) +static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, + bool enable) { uint32_t data, default_data; @@ -1978,7 +1980,7 @@ void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); } -void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, +static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, bool enable) { uint32_t data, default_data; @@ -2502,56 +2504,6 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) return r; } -static int gfx_v9_0_kiq_kcq_disable(struct amdgpu_device *adev) -{ - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; - uint32_t scratch, tmp = 0; - int r, i; - - r = amdgpu_gfx_scratch_get(adev, &scratch); - if (r) { - DRM_ERROR("Failed to get scratch reg (%d).\n", r); - return r; - } - WREG32(scratch, 0xCAFEDEAD); - - r = amdgpu_ring_alloc(kiq_ring, 6 + 3); - if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); - amdgpu_gfx_scratch_free(adev, scratch); - return r; - } - /* unmap queues */ - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); - amdgpu_ring_write(kiq_ring, - PACKET3_UNMAP_QUEUES_ACTION(1)| /* RESET_QUEUES */ - PACKET3_UNMAP_QUEUES_QUEUE_SEL(2)); /* select all queues */ - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - amdgpu_ring_write(kiq_ring, 0); - /* write to scratch for completion */ - amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); - amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); - amdgpu_ring_write(kiq_ring, 0xDEADBEEF); - amdgpu_ring_commit(kiq_ring); - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(scratch); - if (tmp == 0xDEADBEEF) - break; - DRM_UDELAY(1); - } - if (i >= adev->usec_timeout) { - DRM_ERROR("KCQ disable failed (scratch(0x%04X)=0x%08X)\n", - scratch, tmp); - r = -EINVAL; - } - amdgpu_gfx_scratch_free(adev, scratch); - - return r; -} - static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -2996,7 +2948,6 @@ static int gfx_v9_0_hw_fini(void *handle) pr_debug("For SRIOV client, shouldn't do anything.\n"); return 0; } - gfx_v9_0_kiq_kcq_disable(adev); gfx_v9_0_cp_enable(adev, false); gfx_v9_0_rlc_stop(adev); @@ -4416,6 +4367,20 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) } } +static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, + u32 bitmap) +{ + u32 data; + + if (!bitmap) + return; + + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + + WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data); +} + static u32 
gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; @@ -4436,10 +4401,13 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, { int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; + unsigned disable_masks[4 * 2]; if (!adev || !cu_info) return -EINVAL; + amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { @@ -4447,6 +4415,9 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, ao_bitmap = 0; counter = 0; gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); + if (i < 4 && j < 2) + gfx_v9_0_set_user_cu_inactive_bitmap( + adev, disable_masks[i * 2 + j]); bitmap = gfx_v9_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; @@ -4459,7 +4430,9 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, mask <<= 1; } active_cu_number += counter; - ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); + if (i < 2 && j < 2) + ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); + cu_info->ao_cu_bitmap[i][j] = ao_bitmap; } } gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index ce68d609b619..d0214d942bfc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -794,14 +794,6 @@ static int gmc_v6_0_early_init(void *handle) gmc_v6_0_set_gart_funcs(adev); gmc_v6_0_set_irq_funcs(adev); - if (adev->flags & AMD_IS_APU) { - adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; - } else { - u32 tmp = RREG32(mmMC_SEQ_MISC0); - tmp &= MC_SEQ_MISC0__MT__MASK; - adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp); - } - return 0; } @@ -821,6 +813,14 @@ static int gmc_v6_0_sw_init(void *handle) int dma_bits; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->flags & AMD_IS_APU) { + adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; + } else { + u32 tmp = RREG32(mmMC_SEQ_MISC0); + tmp &= MC_SEQ_MISC0__MT__MASK; + adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp); + } + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 68172aace3ee..175ba5f9691c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -695,6 +695,15 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) else nbio_v6_1_hdp_flush(adev); + switch (adev->asic_type) { + case CHIP_RAVEN: + mmhub_v1_0_initialize_power_gating(adev); + mmhub_v1_0_update_power_gating(adev, true); + break; + default: + break; + } + r = gfxhub_v1_0_gart_enable(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index f50b5a77f45a..9804318f3488 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -244,6 +244,224 @@ static void mmhub_v1_0_program_invalidation(struct amdgpu_device *adev) } } +struct pctl_data { + uint32_t index; + uint32_t data; +}; + +const struct pctl_data pctl0_data[] = { + {0x0, 0x7a640}, + {0x9, 0x2a64a}, + {0xd, 0x2a680}, + {0x11, 0x6a684}, + {0x19, 0xea68e}, + {0x29, 0xa69e}, + {0x2b, 0x34a6c0}, + {0x61, 0x83a707}, + {0xe6, 0x8a7a4}, + {0xf0, 0x1a7b8}, + {0xf3, 0xfa7cc}, + {0x104, 0x17a7dd}, + {0x11d, 0xa7dc}, + {0x11f, 0x12a7f5}, + {0x133, 0xa808}, + {0x135, 0x12a810}, + 
{0x149, 0x7a82c} +}; +#define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0])) + +#define PCTL0_RENG_EXEC_END_PTR 0x151 +#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640 +#define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833 + +const struct pctl_data pctl1_data[] = { + {0x0, 0x39a000}, + {0x3b, 0x44a040}, + {0x81, 0x2a08d}, + {0x85, 0x6ba094}, + {0xf2, 0x18a100}, + {0x10c, 0x4a132}, + {0x112, 0xca141}, + {0x120, 0x2fa158}, + {0x151, 0x17a1d0}, + {0x16a, 0x1a1e9}, + {0x16d, 0x13a1ec}, + {0x182, 0x7a201}, + {0x18b, 0x3a20a}, + {0x190, 0x7a580}, + {0x199, 0xa590}, + {0x19b, 0x4a594}, + {0x1a1, 0x1a59c}, + {0x1a4, 0x7a82c}, + {0x1ad, 0xfa7cc}, + {0x1be, 0x17a7dd}, + {0x1d7, 0x12a810} +}; +#define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0])) + +#define PCTL1_RENG_EXEC_END_PTR 0x1ea +#define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000 +#define PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa20d +#define PCTL1_STCTRL_REG_SAVE_RANGE1_BASE 0xa580 +#define PCTL1_STCTRL_REG_SAVE_RANGE1_LIMIT 0xa59d +#define PCTL1_STCTRL_REG_SAVE_RANGE2_BASE 0xa82c +#define PCTL1_STCTRL_REG_SAVE_RANGE2_LIMIT 0xa833 + +static void mmhub_v1_0_power_gating_write_save_ranges(struct amdgpu_device *adev) +{ + uint32_t tmp = 0; + + /* PCTL0_STCTRL_REGISTER_SAVE_RANGE0 */ + tmp = REG_SET_FIELD(tmp, PCTL0_STCTRL_REGISTER_SAVE_RANGE0, + STCTRL_REGISTER_SAVE_BASE, + PCTL0_STCTRL_REG_SAVE_RANGE0_BASE); + tmp = REG_SET_FIELD(tmp, PCTL0_STCTRL_REGISTER_SAVE_RANGE0, + STCTRL_REGISTER_SAVE_LIMIT, + PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT); + WREG32_SOC15(MMHUB, 0, mmPCTL0_STCTRL_REGISTER_SAVE_RANGE0, tmp); + + /* PCTL1_STCTRL_REGISTER_SAVE_RANGE0 */ + tmp = 0; + tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE0, + STCTRL_REGISTER_SAVE_BASE, + PCTL1_STCTRL_REG_SAVE_RANGE0_BASE); + tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE0, + STCTRL_REGISTER_SAVE_LIMIT, + PCTL1_STCTRL_REG_SAVE_RANGE0_LIMIT); + WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE0, tmp); + + /* PCTL1_STCTRL_REGISTER_SAVE_RANGE1 */ + tmp = 0; + tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE1, + STCTRL_REGISTER_SAVE_BASE, + PCTL1_STCTRL_REG_SAVE_RANGE1_BASE); + tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE1, + STCTRL_REGISTER_SAVE_LIMIT, + PCTL1_STCTRL_REG_SAVE_RANGE1_LIMIT); + WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE1, tmp); + + /* PCTL1_STCTRL_REGISTER_SAVE_RANGE2 */ + tmp = 0; + tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE2, + STCTRL_REGISTER_SAVE_BASE, + PCTL1_STCTRL_REG_SAVE_RANGE2_BASE); + tmp = REG_SET_FIELD(tmp, PCTL1_STCTRL_REGISTER_SAVE_RANGE2, + STCTRL_REGISTER_SAVE_LIMIT, + PCTL1_STCTRL_REG_SAVE_RANGE2_LIMIT); + WREG32_SOC15(MMHUB, 0, mmPCTL1_STCTRL_REGISTER_SAVE_RANGE2, tmp); +} + +void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev) +{ + uint32_t pctl0_misc = 0; + uint32_t pctl0_reng_execute = 0; + uint32_t pctl1_misc = 0; + uint32_t pctl1_reng_execute = 0; + int i = 0; + + if (amdgpu_sriov_vf(adev)) + return; + + pctl0_misc = RREG32_SOC15(MMHUB, 0, mmPCTL0_MISC); + pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE); + pctl1_misc = RREG32_SOC15(MMHUB, 0, mmPCTL1_MISC); + pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE); + + /* Light sleep must be disabled before writing to pctl0 registers */ + pctl0_misc &= ~PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK; + WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc); + + /* Write data used to access ram of register engine */ + for (i = 0; i < PCTL0_DATA_LEN; i++) { + WREG32_SOC15(MMHUB, 0, 
mmPCTL0_RENG_RAM_INDEX, + pctl0_data[i].index); + WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_RAM_DATA, + pctl0_data[i].data); + } + + /* Set the reng execute end ptr for pctl0 */ + pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, + PCTL0_RENG_EXECUTE, + RENG_EXECUTE_END_PTR, + PCTL0_RENG_EXEC_END_PTR); + WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute); + + /* Light sleep must be disabled before writing to pctl1 registers */ + pctl1_misc &= ~PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK; + WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc); + + /* Write data used to access ram of register engine */ + for (i = 0; i < PCTL1_DATA_LEN; i++) { + WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_RAM_INDEX, + pctl1_data[i].index); + WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_RAM_DATA, + pctl1_data[i].data); + } + + /* Set the reng execute end ptr for pctl1 */ + pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute, + PCTL1_RENG_EXECUTE, + RENG_EXECUTE_END_PTR, + PCTL1_RENG_EXEC_END_PTR); + WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute); + + mmhub_v1_0_power_gating_write_save_ranges(adev); + + /* Re-enable light sleep */ + pctl0_misc |= PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK; + WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc); + pctl1_misc |= PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK; + WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc); +} + +void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, + bool enable) +{ + uint32_t pctl0_reng_execute = 0; + uint32_t pctl1_reng_execute = 0; + + if (amdgpu_sriov_vf(adev)) + return; + + pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE); + pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE); + + if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) { + pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, + PCTL0_RENG_EXECUTE, + RENG_EXECUTE_ON_PWR_UP, 1); + pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, + PCTL0_RENG_EXECUTE, + RENG_EXECUTE_ON_REG_UPDATE, 1); + WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute); + + pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute, + PCTL1_RENG_EXECUTE, + RENG_EXECUTE_ON_PWR_UP, 1); + pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute, + PCTL1_RENG_EXECUTE, + RENG_EXECUTE_ON_REG_UPDATE, 1); + WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute); + + } else { + pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, + PCTL0_RENG_EXECUTE, + RENG_EXECUTE_ON_PWR_UP, 0); + pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute, + PCTL0_RENG_EXECUTE, + RENG_EXECUTE_ON_REG_UPDATE, 0); + WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute); + + pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute, + PCTL1_RENG_EXECUTE, + RENG_EXECUTE_ON_PWR_UP, 0); + pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute, + PCTL1_RENG_EXECUTE, + RENG_EXECUTE_ON_REG_UPDATE, 0); + WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute); + } +} + int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h index bbfacbcdc4a2..57bb940c0ecd 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h @@ -32,6 +32,9 @@ void mmhub_v1_0_init(struct amdgpu_device *adev); int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state); void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags); +void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev); +void 
mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, + bool enable); extern const struct amd_ip_funcs mmhub_v1_0_ip_funcs; extern const struct amdgpu_ip_block_version mmhub_v1_0_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 20c1e539ff35..2258323a3c26 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -96,8 +96,8 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm header = (struct common_firmware_header *)ucode->fw; cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; - cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = (uint32_t)fw_mem_mc_addr; - cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = (uint32_t)((uint64_t)fw_mem_mc_addr >> 32); + cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); + cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); cmd->cmd.cmd_load_ip_fw.fw_size = le32_to_cpu(header->ucode_size_bytes); ret = psp_v10_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); @@ -172,10 +172,10 @@ int psp_v10_0_cmd_submit(struct psp_context *psp, write_frame = ring->ring_mem + (psp_write_ptr_reg / (sizeof(struct psp_gfx_rb_frame) / 4)); /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = (unsigned int)(cmd_buf_mc_addr >> 32); - write_frame->cmd_buf_addr_lo = (unsigned int)(cmd_buf_mc_addr); - write_frame->fence_addr_hi = (unsigned int)(fence_mc_addr >> 32); - write_frame->fence_addr_lo = (unsigned int)(fence_mc_addr); + write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); + write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); + write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); + write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); write_frame->fence_value = index; /* Update the write Pointer in DWORDs */ diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 6e5c6edabb84..c98d77d0c8f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -254,8 +254,8 @@ int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; - cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = (uint32_t)fw_mem_mc_addr; - cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = (uint32_t)((uint64_t)fw_mem_mc_addr >> 32); + cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); + cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; ret = psp_v3_1_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); @@ -375,10 +375,10 @@ int psp_v3_1_cmd_submit(struct psp_context *psp, memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame)); /* Update KM RB frame */ - write_frame->cmd_buf_addr_hi = (unsigned int)(cmd_buf_mc_addr >> 32); - write_frame->cmd_buf_addr_lo = (unsigned int)(cmd_buf_mc_addr); - write_frame->fence_addr_hi = (unsigned int)(fence_mc_addr >> 32); - write_frame->fence_addr_lo = (unsigned int)(fence_mc_addr); + write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr); + write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr); + write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); + write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); write_frame->fence_value = index; /* Update the write Pointer in DWORDs */ diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index f45fb0f022b3..4267fa417997 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1385,6 +1385,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev) amdgpu_program_register_sequence(adev, pitcairn_mgcg_cgcg_init, (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init)); + break; case CHIP_VERDE: amdgpu_program_register_sequence(adev, verde_golden_registers, @@ -1409,6 +1410,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev) amdgpu_program_register_sequence(adev, oland_mgcg_cgcg_init, (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init)); + break; case CHIP_HAINAN: amdgpu_program_register_sequence(adev, hainan_golden_registers, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 5fdb05a0c88a..a7341d88a320 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -625,7 +625,8 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; - adev->pg_flags = AMD_PG_SUPPORT_SDMA; + adev->pg_flags = AMD_PG_SUPPORT_SDMA | + AMD_PG_SUPPORT_MMHUB; adev->external_rev_id = 0x1; break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 88187bfc5ea3..3f95f7cb4019 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -226,10 +226,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->shared_resources = *gpu_resources; - /* We only use the first MEC */ - if (kfd->shared_resources.num_mec > 1) - kfd->shared_resources.num_mec = 1; - /* calculate max size of mqds needed for queues */ size = max_num_of_queues_per_device * kfd->device_info->mqd_size_aligned; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 955aa304ff48..602769ced3bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -77,13 +77,6 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) return false; } -unsigned int get_mec_num(struct device_queue_manager *dqm) -{ - BUG_ON(!dqm || !dqm->dev); - - return dqm->dev->shared_resources.num_mec; -} - unsigned int get_queues_num(struct device_queue_manager *dqm) { BUG_ON(!dqm || !dqm->dev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 66b9615bc3c1..faf820a06400 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -180,7 +180,6 @@ void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops); void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops); void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd); -unsigned int get_mec_num(struct device_queue_manager *dqm); unsigned int get_queues_num(struct device_queue_manager *dqm); unsigned int get_queues_per_pipe(struct device_queue_manager *dqm); unsigned int get_pipes_per_mec(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 84d1ffd1eef9..035bbc98a63d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -318,12 +318,13 @@ static struct kfd_process *create_process(const struct task_struct *thread) /* init process apertures*/ process->is_32bit_user_mode = 
in_compat_syscall(); - if (kfd_init_apertures(process) != 0) - goto err_init_apretures; + err = kfd_init_apertures(process); + if (err != 0) + goto err_init_apertures; return process; -err_init_apretures: +err_init_apertures: pqm_uninit(&process->pqm); err_process_pqm_init: hash_del_rcu(&process->kfd_processes); diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index beb2a81ab7da..70e8c20acb2f 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -184,6 +184,7 @@ enum amd_fan_ctrl_mode { #define AMD_PG_SUPPORT_SAMU (1 << 10) #define AMD_PG_SUPPORT_GFX_QUICK_MG (1 << 11) #define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12) +#define AMD_PG_SUPPORT_MMHUB (1 << 13) enum amd_pm_state_type { /* not used for dpm */ diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 91ef1484b3bb..36f376677a53 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -63,9 +63,6 @@ struct kgd2kfd_shared_resources { /* Bit n == 1 means VMID n is available for KFD. */ unsigned int compute_vmid_bitmap; - /* number of mec available from the hardware */ - uint32_t num_mec; - /* number of pipes per mec */ uint32_t num_pipe_per_mec; diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h index b68f8efcdeae..ca93b5160ba6 100644 --- a/drivers/gpu/drm/amd/include/vi_structs.h +++ b/drivers/gpu/drm/amd/include/vi_structs.h @@ -195,6 +195,274 @@ struct vi_mqd { uint32_t compute_wave_restore_addr_lo; uint32_t compute_wave_restore_addr_hi; uint32_t compute_wave_restore_control; + uint32_t reserved9; + uint32_t reserved10; + uint32_t reserved11; + uint32_t reserved12; + uint32_t reserved13; + uint32_t reserved14; + uint32_t reserved15; + uint32_t reserved16; + uint32_t reserved17; + uint32_t reserved18; + uint32_t reserved19; + uint32_t reserved20; + uint32_t reserved21; + uint32_t reserved22; + uint32_t reserved23; + uint32_t reserved24; + uint32_t reserved25; + uint32_t reserved26; + uint32_t reserved27; + uint32_t reserved28; + uint32_t reserved29; + uint32_t reserved30; + uint32_t reserved31; + uint32_t reserved32; + uint32_t reserved33; + uint32_t reserved34; + uint32_t compute_user_data_0; + uint32_t compute_user_data_1; + uint32_t compute_user_data_2; + uint32_t compute_user_data_3; + uint32_t compute_user_data_4; + uint32_t compute_user_data_5; + uint32_t compute_user_data_6; + uint32_t compute_user_data_7; + uint32_t compute_user_data_8; + uint32_t compute_user_data_9; + uint32_t compute_user_data_10; + uint32_t compute_user_data_11; + uint32_t compute_user_data_12; + uint32_t compute_user_data_13; + uint32_t compute_user_data_14; + uint32_t compute_user_data_15; + uint32_t cp_compute_csinvoc_count_lo; + uint32_t cp_compute_csinvoc_count_hi; + uint32_t reserved35; + uint32_t reserved36; + uint32_t reserved37; + uint32_t cp_mqd_query_time_lo; + uint32_t cp_mqd_query_time_hi; + uint32_t cp_mqd_connect_start_time_lo; + uint32_t cp_mqd_connect_start_time_hi; + uint32_t cp_mqd_connect_end_time_lo; + uint32_t cp_mqd_connect_end_time_hi; + uint32_t cp_mqd_connect_end_wf_count; + uint32_t cp_mqd_connect_end_pq_rptr; + uint32_t cp_mqd_connect_endvi_sdma_mqd_pq_wptr; + uint32_t cp_mqd_connect_end_ib_rptr; + uint32_t reserved38; + uint32_t reserved39; + uint32_t cp_mqd_save_start_time_lo; + uint32_t cp_mqd_save_start_time_hi; + uint32_t cp_mqd_save_end_time_lo; + uint32_t 
cp_mqd_save_end_time_hi; + uint32_t cp_mqd_restore_start_time_lo; + uint32_t cp_mqd_restore_start_time_hi; + uint32_t cp_mqd_restore_end_time_lo; + uint32_t cp_mqd_restore_end_time_hi; + uint32_t disable_queue; + uint32_t reserved41; + uint32_t gds_cs_ctxsw_cnt0; + uint32_t gds_cs_ctxsw_cnt1; + uint32_t gds_cs_ctxsw_cnt2; + uint32_t gds_cs_ctxsw_cnt3; + uint32_t reserved42; + uint32_t reserved43; + uint32_t cp_pq_exe_status_lo; + uint32_t cp_pq_exe_status_hi; + uint32_t cp_packet_id_lo; + uint32_t cp_packet_id_hi; + uint32_t cp_packet_exe_status_lo; + uint32_t cp_packet_exe_status_hi; + uint32_t gds_save_base_addr_lo; + uint32_t gds_save_base_addr_hi; + uint32_t gds_save_mask_lo; + uint32_t gds_save_mask_hi; + uint32_t ctx_save_base_addr_lo; + uint32_t ctx_save_base_addr_hi; + uint32_t dynamic_cu_mask_addr_lo; + uint32_t dynamic_cu_mask_addr_hi; + uint32_t cp_mqd_base_addr_lo; + uint32_t cp_mqd_base_addr_hi; + uint32_t cp_hqd_active; + uint32_t cp_hqd_vmid; + uint32_t cp_hqd_persistent_state; + uint32_t cp_hqd_pipe_priority; + uint32_t cp_hqd_queue_priority; + uint32_t cp_hqd_quantum; + uint32_t cp_hqd_pq_base_lo; + uint32_t cp_hqd_pq_base_hi; + uint32_t cp_hqd_pq_rptr; + uint32_t cp_hqd_pq_rptr_report_addr_lo; + uint32_t cp_hqd_pq_rptr_report_addr_hi; + uint32_t cp_hqd_pq_wptr_poll_addr_lo; + uint32_t cp_hqd_pq_wptr_poll_addr_hi; + uint32_t cp_hqd_pq_doorbell_control; + uint32_t cp_hqd_pq_wptr; + uint32_t cp_hqd_pq_control; + uint32_t cp_hqd_ib_base_addr_lo; + uint32_t cp_hqd_ib_base_addr_hi; + uint32_t cp_hqd_ib_rptr; + uint32_t cp_hqd_ib_control; + uint32_t cp_hqd_iq_timer; + uint32_t cp_hqd_iq_rptr; + uint32_t cp_hqd_dequeue_request; + uint32_t cp_hqd_dma_offload; + uint32_t cp_hqd_sema_cmd; + uint32_t cp_hqd_msg_type; + uint32_t cp_hqd_atomic0_preop_lo; + uint32_t cp_hqd_atomic0_preop_hi; + uint32_t cp_hqd_atomic1_preop_lo; + uint32_t cp_hqd_atomic1_preop_hi; + uint32_t cp_hqd_hq_status0; + uint32_t cp_hqd_hq_control0; + uint32_t cp_mqd_control; + uint32_t cp_hqd_hq_status1; + uint32_t cp_hqd_hq_control1; + uint32_t cp_hqd_eop_base_addr_lo; + uint32_t cp_hqd_eop_base_addr_hi; + uint32_t cp_hqd_eop_control; + uint32_t cp_hqd_eop_rptr; + uint32_t cp_hqd_eop_wptr; + uint32_t cp_hqd_eop_done_events; + uint32_t cp_hqd_ctx_save_base_addr_lo; + uint32_t cp_hqd_ctx_save_base_addr_hi; + uint32_t cp_hqd_ctx_save_control; + uint32_t cp_hqd_cntl_stack_offset; + uint32_t cp_hqd_cntl_stack_size; + uint32_t cp_hqd_wg_state_offset; + uint32_t cp_hqd_ctx_save_size; + uint32_t cp_hqd_gds_resource_state; + uint32_t cp_hqd_error; + uint32_t cp_hqd_eop_wptr_mem; + uint32_t cp_hqd_eop_dones; + uint32_t reserved46; + uint32_t reserved47; + uint32_t reserved48; + uint32_t reserved49; + uint32_t reserved50; + uint32_t reserved51; + uint32_t reserved52; + uint32_t reserved53; + uint32_t reserved54; + uint32_t reserved55; + uint32_t iqtimer_pkt_header; + uint32_t iqtimer_pkt_dw0; + uint32_t iqtimer_pkt_dw1; + uint32_t iqtimer_pkt_dw2; + uint32_t iqtimer_pkt_dw3; + uint32_t iqtimer_pkt_dw4; + uint32_t iqtimer_pkt_dw5; + uint32_t iqtimer_pkt_dw6; + uint32_t iqtimer_pkt_dw7; + uint32_t iqtimer_pkt_dw8; + uint32_t iqtimer_pkt_dw9; + uint32_t iqtimer_pkt_dw10; + uint32_t iqtimer_pkt_dw11; + uint32_t iqtimer_pkt_dw12; + uint32_t iqtimer_pkt_dw13; + uint32_t iqtimer_pkt_dw14; + uint32_t iqtimer_pkt_dw15; + uint32_t iqtimer_pkt_dw16; + uint32_t iqtimer_pkt_dw17; + uint32_t iqtimer_pkt_dw18; + uint32_t iqtimer_pkt_dw19; + uint32_t iqtimer_pkt_dw20; + uint32_t iqtimer_pkt_dw21; + uint32_t iqtimer_pkt_dw22; + uint32_t 
iqtimer_pkt_dw23; + uint32_t iqtimer_pkt_dw24; + uint32_t iqtimer_pkt_dw25; + uint32_t iqtimer_pkt_dw26; + uint32_t iqtimer_pkt_dw27; + uint32_t iqtimer_pkt_dw28; + uint32_t iqtimer_pkt_dw29; + uint32_t iqtimer_pkt_dw30; + uint32_t iqtimer_pkt_dw31; + uint32_t reserved56; + uint32_t reserved57; + uint32_t reserved58; + uint32_t set_resources_header; + uint32_t set_resources_dw1; + uint32_t set_resources_dw2; + uint32_t set_resources_dw3; + uint32_t set_resources_dw4; + uint32_t set_resources_dw5; + uint32_t set_resources_dw6; + uint32_t set_resources_dw7; + uint32_t reserved59; + uint32_t reserved60; + uint32_t reserved61; + uint32_t reserved62; + uint32_t reserved63; + uint32_t reserved64; + uint32_t reserved65; + uint32_t reserved66; + uint32_t reserved67; + uint32_t reserved68; + uint32_t reserved69; + uint32_t reserved70; + uint32_t reserved71; + uint32_t reserved72; + uint32_t reserved73; + uint32_t reserved74; + uint32_t reserved75; + uint32_t reserved76; + uint32_t reserved77; + uint32_t reserved78; + uint32_t reserved_t[256]; +}; + +struct vi_mqd_allocation { + struct vi_mqd mqd; + uint32_t wptr_poll_mem; + uint32_t rptr_report_mem; + uint32_t dyamic_cu_mask; + uint32_t dyamic_rb_mask; +}; + +struct cz_mqd { + uint32_t header; + uint32_t compute_dispatch_initiator; + uint32_t compute_dim_x; + uint32_t compute_dim_y; + uint32_t compute_dim_z; + uint32_t compute_start_x; + uint32_t compute_start_y; + uint32_t compute_start_z; + uint32_t compute_num_thread_x; + uint32_t compute_num_thread_y; + uint32_t compute_num_thread_z; + uint32_t compute_pipelinestat_enable; + uint32_t compute_perfcount_enable; + uint32_t compute_pgm_lo; + uint32_t compute_pgm_hi; + uint32_t compute_tba_lo; + uint32_t compute_tba_hi; + uint32_t compute_tma_lo; + uint32_t compute_tma_hi; + uint32_t compute_pgm_rsrc1; + uint32_t compute_pgm_rsrc2; + uint32_t compute_vmid; + uint32_t compute_resource_limits; + uint32_t compute_static_thread_mgmt_se0; + uint32_t compute_static_thread_mgmt_se1; + uint32_t compute_tmpring_size; + uint32_t compute_static_thread_mgmt_se2; + uint32_t compute_static_thread_mgmt_se3; + uint32_t compute_restart_x; + uint32_t compute_restart_y; + uint32_t compute_restart_z; + uint32_t compute_thread_trace_enable; + uint32_t compute_misc_reserved; + uint32_t compute_dispatch_id; + uint32_t compute_threadgroup_id; + uint32_t compute_relaunch; + uint32_t compute_wave_restore_addr_lo; + uint32_t compute_wave_restore_addr_hi; + uint32_t compute_wave_restore_control; uint32_t reserved_39; uint32_t reserved_40; uint32_t reserved_41; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index f988ed204d9a..197174e562d2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -2128,15 +2128,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) pp_table->AvfsGbCksOff.m2_shift = 12; pp_table->AvfsGbCksOff.b_shift = 0; - for (i = 0; i < dep_table->count; i++) { - if (dep_table->entries[i].sclk_offset == 0) - pp_table->StaticVoltageOffsetVid[i] = 248; - else - pp_table->StaticVoltageOffsetVid[i] = - (uint8_t)(dep_table->entries[i].sclk_offset * - VOLTAGE_VID_OFFSET_SCALE2 / - VOLTAGE_VID_OFFSET_SCALE1); - } + for (i = 0; i < dep_table->count; i++) + pp_table->StaticVoltageOffsetVid[i] = + convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset)); if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != data->disp_clk_quad_eqn_a) && @@ -2865,6 +2859,7 @@ static int 
vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, void *state, struct pp_power_state *power_state, void *pp_table, uint32_t classification_flag) { + ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2; struct vega10_power_state *vega10_power_state = cast_phw_vega10_power_state(&(power_state->hardware)); struct vega10_performance_level *performance_level; @@ -2941,11 +2936,16 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, performance_level = &(vega10_power_state->performance_levels [vega10_power_state->performance_level_count++]); - performance_level->soc_clock = socclk_dep_table->entries - [state_entry->ucSocClockIndexHigh].ulClk; - performance_level->gfx_clock = gfxclk_dep_table->entries + [state_entry->ucSocClockIndexHigh].ulClk; + if (gfxclk_dep_table->ucRevId == 0) { + performance_level->gfx_clock = gfxclk_dep_table->entries [state_entry->ucGfxClockIndexHigh].ulClk; + } else if (gfxclk_dep_table->ucRevId == 1) { + patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries; + performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk; + } + performance_level->mem_clock = mclk_dep_table->entries [state_entry->ucMemClockIndexHigh].ulMemClk; return 0; @@ -3349,7 +3349,6 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( dpm_table-> gfx_table.dpm_levels[dpm_table->gfx_table.count - 1]. value = sclk; - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -3472,7 +3471,6 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( return result); } } - return result; } @@ -3828,13 +3826,18 @@ static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr, struct pp_gpu_power *query) { + uint32_t value; + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrPkgPwr), "Failed to get current package power!", return -EINVAL); - return vega10_read_arg_from_smc(hwmgr->smumgr, - &query->average_gpu_power); + vega10_read_arg_from_smc(hwmgr->smumgr, &value); + /* power value is an integer */ + query->average_gpu_power = value << 8; + + return 0; } static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h index 52beea3bf6b7..b3e63003a789 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h @@ -144,6 +144,15 @@ typedef struct _ATOM_Vega10_GFXCLK_Dependency_Record { USHORT usAVFSOffset; /* AVFS Voltage offset */ } ATOM_Vega10_GFXCLK_Dependency_Record; +typedef struct _ATOM_Vega10_GFXCLK_Dependency_Record_V2 { + ULONG ulClk; + UCHAR ucVddInd; + USHORT usCKSVOffsetandDisable; + USHORT usAVFSOffset; + UCHAR ucACGEnable; + UCHAR ucReserved[3]; +} ATOM_Vega10_GFXCLK_Dependency_Record_V2; + typedef struct _ATOM_Vega10_MCLK_Dependency_Record { ULONG ulMemClk; /* Clock Frequency */ UCHAR ucVddInd; /* SOC_VDD index */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index 2b892e47d8dc..1623644ea49a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c @@ -585,6 +585,7 @@ static int get_gfxclk_voltage_dependency_table( uint32_t table_size, i; struct 
phm_ppt_v1_clock_voltage_dependency_table *clk_table; + ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2; PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0), "Invalid PowerPlay Table!", return -1); @@ -601,18 +602,41 @@ static int get_gfxclk_voltage_dependency_table( clk_table->count = clk_dep_table->ucNumEntries; - for (i = 0; i < clk_table->count; i++) { - clk_table->entries[i].vddInd = + if (clk_dep_table->ucRevId == 0) { + for (i = 0; i < clk_table->count; i++) { + clk_table->entries[i].vddInd = clk_dep_table->entries[i].ucVddInd; - clk_table->entries[i].clk = + clk_table->entries[i].clk = le32_to_cpu(clk_dep_table->entries[i].ulClk); - clk_table->entries[i].cks_enable = - (((clk_dep_table->entries[i].usCKSVOffsetandDisable & 0x8000) + clk_table->entries[i].cks_enable = + (((le16_to_cpu(clk_dep_table->entries[i].usCKSVOffsetandDisable) & 0x8000) >> 15) == 0) ? 1 : 0; - clk_table->entries[i].cks_voffset = - (clk_dep_table->entries[i].usCKSVOffsetandDisable & 0x7F); - clk_table->entries[i].sclk_offset = - clk_dep_table->entries[i].usAVFSOffset; + clk_table->entries[i].cks_voffset = + le16_to_cpu(clk_dep_table->entries[i].usCKSVOffsetandDisable) & 0x7F; + clk_table->entries[i].sclk_offset = + le16_to_cpu(clk_dep_table->entries[i].usAVFSOffset); + } + } else if (clk_dep_table->ucRevId == 1) { + patom_record_v2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)clk_dep_table->entries; + for (i = 0; i < clk_table->count; i++) { + clk_table->entries[i].vddInd = + patom_record_v2->ucVddInd; + clk_table->entries[i].clk = + le32_to_cpu(patom_record_v2->ulClk); + clk_table->entries[i].cks_enable = + (((le16_to_cpu(patom_record_v2->usCKSVOffsetandDisable) & 0x8000) + >> 15) == 0) ? 1 : 0; + clk_table->entries[i].cks_voffset = + le16_to_cpu(patom_record_v2->usCKSVOffsetandDisable) & 0x7F; + clk_table->entries[i].sclk_offset = + le16_to_cpu(patom_record_v2->usAVFSOffset); + patom_record_v2++; + } + } else { + kfree(clk_table); + PP_ASSERT_WITH_CODE(false, + "Unsupported GFXClockDependencyTable Revision!", + return -EINVAL); } *pp_vega10_clk_dep_table = clk_table; diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h index e07cab311c7a..b4af9e85dfa5 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h @@ -124,8 +124,8 @@ typedef uint16_t PPSMC_Result; #define PPSMC_MSG_NumOfDisplays 0x56 #define PPSMC_MSG_ReadSerialNumTop32 0x58 #define PPSMC_MSG_ReadSerialNumBottom32 0x59 -#define PPSMC_MSG_GetCurrPkgPwr 0x5C -#define PPSMC_Message_Count 0x5D +#define PPSMC_MSG_GetCurrPkgPwr 0x61 +#define PPSMC_Message_Count 0x62 typedef int PPSMC_Msg; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 39c7091866e8..652aaa43e95c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c @@ -72,7 +72,7 @@ static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr, result = SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); if (result != 0) { - pr_err("cz_send_msg_to_smc_async failed\n"); + pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg); return result; } diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 1a3359c0f6cd..d67b6f15e8b8 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -261,21 +261,14 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane, { 
struct drm_framebuffer *fb = plane->state->fb; struct hdlcd_drm_private *hdlcd; - struct drm_gem_cma_object *gem; - u32 src_x, src_y, dest_h; + u32 dest_h; dma_addr_t scanout_start; if (!fb) return; - src_x = plane->state->src.x1 >> 16; - src_y = plane->state->src.y1 >> 16; dest_h = drm_rect_height(&plane->state->dst); - gem = drm_fb_cma_get_gem_obj(fb, 0); - - scanout_start = gem->paddr + fb->offsets[0] + - src_y * fb->pitches[0] + - src_x * fb->format->cpp[0]; + scanout_start = drm_fb_cma_get_gem_addr(fb, plane->state, 0); hdlcd = plane->dev->dev_private; hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]); diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index 345c8357b273..d3da87fbd85a 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -297,6 +297,9 @@ static int hdlcd_drm_bind(struct device *dev) if (ret) goto err_free; + /* Set the CRTC's port so that the encoder component can find it */ + hdlcd->crtc.port = of_graph_get_port_by_id(dev->of_node, 0); + ret = component_bind_all(dev, drm); if (ret) { DRM_ERROR("Failed to bind all components\n"); @@ -340,11 +343,14 @@ err_register: } err_fbdev: drm_kms_helper_poll_fini(drm); + drm_vblank_cleanup(drm); err_vblank: pm_runtime_disable(drm->dev); err_pm_active: component_unbind_all(dev, drm); err_unload: + of_node_put(hdlcd->crtc.port); + hdlcd->crtc.port = NULL; drm_irq_uninstall(drm); of_reserved_mem_device_release(drm->dev); err_free: @@ -367,6 +373,9 @@ static void hdlcd_drm_unbind(struct device *dev) } drm_kms_helper_poll_fini(drm); component_unbind_all(dev, drm); + of_node_put(hdlcd->crtc.port); + hdlcd->crtc.port = NULL; + drm_vblank_cleanup(drm); pm_runtime_get_sync(drm->dev); drm_irq_uninstall(drm); pm_runtime_put_sync(drm->dev); diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c index 2a7eb6817c36..92e6b08ea64a 100644 --- a/drivers/gpu/drm/armada/armada_fb.c +++ b/drivers/gpu/drm/armada/armada_fb.c @@ -133,7 +133,7 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev, } /* Framebuffer objects must have a valid device address for scanout */ - if (obj->dev_addr == DMA_ERROR_CODE) { + if (!obj->mapped) { ret = -EINVAL; goto err_unref; } diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index d6c2a5d190eb..a76ca21d063b 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -175,6 +175,7 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj) obj->phys_addr = obj->linear->start; obj->dev_addr = obj->linear->start; + obj->mapped = true; } DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj, @@ -205,7 +206,6 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size) return NULL; drm_gem_private_object_init(dev, &obj->obj, size); - obj->dev_addr = DMA_ERROR_CODE; DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size); @@ -229,8 +229,6 @@ static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev, return NULL; } - obj->dev_addr = DMA_ERROR_CODE; - mapping = obj->obj.filp->f_mapping; mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); @@ -610,5 +608,6 @@ int armada_gem_map_import(struct armada_gem_object *dobj) return -EINVAL; } dobj->dev_addr = sg_dma_address(dobj->sgt->sgl); + dobj->mapped = true; return 0; } diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h index b88d2b9853c7..6e524e0676bb 100644 --- 
a/drivers/gpu/drm/armada/armada_gem.h +++ b/drivers/gpu/drm/armada/armada_gem.h @@ -16,6 +16,7 @@ struct armada_gem_object { void *addr; phys_addr_t phys_addr; resource_size_t dev_addr; + bool mapped; struct drm_mm_node *linear; /* for linear backed */ struct page *page; /* for page backed */ struct sg_table *sgt; /* for imported */ diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c index cf92ebfe6ab7..67469c26bae8 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c @@ -11,6 +11,7 @@ #include <sound/hdmi-codec.h> #include <sound/pcm.h> #include <sound/soc.h> +#include <linux/of_graph.h> #include "adv7511.h" @@ -182,10 +183,31 @@ static void audio_shutdown(struct device *dev, void *data) { } +static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component, + struct device_node *endpoint) +{ + struct of_endpoint of_ep; + int ret; + + ret = of_graph_parse_endpoint(endpoint, &of_ep); + if (ret < 0) + return ret; + + /* + * HDMI sound should be located as reg = <2> + * Then, it is sound port 0 + */ + if (of_ep.port == 2) + return 0; + + return -EINVAL; +} + static const struct hdmi_codec_ops adv7511_codec_ops = { .hw_params = adv7511_hdmi_hw_params, .audio_shutdown = audio_shutdown, .audio_startup = audio_startup, + .get_dai_id = adv7511_hdmi_i2s_get_dai_id, }; static struct hdmi_codec_pdata codec_data = { diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 99f9a4beb859..67fe19e5a9c6 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -161,7 +161,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel, int ret; if (!panel) - return ERR_PTR(EINVAL); + return ERR_PTR(-EINVAL); panel_bridge = devm_kzalloc(panel->dev, sizeof(*panel_bridge), GFP_KERNEL); diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 40d2827a6d19..53e78d092d18 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig @@ -1,6 +1,7 @@ config DRM_DW_HDMI tristate select DRM_KMS_HELPER + select REGMAP_MMIO config DRM_DW_HDMI_AHB_AUDIO tristate "Synopsys Designware AHB Audio interface" diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c index aaf287d2e91d..b2cf59f54c88 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c @@ -82,9 +82,30 @@ static void dw_hdmi_i2s_audio_shutdown(struct device *dev, void *data) hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0); } +static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component, + struct device_node *endpoint) +{ + struct of_endpoint of_ep; + int ret; + + ret = of_graph_parse_endpoint(endpoint, &of_ep); + if (ret < 0) + return ret; + + /* + * HDMI sound should be located as reg = <2> + * Then, it is sound port 0 + */ + if (of_ep.port == 2) + return 0; + + return -EINVAL; +} + static struct hdmi_codec_ops dw_hdmi_i2s_ops = { .hw_params = dw_hdmi_i2s_hw_params, .audio_shutdown = dw_hdmi_i2s_audio_shutdown, + .get_dai_id = dw_hdmi_i2s_get_dai_id, }; static int snd_dw_hdmi_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 5c26488e7a2d..0529e500c534 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1255,7 +1255,7 @@ static int 
tc_probe(struct i2c_client *client, const struct i2c_device_id *id) /* port@2 is the output port */ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL); - if (ret) + if (ret && ret != -ENODEV) return ret; /* Shut down GPIO is optional */ diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index adb1dd7fde5f..1ee84dd802d4 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -1258,11 +1258,11 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data, * lock, preventing of allocating more buffers after this call. Information * about each requested buffer is then copied into user space. */ -int drm_legacy_infobufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int __drm_legacy_infobufs(struct drm_device *dev, + void *data, int *p, + int (*f)(void *, int, struct drm_buf_entry *)) { struct drm_device_dma *dma = dev->dma; - struct drm_buf_info *request = data; int i; int count; @@ -1290,26 +1290,12 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data, DRM_DEBUG("count = %d\n", count); - if (request->count >= count) { + if (*p >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { - if (dma->bufs[i].buf_count) { - struct drm_buf_desc __user *to = - &request->list[count]; - struct drm_buf_entry *from = &dma->bufs[i]; - if (copy_to_user(&to->count, - &from->buf_count, - sizeof(from->buf_count)) || - copy_to_user(&to->size, - &from->buf_size, - sizeof(from->buf_size)) || - copy_to_user(&to->low_mark, - &from->low_mark, - sizeof(from->low_mark)) || - copy_to_user(&to->high_mark, - &from->high_mark, - sizeof(from->high_mark))) + struct drm_buf_entry *from = &dma->bufs[i]; + if (from->buf_count) { + if (f(data, count, from) < 0) return -EFAULT; - DRM_DEBUG("%d %d %d %d %d\n", i, dma->bufs[i].buf_count, @@ -1320,11 +1306,29 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data, } } } - request->count = count; + *p = count; return 0; } +static int copy_one_buf(void *data, int count, struct drm_buf_entry *from) +{ + struct drm_buf_info *request = data; + struct drm_buf_desc __user *to = &request->list[count]; + struct drm_buf_desc v = {.count = from->buf_count, + .size = from->buf_size, + .low_mark = from->low_mark, + .high_mark = from->high_mark}; + return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags)); +} + +int drm_legacy_infobufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_buf_info *request = data; + return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf); +} + /** * Specifies a low and high water mark for buffer allocation * @@ -1439,15 +1443,15 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data, * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls * drm_mmap_dma(). 
*/ -int drm_legacy_mapbufs(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p, + void __user **v, + int (*f)(void *, int, unsigned long, + struct drm_buf *), + struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; int retcode = 0; - const int zero = 0; unsigned long virtual; - unsigned long address; - struct drm_buf_map *request = data; int i; if (!drm_core_check_feature(dev, DRIVER_LEGACY)) @@ -1467,7 +1471,7 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data, dev->buf_use++; /* Can't allocate more after this call */ spin_unlock(&dev->buf_lock); - if (request->count >= dma->buf_count) { + if (*p >= dma->buf_count) { if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) || (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG))) { @@ -1492,41 +1496,51 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data, retcode = (signed long)virtual; goto done; } - request->virtual = (void __user *)virtual; + *v = (void __user *)virtual; for (i = 0; i < dma->buf_count; i++) { - if (copy_to_user(&request->list[i].idx, - &dma->buflist[i]->idx, - sizeof(request->list[0].idx))) { - retcode = -EFAULT; - goto done; - } - if (copy_to_user(&request->list[i].total, - &dma->buflist[i]->total, - sizeof(request->list[0].total))) { - retcode = -EFAULT; - goto done; - } - if (copy_to_user(&request->list[i].used, - &zero, sizeof(zero))) { - retcode = -EFAULT; - goto done; - } - address = virtual + dma->buflist[i]->offset; /* *** */ - if (copy_to_user(&request->list[i].address, - &address, sizeof(address))) { + if (f(data, i, virtual, dma->buflist[i]) < 0) { retcode = -EFAULT; goto done; } } } done: - request->count = dma->buf_count; - DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode); + *p = dma->buf_count; + DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode); return retcode; } +static int map_one_buf(void *data, int idx, unsigned long virtual, + struct drm_buf *buf) +{ + struct drm_buf_map *request = data; + unsigned long address = virtual + buf->offset; /* *** */ + + if (copy_to_user(&request->list[idx].idx, &buf->idx, + sizeof(request->list[0].idx))) + return -EFAULT; + if (copy_to_user(&request->list[idx].total, &buf->total, + sizeof(request->list[0].total))) + return -EFAULT; + if (clear_user(&request->list[idx].used, sizeof(int))) + return -EFAULT; + if (copy_to_user(&request->list[idx].address, &address, + sizeof(address))) + return -EFAULT; + return 0; +} + +int drm_legacy_mapbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_buf_map *request = data; + return __drm_legacy_mapbufs(dev, data, &request->count, + &request->virtual, map_one_buf, + file_priv); +} + int drm_legacy_dma_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 5cd61aff7857..8072e6e4c62c 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1293,21 +1293,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (!connector) return -ENOENT; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - encoder = drm_connector_get_encoder(connector); - if (encoder) - out_resp->encoder_id = encoder->base.id; - else - out_resp->encoder_id = 0; - - ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, - (uint32_t __user *)(unsigned long)(out_resp->props_ptr), - (uint64_t __user *)(unsigned 
long)(out_resp->prop_values_ptr), - &out_resp->count_props); - drm_modeset_unlock(&dev->mode_config.connection_mutex); - if (ret) - goto out_unref; - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) if (connector->encoder_ids[i] != 0) encoders_count++; @@ -1320,7 +1305,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (put_user(connector->encoder_ids[i], encoder_ptr + copied)) { ret = -EFAULT; - goto out_unref; + goto out; } copied++; } @@ -1364,15 +1349,32 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (copy_to_user(mode_ptr + copied, &u_mode, sizeof(u_mode))) { ret = -EFAULT; + mutex_unlock(&dev->mode_config.mutex); + goto out; } copied++; } } out_resp->count_modes = mode_count; -out: mutex_unlock(&dev->mode_config.mutex); -out_unref: + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + encoder = drm_connector_get_encoder(connector); + if (encoder) + out_resp->encoder_id = encoder->base.id; + else + out_resp->encoder_id = 0; + + /* Only grab properties after probing, to make sure EDID and other + * properties reflect the latest status. */ + ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, + (uint32_t __user *)(unsigned long)(out_resp->props_ptr), + (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), + &out_resp->count_props); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + +out: drm_connector_put(connector); return ret; diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c index ec1ed94b2390..d34e5096887a 100644 --- a/drivers/gpu/drm/drm_dp_aux_dev.c +++ b/drivers/gpu/drm/drm_dp_aux_dev.c @@ -32,6 +32,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/uaccess.h> +#include <linux/uio.h> #include <drm/drm_dp_helper.h> #include <drm/drm_crtc.h> #include <drm/drmP.h> @@ -140,101 +141,83 @@ static loff_t auxdev_llseek(struct file *file, loff_t offset, int whence) return fixed_size_llseek(file, offset, whence, AUX_MAX_OFFSET); } -static ssize_t auxdev_read(struct file *file, char __user *buf, size_t count, - loff_t *offset) +static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to) { - size_t bytes_pending, num_bytes_processed = 0; - struct drm_dp_aux_dev *aux_dev = file->private_data; + struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data; + loff_t pos = iocb->ki_pos; ssize_t res = 0; if (!atomic_inc_not_zero(&aux_dev->usecount)) return -ENODEV; - bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - (*offset)); - - if (!access_ok(VERIFY_WRITE, buf, bytes_pending)) { - res = -EFAULT; - goto out; - } + iov_iter_truncate(to, AUX_MAX_OFFSET - pos); - while (bytes_pending > 0) { - uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES]; - ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf)); + while (iov_iter_count(to)) { + uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES]; + ssize_t todo = min(iov_iter_count(to), sizeof(buf)); if (signal_pending(current)) { - res = num_bytes_processed ? - num_bytes_processed : -ERESTARTSYS; - goto out; + res = -ERESTARTSYS; + break; } - res = drm_dp_dpcd_read(aux_dev->aux, *offset, localbuf, todo); - if (res <= 0) { - res = num_bytes_processed ? num_bytes_processed : res; - goto out; - } - if (__copy_to_user(buf + num_bytes_processed, localbuf, res)) { - res = num_bytes_processed ? 
- num_bytes_processed : -EFAULT; - goto out; + res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo); + if (res <= 0) + break; + + if (copy_to_iter(buf, res, to) != res) { + res = -EFAULT; + break; } - bytes_pending -= res; - *offset += res; - num_bytes_processed += res; - res = num_bytes_processed; + + pos += res; } -out: + if (pos != iocb->ki_pos) + res = pos - iocb->ki_pos; + iocb->ki_pos = pos; + atomic_dec(&aux_dev->usecount); wake_up_atomic_t(&aux_dev->usecount); return res; } -static ssize_t auxdev_write(struct file *file, const char __user *buf, - size_t count, loff_t *offset) +static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from) { - size_t bytes_pending, num_bytes_processed = 0; - struct drm_dp_aux_dev *aux_dev = file->private_data; + struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data; + loff_t pos = iocb->ki_pos; ssize_t res = 0; if (!atomic_inc_not_zero(&aux_dev->usecount)) return -ENODEV; - bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - *offset); - - if (!access_ok(VERIFY_READ, buf, bytes_pending)) { - res = -EFAULT; - goto out; - } + iov_iter_truncate(from, AUX_MAX_OFFSET - pos); - while (bytes_pending > 0) { - uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES]; - ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf)); + while (iov_iter_count(from)) { + uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES]; + ssize_t todo = min(iov_iter_count(from), sizeof(buf)); if (signal_pending(current)) { - res = num_bytes_processed ? - num_bytes_processed : -ERESTARTSYS; - goto out; + res = -ERESTARTSYS; + break; } - if (__copy_from_user(localbuf, - buf + num_bytes_processed, todo)) { - res = num_bytes_processed ? - num_bytes_processed : -EFAULT; - goto out; + if (!copy_from_iter_full(buf, todo, from)) { + res = -EFAULT; + break; } - res = drm_dp_dpcd_write(aux_dev->aux, *offset, localbuf, todo); - if (res <= 0) { - res = num_bytes_processed ? 
num_bytes_processed : res; - goto out; - } - bytes_pending -= res; - *offset += res; - num_bytes_processed += res; - res = num_bytes_processed; + res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo); + if (res <= 0) + break; + + pos += res; } -out: + if (pos != iocb->ki_pos) + res = pos - iocb->ki_pos; + iocb->ki_pos = pos; + atomic_dec(&aux_dev->usecount); wake_up_atomic_t(&aux_dev->usecount); return res; @@ -251,8 +234,8 @@ static int auxdev_release(struct inode *inode, struct file *file) static const struct file_operations auxdev_fops = { .owner = THIS_MODULE, .llseek = auxdev_llseek, - .read = auxdev_read, - .write = auxdev_write, + .read_iter = auxdev_read_iter, + .write_iter = auxdev_write_iter, .open = auxdev_open, .release = auxdev_release, }; diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 213fb837e1c4..08af8d6b844b 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -544,7 +544,7 @@ void drm_dp_downstream_debug(struct seq_file *m, DP_DETAILED_CAP_INFO_AVAILABLE; int clk; int bpc; - char id[6]; + char id[7]; int len; uint8_t rev[2]; int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; @@ -583,6 +583,7 @@ void drm_dp_downstream_debug(struct seq_file *m, seq_puts(m, "\t\tType: N/A\n"); } + memset(id, 0, sizeof(id)); drm_dp_downstream_id(aux, id); seq_printf(m, "\t\tID: %s\n", id); @@ -591,7 +592,7 @@ void drm_dp_downstream_debug(struct seq_file *m, seq_printf(m, "\t\tHW: %d.%d\n", (rev[0] & 0xf0) >> 4, rev[0] & 0xf); - len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2); + len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2); if (len > 0) seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index bfd237c15e76..ae5f06895562 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, return false; } + /* + * ignore out-of-order messages or messages that are part of a + * failed transaction + */ + if (!recv_hdr.somt && !msg->have_somt) + return false; + /* get length contained in this portion */ msg->curchunk_len = recv_hdr.msg_len; msg->curchunk_hdrlen = hdrlen; @@ -2164,7 +2171,7 @@ out_unlock: } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); -static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) +static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) { int len; u8 replyblock[32]; @@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) replyblock, len); if (ret != len) { DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); - return; + return false; } ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); if (!ret) { DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); - return; + return false; } replylen = msg->curchunk_len + msg->curchunk_hdrlen; @@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, replyblock, len); if (ret != len) { - DRM_DEBUG_KMS("failed to read a chunk\n"); + DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n", + len, ret); + return false; } + ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); - if (ret == false) + if (!ret) { DRM_DEBUG_KMS("failed to build sideband msg\n"); + return false; + } + curreply += len; replylen -= len; } + 
return true; } static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) { int ret = 0; - drm_dp_get_one_sb_msg(mgr, false); + if (!drm_dp_get_one_sb_msg(mgr, false)) { + memset(&mgr->down_rep_recv, 0, + sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } if (mgr->down_rep_recv.have_eomt) { struct drm_dp_sideband_msg_tx *txmsg; @@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { int ret = 0; - drm_dp_get_one_sb_msg(mgr, true); + + if (!drm_dp_get_one_sb_msg(mgr, true)) { + memset(&mgr->up_req_recv, 0, + sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } if (mgr->up_req_recv.have_eomt) { struct drm_dp_sideband_msg_req_body msg; @@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); } - drm_dp_put_mst_branch_device(mstb); + if (mstb) + drm_dp_put_mst_branch_device(mstb); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); } return ret; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index fc8ef42203ec..b3ef4f1c2630 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -832,6 +832,7 @@ unlock: drm_atomic_clean_old_fb(dev, plane_mask, ret); if (ret == -EDEADLK) { + drm_atomic_state_clear(state); drm_modeset_backoff(&ctx); goto retry; } diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 5cecc974d2f9..5edc24bd10fa 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -146,6 +146,10 @@ static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc) #endif +drm_ioctl_t drm_version; +drm_ioctl_t drm_getunique; +drm_ioctl_t drm_getclient; + /* drm_syncobj.c */ void drm_syncobj_open(struct drm_file *file_private); void drm_syncobj_release(struct drm_file *file_private); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index ae386783e3ea..d1f202852028 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -32,6 +32,9 @@ #include <linux/export.h> #include <drm/drmP.h> +#include "drm_legacy.h" +#include "drm_internal.h" +#include "drm_crtc_internal.h" #define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t) #define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t) @@ -87,39 +90,31 @@ static int compat_drm_version(struct file *file, unsigned int cmd, unsigned long arg) { drm_version32_t v32; - struct drm_version __user *version; + struct drm_version v; int err; if (copy_from_user(&v32, (void __user *)arg, sizeof(v32))) return -EFAULT; - version = compat_alloc_user_space(sizeof(*version)); - if (!version) - return -EFAULT; - if (__put_user(v32.name_len, &version->name_len) - || __put_user((void __user *)(unsigned long)v32.name, - &version->name) - || __put_user(v32.date_len, &version->date_len) - || __put_user((void __user *)(unsigned long)v32.date, - &version->date) - || __put_user(v32.desc_len, &version->desc_len) - || __put_user((void __user *)(unsigned long)v32.desc, - &version->desc)) - return -EFAULT; - - err = drm_ioctl(file, - DRM_IOCTL_VERSION, (unsigned long)version); + v = (struct drm_version) { + .name_len = v32.name_len, + .name = compat_ptr(v32.name), + .date_len = v32.date_len, + .date = compat_ptr(v32.date), + .desc_len = v32.desc_len, + .desc = compat_ptr(v32.desc), + }; + err = 
drm_ioctl_kernel(file, drm_version, &v, + DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW); if (err) return err; - if (__get_user(v32.version_major, &version->version_major) - || __get_user(v32.version_minor, &version->version_minor) - || __get_user(v32.version_patchlevel, &version->version_patchlevel) - || __get_user(v32.name_len, &version->name_len) - || __get_user(v32.date_len, &version->date_len) - || __get_user(v32.desc_len, &version->desc_len)) - return -EFAULT; - + v32.version_major = v.version_major; + v32.version_minor = v.version_minor; + v32.version_patchlevel = v.version_patchlevel; + v32.name_len = v.name_len; + v32.date_len = v.date_len; + v32.desc_len = v.desc_len; if (copy_to_user((void __user *)arg, &v32, sizeof(v32))) return -EFAULT; return 0; @@ -134,26 +129,21 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, unsigned long arg) { drm_unique32_t uq32; - struct drm_unique __user *u; + struct drm_unique uq; int err; if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) return -EFAULT; + uq = (struct drm_unique){ + .unique_len = uq32.unique_len, + .unique = compat_ptr(uq32.unique), + }; - u = compat_alloc_user_space(sizeof(*u)); - if (!u) - return -EFAULT; - if (__put_user(uq32.unique_len, &u->unique_len) - || __put_user((void __user *)(unsigned long)uq32.unique, - &u->unique)) - return -EFAULT; - - err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u); + err = drm_ioctl_kernel(file, drm_getunique, &uq, DRM_UNLOCKED); if (err) return err; - if (__get_user(uq32.unique_len, &u->unique_len)) - return -EFAULT; + uq32.unique_len = uq.unique_len; if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32))) return -EFAULT; return 0; @@ -162,21 +152,8 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, static int compat_drm_setunique(struct file *file, unsigned int cmd, unsigned long arg) { - drm_unique32_t uq32; - struct drm_unique __user *u; - - if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) - return -EFAULT; - - u = compat_alloc_user_space(sizeof(*u)); - if (!u) - return -EFAULT; - if (__put_user(uq32.unique_len, &u->unique_len) - || __put_user((void __user *)(unsigned long)uq32.unique, - &u->unique)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u); + /* it's dead */ + return -EINVAL; } typedef struct drm_map32 { @@ -193,32 +170,23 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd, { drm_map32_t __user *argp = (void __user *)arg; drm_map32_t m32; - struct drm_map __user *map; - int idx, err; - void *handle; - - if (get_user(idx, &argp->offset)) - return -EFAULT; + struct drm_map map; + int err; - map = compat_alloc_user_space(sizeof(*map)); - if (!map) - return -EFAULT; - if (__put_user(idx, &map->offset)) + if (copy_from_user(&m32, argp, sizeof(m32))) return -EFAULT; - err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map); + map.offset = m32.offset; + err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, DRM_UNLOCKED); if (err) return err; - if (__get_user(m32.offset, &map->offset) - || __get_user(m32.size, &map->size) - || __get_user(m32.type, &map->type) - || __get_user(m32.flags, &map->flags) - || __get_user(handle, &map->handle) - || __get_user(m32.mtrr, &map->mtrr)) - return -EFAULT; - - m32.handle = (unsigned long)handle; + m32.offset = map.offset; + m32.size = map.size; + m32.type = map.type; + m32.flags = map.flags; + m32.handle = ptr_to_compat(map.handle); + m32.mtrr = map.mtrr; if (copy_to_user(argp, &m32, sizeof(m32))) return 
-EFAULT; return 0; @@ -230,35 +198,28 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd, { drm_map32_t __user *argp = (void __user *)arg; drm_map32_t m32; - struct drm_map __user *map; + struct drm_map map; int err; - void *handle; if (copy_from_user(&m32, argp, sizeof(m32))) return -EFAULT; - map = compat_alloc_user_space(sizeof(*map)); - if (!map) - return -EFAULT; - if (__put_user(m32.offset, &map->offset) - || __put_user(m32.size, &map->size) - || __put_user(m32.type, &map->type) - || __put_user(m32.flags, &map->flags)) - return -EFAULT; + map.offset = m32.offset; + map.size = m32.size; + map.type = m32.type; + map.flags = m32.flags; - err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map); + err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); if (err) return err; - if (__get_user(m32.offset, &map->offset) - || __get_user(m32.mtrr, &map->mtrr) - || __get_user(handle, &map->handle)) - return -EFAULT; - - m32.handle = (unsigned long)handle; - if (m32.handle != (unsigned long)handle) + m32.offset = map.offset; + m32.mtrr = map.mtrr; + m32.handle = ptr_to_compat(map.handle); + if (map.handle != compat_ptr(m32.handle)) pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n", - handle, m32.type, m32.offset); + map.handle, m32.type, m32.offset); if (copy_to_user(argp, &m32, sizeof(m32))) return -EFAULT; @@ -270,19 +231,13 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd, unsigned long arg) { drm_map32_t __user *argp = (void __user *)arg; - struct drm_map __user *map; + struct drm_map map; u32 handle; if (get_user(handle, &argp->handle)) return -EFAULT; - - map = compat_alloc_user_space(sizeof(*map)); - if (!map) - return -EFAULT; - if (__put_user((void *)(unsigned long)handle, &map->handle)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map); + map.handle = compat_ptr(handle); + return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH); } typedef struct drm_client32 { @@ -299,29 +254,24 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, { drm_client32_t c32; drm_client32_t __user *argp = (void __user *)arg; - struct drm_client __user *client; - int idx, err; + struct drm_client client; + int err; - if (get_user(idx, &argp->idx)) + if (copy_from_user(&c32, argp, sizeof(c32))) return -EFAULT; - client = compat_alloc_user_space(sizeof(*client)); - if (!client) - return -EFAULT; - if (__put_user(idx, &client->idx)) - return -EFAULT; + client.idx = c32.idx; - err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client); + err = drm_ioctl_kernel(file, drm_getclient, &client, DRM_UNLOCKED); if (err) return err; - if (__get_user(c32.idx, &client->idx) - || __get_user(c32.auth, &client->auth) - || __get_user(c32.pid, &client->pid) - || __get_user(c32.uid, &client->uid) - || __get_user(c32.magic, &client->magic) - || __get_user(c32.iocs, &client->iocs)) - return -EFAULT; + c32.idx = client.idx; + c32.auth = client.auth; + c32.pid = client.pid; + c32.uid = client.uid; + c32.magic = client.magic; + c32.iocs = client.iocs; if (copy_to_user(argp, &c32, sizeof(c32))) return -EFAULT; @@ -339,28 +289,14 @@ typedef struct drm_stats32 { static int compat_drm_getstats(struct file *file, unsigned int cmd, unsigned long arg) { - drm_stats32_t s32; drm_stats32_t __user *argp = (void __user *)arg; - struct drm_stats __user *stats; - int i, err; - - memset(&s32, 0, sizeof(drm_stats32_t)); - stats = 
compat_alloc_user_space(sizeof(*stats)); - if (!stats) - return -EFAULT; + int err; - err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats); + err = drm_ioctl_kernel(file, drm_noop, NULL, DRM_UNLOCKED); if (err) return err; - if (__get_user(s32.count, &stats->count)) - return -EFAULT; - for (i = 0; i < 15; ++i) - if (__get_user(s32.data[i].value, &stats->data[i].value) - || __get_user(s32.data[i].type, &stats->data[i].type)) - return -EFAULT; - - if (copy_to_user(argp, &s32, sizeof(s32))) + if (clear_user(argp, sizeof(drm_stats32_t))) return -EFAULT; return 0; } @@ -378,26 +314,28 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd, unsigned long arg) { drm_buf_desc32_t __user *argp = (void __user *)arg; - struct drm_buf_desc __user *buf; + drm_buf_desc32_t desc32; + struct drm_buf_desc desc; int err; - unsigned long agp_start; - buf = compat_alloc_user_space(sizeof(*buf)); - if (!buf || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))) + if (copy_from_user(&desc32, argp, sizeof(drm_buf_desc32_t))) return -EFAULT; - if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start)) - || __get_user(agp_start, &argp->agp_start) - || __put_user(agp_start, &buf->agp_start)) - return -EFAULT; + desc = (struct drm_buf_desc){ + desc32.count, desc32.size, desc32.low_mark, desc32.high_mark, + desc32.flags, desc32.agp_start + }; - err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf); + err = drm_ioctl_kernel(file, drm_legacy_addbufs, &desc, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); if (err) return err; - if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start)) - || __get_user(agp_start, &buf->agp_start) - || __put_user(agp_start, &argp->agp_start)) + desc32 = (drm_buf_desc32_t){ + desc.count, desc.size, desc.low_mark, desc.high_mark, + desc.flags, desc.agp_start + }; + if (copy_to_user(argp, &desc32, sizeof(drm_buf_desc32_t))) return -EFAULT; return 0; @@ -408,21 +346,17 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd, { drm_buf_desc32_t b32; drm_buf_desc32_t __user *argp = (void __user *)arg; - struct drm_buf_desc __user *buf; + struct drm_buf_desc buf; if (copy_from_user(&b32, argp, sizeof(b32))) return -EFAULT; - buf = compat_alloc_user_space(sizeof(*buf)); - if (!buf) - return -EFAULT; - - if (__put_user(b32.size, &buf->size) - || __put_user(b32.low_mark, &buf->low_mark) - || __put_user(b32.high_mark, &buf->high_mark)) - return -EFAULT; + buf.size = b32.size; + buf.low_mark = b32.low_mark; + buf.high_mark = b32.high_mark; - return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf); + return drm_ioctl_kernel(file, drm_legacy_markbufs, &buf, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); } typedef struct drm_buf_info32 { @@ -430,52 +364,42 @@ typedef struct drm_buf_info32 { u32 list; } drm_buf_info32_t; +static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from) +{ + drm_buf_info32_t *request = data; + drm_buf_desc32_t __user *to = compat_ptr(request->list); + drm_buf_desc32_t v = {.count = from->buf_count, + .size = from->buf_size, + .low_mark = from->low_mark, + .high_mark = from->high_mark}; + return copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags)); +} + +static int drm_legacy_infobufs32(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_buf_info32_t *request = data; + return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32); +} + static int compat_drm_infobufs(struct file *file, unsigned int cmd, unsigned long arg) { drm_buf_info32_t req32; drm_buf_info32_t 
__user *argp = (void __user *)arg; - drm_buf_desc32_t __user *to; - struct drm_buf_info __user *request; - struct drm_buf_desc __user *list; - size_t nbytes; - int i, err; - int count, actual; + int err; if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; - count = req32.count; - to = (drm_buf_desc32_t __user *) (unsigned long)req32.list; - if (count < 0) - count = 0; - if (count > 0 - && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t))) - return -EFAULT; - - nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc); - request = compat_alloc_user_space(nbytes); - if (!request) - return -EFAULT; - list = (struct drm_buf_desc *) (request + 1); - - if (__put_user(count, &request->count) - || __put_user(list, &request->list)) - return -EFAULT; + if (req32.count < 0) + req32.count = 0; - err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request); + err = drm_ioctl_kernel(file, drm_legacy_infobufs32, &req32, DRM_AUTH); if (err) return err; - if (__get_user(actual, &request->count)) - return -EFAULT; - if (count >= actual) - for (i = 0; i < actual; ++i) - if (__copy_in_user(&to[i], &list[i], - offsetof(struct drm_buf_desc, flags))) - return -EFAULT; - - if (__put_user(actual, &argp->count)) + if (put_user(req32.count, &argp->count)) return -EFAULT; return 0; @@ -494,54 +418,52 @@ typedef struct drm_buf_map32 { u32 list; /**< Buffer information */ } drm_buf_map32_t; +static int map_one_buf32(void *data, int idx, unsigned long virtual, + struct drm_buf *buf) +{ + drm_buf_map32_t *request = data; + drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx; + drm_buf_pub32_t v; + + v.idx = buf->idx; + v.total = buf->total; + v.used = 0; + v.address = virtual + buf->offset; + if (copy_to_user(to, &v, sizeof(v))) + return -EFAULT; + return 0; +} + +static int drm_legacy_mapbufs32(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + drm_buf_map32_t *request = data; + void __user *v; + int err = __drm_legacy_mapbufs(dev, data, &request->count, + &v, map_one_buf32, + file_priv); + request->virtual = ptr_to_compat(v); + return err; +} + static int compat_drm_mapbufs(struct file *file, unsigned int cmd, unsigned long arg) { drm_buf_map32_t __user *argp = (void __user *)arg; drm_buf_map32_t req32; - drm_buf_pub32_t __user *list32; - struct drm_buf_map __user *request; - struct drm_buf_pub __user *list; - int i, err; - int count, actual; - size_t nbytes; - void __user *addr; + int err; if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; - count = req32.count; - list32 = (void __user *)(unsigned long)req32.list; - - if (count < 0) + if (req32.count < 0) return -EINVAL; - nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub); - request = compat_alloc_user_space(nbytes); - if (!request) - return -EFAULT; - list = (struct drm_buf_pub *) (request + 1); - - if (__put_user(count, &request->count) - || __put_user(list, &request->list)) - return -EFAULT; - err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request); + err = drm_ioctl_kernel(file, drm_legacy_mapbufs32, &req32, DRM_AUTH); if (err) return err; - if (__get_user(actual, &request->count)) - return -EFAULT; - if (count >= actual) - for (i = 0; i < actual; ++i) - if (__copy_in_user(&list32[i], &list[i], - offsetof(struct drm_buf_pub, address)) - || __get_user(addr, &list[i].address) - || __put_user((unsigned long)addr, - &list32[i].address)) - return -EFAULT; - - if (__put_user(actual, &argp->count) - || __get_user(addr, &request->virtual) - || __put_user((unsigned 
long)addr, &argp->virtual)) + if (put_user(req32.count, &argp->count) + || put_user(req32.virtual, &argp->virtual)) return -EFAULT; return 0; @@ -556,21 +478,15 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd, unsigned long arg) { drm_buf_free32_t req32; - struct drm_buf_free __user *request; + struct drm_buf_free request; drm_buf_free32_t __user *argp = (void __user *)arg; if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; - request = compat_alloc_user_space(sizeof(*request)); - if (!request) - return -EFAULT; - if (__put_user(req32.count, &request->count) - || __put_user((int __user *)(unsigned long)req32.list, - &request->list)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request); + request.count = req32.count; + request.list = compat_ptr(req32.list); + return drm_ioctl_kernel(file, drm_legacy_freebufs, &request, DRM_AUTH); } typedef struct drm_ctx_priv_map32 { @@ -582,48 +498,36 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd, unsigned long arg) { drm_ctx_priv_map32_t req32; - struct drm_ctx_priv_map __user *request; + struct drm_ctx_priv_map request; drm_ctx_priv_map32_t __user *argp = (void __user *)arg; if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; - request = compat_alloc_user_space(sizeof(*request)); - if (!request) - return -EFAULT; - if (__put_user(req32.ctx_id, &request->ctx_id) - || __put_user((void *)(unsigned long)req32.handle, - &request->handle)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request); + request.ctx_id = req32.ctx_id; + request.handle = compat_ptr(req32.handle); + return drm_ioctl_kernel(file, drm_legacy_setsareactx, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); } static int compat_drm_getsareactx(struct file *file, unsigned int cmd, unsigned long arg) { - struct drm_ctx_priv_map __user *request; + struct drm_ctx_priv_map req; + drm_ctx_priv_map32_t req32; drm_ctx_priv_map32_t __user *argp = (void __user *)arg; int err; - unsigned int ctx_id; - void *handle; - - if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp)) - || __get_user(ctx_id, &argp->ctx_id)) - return -EFAULT; - request = compat_alloc_user_space(sizeof(*request)); - if (!request) - return -EFAULT; - if (__put_user(ctx_id, &request->ctx_id)) + if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; - err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request); + req.ctx_id = req32.ctx_id; + err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH); if (err) return err; - if (__get_user(handle, &request->handle) - || __put_user((unsigned long)handle, &argp->handle)) + req32.handle = ptr_to_compat(req.handle); + if (copy_to_user(argp, &req32, sizeof(req32))) return -EFAULT; return 0; @@ -639,26 +543,20 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd, { drm_ctx_res32_t __user *argp = (void __user *)arg; drm_ctx_res32_t res32; - struct drm_ctx_res __user *res; + struct drm_ctx_res res; int err; if (copy_from_user(&res32, argp, sizeof(res32))) return -EFAULT; - res = compat_alloc_user_space(sizeof(*res)); - if (!res) - return -EFAULT; - if (__put_user(res32.count, &res->count) - || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts, - &res->contexts)) - return -EFAULT; - - err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res); + res.count = res32.count; + res.contexts = compat_ptr(res32.contexts); + err = drm_ioctl_kernel(file, drm_legacy_resctx, &res, DRM_AUTH); if (err) 
return err; - if (__get_user(res32.count, &res->count) - || __put_user(res32.count, &argp->count)) + res32.count = res.count; + if (copy_to_user(argp, &res32, sizeof(res32))) return -EFAULT; return 0; @@ -682,38 +580,26 @@ static int compat_drm_dma(struct file *file, unsigned int cmd, { drm_dma32_t d32; drm_dma32_t __user *argp = (void __user *)arg; - struct drm_dma __user *d; + struct drm_dma d; int err; if (copy_from_user(&d32, argp, sizeof(d32))) return -EFAULT; - d = compat_alloc_user_space(sizeof(*d)); - if (!d) - return -EFAULT; - - if (__put_user(d32.context, &d->context) - || __put_user(d32.send_count, &d->send_count) - || __put_user((int __user *)(unsigned long)d32.send_indices, - &d->send_indices) - || __put_user((int __user *)(unsigned long)d32.send_sizes, - &d->send_sizes) - || __put_user(d32.flags, &d->flags) - || __put_user(d32.request_count, &d->request_count) - || __put_user((int __user *)(unsigned long)d32.request_indices, - &d->request_indices) - || __put_user((int __user *)(unsigned long)d32.request_sizes, - &d->request_sizes)) - return -EFAULT; - - err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d); + d.context = d32.context; + d.send_count = d32.send_count; + d.send_indices = compat_ptr(d32.send_indices); + d.send_sizes = compat_ptr(d32.send_sizes); + d.flags = d32.flags; + d.request_count = d32.request_count; + d.request_indices = compat_ptr(d32.request_indices); + d.request_sizes = compat_ptr(d32.request_sizes); + err = drm_ioctl_kernel(file, drm_legacy_dma_ioctl, &d, DRM_AUTH); if (err) return err; - if (__get_user(d32.request_size, &d->request_size) - || __get_user(d32.granted_count, &d->granted_count) - || __put_user(d32.request_size, &argp->request_size) - || __put_user(d32.granted_count, &argp->granted_count)) + if (put_user(d.request_size, &argp->request_size) + || put_user(d.granted_count, &argp->granted_count)) return -EFAULT; return 0; @@ -728,17 +614,13 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd, unsigned long arg) { drm_agp_mode32_t __user *argp = (void __user *)arg; - drm_agp_mode32_t m32; - struct drm_agp_mode __user *mode; - - if (get_user(m32.mode, &argp->mode)) - return -EFAULT; + struct drm_agp_mode mode; - mode = compat_alloc_user_space(sizeof(*mode)); - if (put_user(m32.mode, &mode->mode)) + if (get_user(mode.mode, &argp->mode)) return -EFAULT; - return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode); + return drm_ioctl_kernel(file, drm_agp_enable_ioctl, &mode, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); } typedef struct drm_agp_info32 { @@ -760,28 +642,22 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd, { drm_agp_info32_t __user *argp = (void __user *)arg; drm_agp_info32_t i32; - struct drm_agp_info __user *info; + struct drm_agp_info info; int err; - info = compat_alloc_user_space(sizeof(*info)); - if (!info) - return -EFAULT; - - err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info); + err = drm_ioctl_kernel(file, drm_agp_info_ioctl, &info, DRM_AUTH); if (err) return err; - if (__get_user(i32.agp_version_major, &info->agp_version_major) - || __get_user(i32.agp_version_minor, &info->agp_version_minor) - || __get_user(i32.mode, &info->mode) - || __get_user(i32.aperture_base, &info->aperture_base) - || __get_user(i32.aperture_size, &info->aperture_size) - || __get_user(i32.memory_allowed, &info->memory_allowed) - || __get_user(i32.memory_used, &info->memory_used) - || __get_user(i32.id_vendor, &info->id_vendor) - || __get_user(i32.id_device, &info->id_device)) - return -EFAULT; - 
+ i32.agp_version_major = info.agp_version_major; + i32.agp_version_minor = info.agp_version_minor; + i32.mode = info.mode; + i32.aperture_base = info.aperture_base; + i32.aperture_size = info.aperture_size; + i32.memory_allowed = info.memory_allowed; + i32.memory_used = info.memory_used; + i32.id_vendor = info.id_vendor; + i32.id_device = info.id_device; if (copy_to_user(argp, &i32, sizeof(i32))) return -EFAULT; @@ -800,26 +676,24 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, { drm_agp_buffer32_t __user *argp = (void __user *)arg; drm_agp_buffer32_t req32; - struct drm_agp_buffer __user *request; + struct drm_agp_buffer request; int err; if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; - request = compat_alloc_user_space(sizeof(*request)); - if (!request - || __put_user(req32.size, &request->size) - || __put_user(req32.type, &request->type)) - return -EFAULT; - - err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request); + request.size = req32.size; + request.type = req32.type; + err = drm_ioctl_kernel(file, drm_agp_alloc_ioctl, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); if (err) return err; - if (__get_user(req32.handle, &request->handle) - || __get_user(req32.physical, &request->physical) - || copy_to_user(argp, &req32, sizeof(req32))) { - drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request); + req32.handle = request.handle; + req32.physical = request.physical; + if (copy_to_user(argp, &req32, sizeof(req32))) { + drm_ioctl_kernel(file, drm_agp_free_ioctl, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); return -EFAULT; } @@ -830,16 +704,13 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd, unsigned long arg) { drm_agp_buffer32_t __user *argp = (void __user *)arg; - struct drm_agp_buffer __user *request; - u32 handle; + struct drm_agp_buffer request; - request = compat_alloc_user_space(sizeof(*request)); - if (!request - || get_user(handle, &argp->handle) - || __put_user(handle, &request->handle)) + if (get_user(request.handle, &argp->handle)) return -EFAULT; - return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request); + return drm_ioctl_kernel(file, drm_agp_free_ioctl, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); } typedef struct drm_agp_binding32 { @@ -852,34 +723,28 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd, { drm_agp_binding32_t __user *argp = (void __user *)arg; drm_agp_binding32_t req32; - struct drm_agp_binding __user *request; + struct drm_agp_binding request; if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; - request = compat_alloc_user_space(sizeof(*request)); - if (!request - || __put_user(req32.handle, &request->handle) - || __put_user(req32.offset, &request->offset)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request); + request.handle = req32.handle; + request.offset = req32.offset; + return drm_ioctl_kernel(file, drm_agp_bind_ioctl, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); } static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, unsigned long arg) { drm_agp_binding32_t __user *argp = (void __user *)arg; - struct drm_agp_binding __user *request; - u32 handle; + struct drm_agp_binding request; - request = compat_alloc_user_space(sizeof(*request)); - if (!request - || get_user(handle, &argp->handle) - || __put_user(handle, &request->handle)) + if (get_user(request.handle, &argp->handle)) return -EFAULT; - return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request); + return 
drm_ioctl_kernel(file, drm_agp_unbind_ioctl, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); } #endif /* CONFIG_AGP */ @@ -892,23 +757,19 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, unsigned long arg) { drm_scatter_gather32_t __user *argp = (void __user *)arg; - struct drm_scatter_gather __user *request; + struct drm_scatter_gather request; int err; - unsigned long x; - request = compat_alloc_user_space(sizeof(*request)); - if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)) - || __get_user(x, &argp->size) - || __put_user(x, &request->size)) + if (get_user(request.size, &argp->size)) return -EFAULT; - err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request); + err = drm_ioctl_kernel(file, drm_legacy_sg_alloc, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); if (err) return err; /* XXX not sure about the handle conversion here... */ - if (__get_user(x, &request->handle) - || __put_user(x >> PAGE_SHIFT, &argp->handle)) + if (put_user(request.handle >> PAGE_SHIFT, &argp->handle)) return -EFAULT; return 0; @@ -918,19 +779,17 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd, unsigned long arg) { drm_scatter_gather32_t __user *argp = (void __user *)arg; - struct drm_scatter_gather __user *request; + struct drm_scatter_gather request; unsigned long x; - request = compat_alloc_user_space(sizeof(*request)); - if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)) - || __get_user(x, &argp->handle) - || __put_user(x << PAGE_SHIFT, &request->handle)) + if (get_user(x, &argp->handle)) return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request); + request.handle = x << PAGE_SHIFT; + return drm_ioctl_kernel(file, drm_legacy_sg_free, &request, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); } -#if defined(CONFIG_X86) || defined(CONFIG_IA64) +#if defined(CONFIG_X86) typedef struct drm_update_draw32 { drm_drawable_t handle; unsigned int type; @@ -943,22 +802,11 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd, unsigned long arg) { drm_update_draw32_t update32; - struct drm_update_draw __user *request; - int err; - if (copy_from_user(&update32, (void __user *)arg, sizeof(update32))) return -EFAULT; - request = compat_alloc_user_space(sizeof(*request)); - if (!request || - __put_user(update32.handle, &request->handle) || - __put_user(update32.type, &request->type) || - __put_user(update32.num, &request->num) || - __put_user(update32.data, &request->data)) - return -EFAULT; - - err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request); - return err; + return drm_ioctl_kernel(file, drm_noop, NULL, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); } #endif @@ -985,36 +833,30 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, { drm_wait_vblank32_t __user *argp = (void __user *)arg; drm_wait_vblank32_t req32; - union drm_wait_vblank __user *request; + union drm_wait_vblank req; int err; if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; - request = compat_alloc_user_space(sizeof(*request)); - if (!request - || __put_user(req32.request.type, &request->request.type) - || __put_user(req32.request.sequence, &request->request.sequence) - || __put_user(req32.request.signal, &request->request.signal)) - return -EFAULT; - - err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request); + req.request.type = req32.request.type; + req.request.sequence = req32.request.sequence; + req.request.signal = req32.request.signal; + err = drm_ioctl_kernel(file, drm_wait_vblank, 
&req, DRM_UNLOCKED); if (err) return err; - if (__get_user(req32.reply.type, &request->reply.type) - || __get_user(req32.reply.sequence, &request->reply.sequence) - || __get_user(req32.reply.tval_sec, &request->reply.tval_sec) - || __get_user(req32.reply.tval_usec, &request->reply.tval_usec)) - return -EFAULT; - + req32.reply.type = req.reply.type; + req32.reply.sequence = req.reply.sequence; + req32.reply.tval_sec = req.reply.tval_sec; + req32.reply.tval_usec = req.reply.tval_usec; if (copy_to_user(argp, &req32, sizeof(req32))) return -EFAULT; return 0; } -#if defined(CONFIG_X86) || defined(CONFIG_IA64) +#if defined(CONFIG_X86) typedef struct drm_mode_fb_cmd232 { u32 fb_id; u32 width; @@ -1031,82 +873,67 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd, unsigned long arg) { struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg; - struct drm_mode_fb_cmd232 req32; - struct drm_mode_fb_cmd2 __user *req64; - int i; + struct drm_mode_fb_cmd2 req64; int err; - if (copy_from_user(&req32, argp, sizeof(req32))) + if (copy_from_user(&req64, argp, + offsetof(drm_mode_fb_cmd232_t, modifier))) return -EFAULT; - req64 = compat_alloc_user_space(sizeof(*req64)); - - if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64)) - || __put_user(req32.width, &req64->width) - || __put_user(req32.height, &req64->height) - || __put_user(req32.pixel_format, &req64->pixel_format) - || __put_user(req32.flags, &req64->flags)) + if (copy_from_user(&req64.modifier, &argp->modifier, + sizeof(req64.modifier))) return -EFAULT; - for (i = 0; i < 4; i++) { - if (__put_user(req32.handles[i], &req64->handles[i])) - return -EFAULT; - if (__put_user(req32.pitches[i], &req64->pitches[i])) - return -EFAULT; - if (__put_user(req32.offsets[i], &req64->offsets[i])) - return -EFAULT; - if (__put_user(req32.modifier[i], &req64->modifier[i])) - return -EFAULT; - } - - err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64); + err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64, + DRM_CONTROL_ALLOW|DRM_UNLOCKED); if (err) return err; - if (__get_user(req32.fb_id, &req64->fb_id)) - return -EFAULT; - - if (copy_to_user(argp, &req32, sizeof(req32))) + if (put_user(req64.fb_id, &argp->fb_id)) return -EFAULT; return 0; } #endif -static drm_ioctl_compat_t *drm_compat_ioctls[] = { - [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version, - [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique, - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient, - [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats, - [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique, - [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap, - [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs, - [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs, - [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs, - [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs, - [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs, - [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap, - [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx, - [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx, - [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx, - [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma, +static struct { + drm_ioctl_compat_t *fn; + char *name; +} drm_compat_ioctls[] = { +#define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n} + DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, 
compat_drm_version), + DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique), + DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap), + DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient), + DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats), + DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique), + DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap), + DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs), + DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs), + DRM_IOCTL32_DEF(DRM_IOCTL_INFO_BUFS, compat_drm_infobufs), + DRM_IOCTL32_DEF(DRM_IOCTL_MAP_BUFS, compat_drm_mapbufs), + DRM_IOCTL32_DEF(DRM_IOCTL_FREE_BUFS, compat_drm_freebufs), + DRM_IOCTL32_DEF(DRM_IOCTL_RM_MAP, compat_drm_rmmap), + DRM_IOCTL32_DEF(DRM_IOCTL_SET_SAREA_CTX, compat_drm_setsareactx), + DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx), + DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx), + DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma), #if IS_ENABLED(CONFIG_AGP) - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind, + DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable), + DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info), + DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ALLOC, compat_drm_agp_alloc), + DRM_IOCTL32_DEF(DRM_IOCTL_AGP_FREE, compat_drm_agp_free), + DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind), + DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind), #endif - [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc, - [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free, + DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc), + DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free), #if defined(CONFIG_X86) || defined(CONFIG_IA64) - [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw, + DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw), #endif - [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank, + DRM_IOCTL32_DEF(DRM_IOCTL_WAIT_VBLANK, compat_drm_wait_vblank), #if defined(CONFIG_X86) || defined(CONFIG_IA64) - [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2, + DRM_IOCTL32_DEF(DRM_IOCTL_MODE_ADDFB2, compat_drm_mode_addfb2), #endif }; @@ -1127,6 +954,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = { long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); + struct drm_file *file_priv = filp->private_data; drm_ioctl_compat_t *fn; int ret; @@ -1137,13 +965,18 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (nr >= ARRAY_SIZE(drm_compat_ioctls)) return drm_ioctl(filp, cmd, arg); - fn = drm_compat_ioctls[nr]; - - if (fn != NULL) - ret = (*fn) (filp, cmd, arg); - else - ret = drm_ioctl(filp, cmd, arg); + fn = drm_compat_ioctls[nr].fn; + if (!fn) + return drm_ioctl(filp, cmd, arg); + DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", + task_pid_nr(current), + (long)old_encode_dev(file_priv->minor->kdev->devt), + file_priv->authenticated, + drm_compat_ioctls[nr].name); + ret = (*fn)(filp, cmd, arg); + if (ret) + DRM_DEBUG("ret = %d\n", ret); return ret; } EXPORT_SYMBOL(drm_compat_ioctl); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c 
index f1e568176da9..f1eb326524cf 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -107,7 +107,7 @@ * * Copies the bus id from drm_device::unique into user space. */ -static int drm_getunique(struct drm_device *dev, void *data, +int drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_unique *u = data; @@ -172,7 +172,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) * Searches for the client with the specified index and copies its information * into userspace */ -static int drm_getclient(struct drm_device *dev, void *data, +int drm_getclient(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_client *client = data; @@ -464,7 +464,7 @@ static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value) * * Fills in the version information in \p arg. */ -static int drm_version(struct drm_device *dev, void *data, +int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_version *version = data; @@ -706,6 +706,33 @@ static const struct drm_ioctl_desc drm_ioctls[] = { * structure. */ +long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata, + u32 flags) +{ + struct drm_file *file_priv = file->private_data; + struct drm_device *dev = file_priv->minor->dev; + int retcode; + + if (drm_device_is_unplugged(dev)) + return -ENODEV; + + retcode = drm_ioctl_permit(flags, file_priv); + if (unlikely(retcode)) + return retcode; + + /* Enforce sane locking for modern driver ioctls. */ + if (!drm_core_check_feature(dev, DRIVER_LEGACY) || + (flags & DRM_UNLOCKED)) + retcode = func(dev, kdata, file_priv); + else { + mutex_lock(&drm_global_mutex); + retcode = func(dev, kdata, file_priv); + mutex_unlock(&drm_global_mutex); + } + return retcode; +} +EXPORT_SYMBOL(drm_ioctl_kernel); + /** * drm_ioctl - ioctl callback implementation for DRM drivers * @filp: file this ioctl is called on @@ -774,10 +801,6 @@ long drm_ioctl(struct file *filp, goto err_i1; } - retcode = drm_ioctl_permit(ioctl->flags, file_priv); - if (unlikely(retcode)) - goto err_i1; - if (ksize <= sizeof(stack_kdata)) { kdata = stack_kdata; } else { @@ -796,16 +819,7 @@ long drm_ioctl(struct file *filp, if (ksize > in_size) memset(kdata + in_size, 0, ksize - in_size); - /* Enforce sane locking for modern driver ioctls. 
*/ - if (!drm_core_check_feature(dev, DRIVER_LEGACY) || - (ioctl->flags & DRM_UNLOCKED)) - retcode = func(dev, kdata, file_priv); - else { - mutex_lock(&drm_global_mutex); - retcode = func(dev, kdata, file_priv); - mutex_unlock(&drm_global_mutex); - } - + retcode = drm_ioctl_kernel(filp, func, kdata, ioctl->flags); if (copy_to_user((void __user *)arg, kdata, out_size) != 0) retcode = -EFAULT; diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h index e4bb5ad747c8..280fbeb846ff 100644 --- a/drivers/gpu/drm/drm_legacy.h +++ b/drivers/gpu/drm/drm_legacy.h @@ -74,6 +74,13 @@ int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f); int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f); int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f); +int __drm_legacy_infobufs(struct drm_device *, void *, int *, + int (*)(void *, int, struct drm_buf_entry *)); +int __drm_legacy_mapbufs(struct drm_device *, void *, int *, + void __user **, + int (*)(void *, int, unsigned long, struct drm_buf *), + struct drm_file *); + #ifdef CONFIG_DRM_VM void drm_legacy_vma_flush(struct drm_device *d); #else diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 89441bc78591..789ba0b37f7b 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -77,17 +77,15 @@ EXPORT_SYMBOL(drm_syncobj_find); /** * drm_syncobj_replace_fence - replace fence in a sync object. - * @file_private: drm file private pointer. * @syncobj: Sync object to replace fence in * @fence: fence to install in sync file. * * This replaces the fence on a sync object. */ -void drm_syncobj_replace_fence(struct drm_file *file_private, - struct drm_syncobj *syncobj, +void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, struct dma_fence *fence) { - struct dma_fence *old_fence = NULL; + struct dma_fence *old_fence; if (fence) dma_fence_get(fence); @@ -292,7 +290,7 @@ int drm_syncobj_import_sync_file_fence(struct drm_file *file_private, return -ENOENT; } - drm_syncobj_replace_fence(file_private, syncobj, fence); + drm_syncobj_replace_fence(syncobj, fence); dma_fence_put(fence); drm_syncobj_put(syncobj); return 0; diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 463e4d81fb0d..e9f33cd805dd 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -242,7 +242,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, * Otherwise reinitialize delayed at next vblank interrupt and assign 0 * for now, to mark the vblanktimestamp as invalid. 
*/ - if (!rc && in_vblank_irq) + if (!rc && !in_vblank_irq) t_vblank = (struct timeval) {0, 0}; store_vblank(dev, pipe, diff, &t_vblank, cur_vblank); diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h index e881482b5971..207f45c999c3 100644 --- a/drivers/gpu/drm/etnaviv/common.xml.h +++ b/drivers/gpu/drm/etnaviv/common.xml.h @@ -8,10 +8,38 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng git clone git://0x04.net/rules-ng-ng The rules-ng-ng source files this header was generated from are: -- state_hi.xml ( 24309 bytes, from 2015-12-12 09:02:53) -- common.xml ( 18379 bytes, from 2015-12-12 09:02:53) +- state.xml ( 19930 bytes, from 2017-03-09 15:43:43) +- common.xml ( 23473 bytes, from 2017-03-09 15:43:43) +- state_hi.xml ( 26403 bytes, from 2017-03-09 15:43:43) +- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56) +- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56) +- state_3d.xml ( 66957 bytes, from 2017-03-09 15:43:43) +- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56) -Copyright (C) 2015 +Copyright (C) 2012-2017 by the following authors: +- Wladimir J. van der Laan <laanwj@gmail.com> +- Christian Gmeiner <christian.gmeiner@gmail.com> +- Lucas Stach <l.stach@pengutronix.de> +- Russell King <rmk@arm.linux.org.uk> + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sub license, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
*/ @@ -162,129 +190,129 @@ Copyright (C) 2015 #define chipMinorFeatures1_FC_FLUSH_STALL 0x80000000 #define chipMinorFeatures2_LINE_LOOP 0x00000001 #define chipMinorFeatures2_LOGIC_OP 0x00000002 -#define chipMinorFeatures2_UNK2 0x00000004 +#define chipMinorFeatures2_SEAMLESS_CUBE_MAP 0x00000004 #define chipMinorFeatures2_SUPERTILED_TEXTURE 0x00000008 -#define chipMinorFeatures2_UNK4 0x00000010 +#define chipMinorFeatures2_LINEAR_PE 0x00000010 #define chipMinorFeatures2_RECT_PRIMITIVE 0x00000020 #define chipMinorFeatures2_COMPOSITION 0x00000040 #define chipMinorFeatures2_CORRECT_AUTO_DISABLE_COUNT 0x00000080 -#define chipMinorFeatures2_UNK8 0x00000100 -#define chipMinorFeatures2_UNK9 0x00000200 -#define chipMinorFeatures2_UNK10 0x00000400 +#define chipMinorFeatures2_PE_SWIZZLE 0x00000100 +#define chipMinorFeatures2_END_EVENT 0x00000200 +#define chipMinorFeatures2_S1S8 0x00000400 #define chipMinorFeatures2_HALTI1 0x00000800 -#define chipMinorFeatures2_UNK12 0x00001000 -#define chipMinorFeatures2_UNK13 0x00002000 -#define chipMinorFeatures2_UNK14 0x00004000 +#define chipMinorFeatures2_RGB888 0x00001000 +#define chipMinorFeatures2_TX__YUV_ASSEMBLER 0x00002000 +#define chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING 0x00004000 #define chipMinorFeatures2_EXTRA_TEXTURE_STATE 0x00008000 #define chipMinorFeatures2_FULL_DIRECTFB 0x00010000 #define chipMinorFeatures2_2D_TILING 0x00020000 #define chipMinorFeatures2_THREAD_WALKER_IN_PS 0x00040000 #define chipMinorFeatures2_TILE_FILLER 0x00080000 -#define chipMinorFeatures2_UNK20 0x00100000 +#define chipMinorFeatures2_YUV_STANDARD 0x00100000 #define chipMinorFeatures2_2D_MULTI_SOURCE_BLIT 0x00200000 -#define chipMinorFeatures2_UNK22 0x00400000 -#define chipMinorFeatures2_UNK23 0x00800000 -#define chipMinorFeatures2_UNK24 0x01000000 +#define chipMinorFeatures2_YUV_CONVERSION 0x00400000 +#define chipMinorFeatures2_FLUSH_FIXED_2D 0x00800000 +#define chipMinorFeatures2_INTERLEAVER 0x01000000 #define chipMinorFeatures2_MIXED_STREAMS 0x02000000 #define chipMinorFeatures2_2D_420_L2CACHE 0x04000000 -#define chipMinorFeatures2_UNK27 0x08000000 +#define chipMinorFeatures2_BUG_FIXES7 0x08000000 #define chipMinorFeatures2_2D_NO_INDEX8_BRUSH 0x10000000 #define chipMinorFeatures2_TEXTURE_TILED_READ 0x20000000 -#define chipMinorFeatures2_UNK30 0x40000000 -#define chipMinorFeatures2_UNK31 0x80000000 +#define chipMinorFeatures2_DECOMPRESS_Z16 0x40000000 +#define chipMinorFeatures2_BUG_FIXES8 0x80000000 #define chipMinorFeatures3_ROTATION_STALL_FIX 0x00000001 -#define chipMinorFeatures3_UNK1 0x00000002 +#define chipMinorFeatures3_OCL_ONLY 0x00000002 #define chipMinorFeatures3_2D_MULTI_SOURCE_BLT_EX 0x00000004 -#define chipMinorFeatures3_UNK3 0x00000008 -#define chipMinorFeatures3_UNK4 0x00000010 -#define chipMinorFeatures3_UNK5 0x00000020 -#define chipMinorFeatures3_UNK6 0x00000040 -#define chipMinorFeatures3_UNK7 0x00000080 +#define chipMinorFeatures3_INSTRUCTION_CACHE 0x00000008 +#define chipMinorFeatures3_GEOMETRY_SHADER 0x00000010 +#define chipMinorFeatures3_TEX_COMPRESSION_SUPERTILED 0x00000020 +#define chipMinorFeatures3_GENERICS 0x00000040 +#define chipMinorFeatures3_BUG_FIXES9 0x00000080 #define chipMinorFeatures3_FAST_MSAA 0x00000100 -#define chipMinorFeatures3_UNK9 0x00000200 +#define chipMinorFeatures3_WCLIP 0x00000200 #define chipMinorFeatures3_BUG_FIXES10 0x00000400 -#define chipMinorFeatures3_UNK11 0x00000800 +#define chipMinorFeatures3_UNIFIED_SAMPLERS 0x00000800 #define chipMinorFeatures3_BUG_FIXES11 0x00001000 -#define chipMinorFeatures3_UNK13 0x00002000 
-#define chipMinorFeatures3_UNK14 0x00004000 -#define chipMinorFeatures3_UNK15 0x00008000 -#define chipMinorFeatures3_UNK16 0x00010000 -#define chipMinorFeatures3_UNK17 0x00020000 +#define chipMinorFeatures3_PERFORMANCE_COUNTERS 0x00002000 +#define chipMinorFeatures3_HAS_FAST_TRANSCENDENTALS 0x00004000 +#define chipMinorFeatures3_BUG_FIXES12 0x00008000 +#define chipMinorFeatures3_BUG_FIXES13 0x00010000 +#define chipMinorFeatures3_DE_ENHANCEMENTS1 0x00020000 #define chipMinorFeatures3_ACE 0x00040000 -#define chipMinorFeatures3_UNK19 0x00080000 -#define chipMinorFeatures3_UNK20 0x00100000 -#define chipMinorFeatures3_UNK21 0x00200000 +#define chipMinorFeatures3_TX_ENHANCEMENTS1 0x00080000 +#define chipMinorFeatures3_SH_ENHANCEMENTS1 0x00100000 +#define chipMinorFeatures3_SH_ENHANCEMENTS2 0x00200000 #define chipMinorFeatures3_UNK22 0x00400000 -#define chipMinorFeatures3_UNK23 0x00800000 +#define chipMinorFeatures3_2D_FC_SOURCE 0x00800000 #define chipMinorFeatures3_UNK24 0x01000000 #define chipMinorFeatures3_UNK25 0x02000000 #define chipMinorFeatures3_NEW_HZ 0x04000000 #define chipMinorFeatures3_UNK27 0x08000000 #define chipMinorFeatures3_UNK28 0x10000000 -#define chipMinorFeatures3_UNK29 0x20000000 +#define chipMinorFeatures3_SH_ENHANCEMENTS3 0x20000000 #define chipMinorFeatures3_UNK30 0x40000000 #define chipMinorFeatures3_UNK31 0x80000000 #define chipMinorFeatures4_UNK0 0x00000001 -#define chipMinorFeatures4_UNK1 0x00000002 -#define chipMinorFeatures4_UNK2 0x00000004 +#define chipMinorFeatures4_PE_ENHANCEMENTS2 0x00000002 +#define chipMinorFeatures4_FRUSTUM_CLIP_FIX 0x00000004 #define chipMinorFeatures4_UNK3 0x00000008 #define chipMinorFeatures4_UNK4 0x00000010 -#define chipMinorFeatures4_UNK5 0x00000020 -#define chipMinorFeatures4_UNK6 0x00000040 +#define chipMinorFeatures4_2D_GAMMA 0x00000020 +#define chipMinorFeatures4_SINGLE_BUFFER 0x00000040 #define chipMinorFeatures4_UNK7 0x00000080 #define chipMinorFeatures4_UNK8 0x00000100 #define chipMinorFeatures4_UNK9 0x00000200 #define chipMinorFeatures4_UNK10 0x00000400 -#define chipMinorFeatures4_UNK11 0x00000800 -#define chipMinorFeatures4_UNK12 0x00001000 -#define chipMinorFeatures4_UNK13 0x00002000 +#define chipMinorFeatures4_TX_LERP_PRECISION_FIX 0x00000800 +#define chipMinorFeatures4_2D_COLOR_SPACE_CONVERSION 0x00001000 +#define chipMinorFeatures4_TEXTURE_ASTC 0x00002000 #define chipMinorFeatures4_UNK14 0x00004000 #define chipMinorFeatures4_UNK15 0x00008000 #define chipMinorFeatures4_HALTI2 0x00010000 #define chipMinorFeatures4_UNK17 0x00020000 #define chipMinorFeatures4_SMALL_MSAA 0x00040000 #define chipMinorFeatures4_UNK19 0x00080000 -#define chipMinorFeatures4_UNK20 0x00100000 -#define chipMinorFeatures4_UNK21 0x00200000 -#define chipMinorFeatures4_UNK22 0x00400000 -#define chipMinorFeatures4_UNK23 0x00800000 -#define chipMinorFeatures4_UNK24 0x01000000 -#define chipMinorFeatures4_UNK25 0x02000000 -#define chipMinorFeatures4_UNK26 0x04000000 -#define chipMinorFeatures4_UNK27 0x08000000 +#define chipMinorFeatures4_NEW_RA 0x00100000 +#define chipMinorFeatures4_2D_OPF_YUV_OUTPUT 0x00200000 +#define chipMinorFeatures4_2D_MULTI_SOURCE_BLT_EX2 0x00400000 +#define chipMinorFeatures4_NO_USER_CSC 0x00800000 +#define chipMinorFeatures4_ZFIXES 0x01000000 +#define chipMinorFeatures4_BUG_FIXES18 0x02000000 +#define chipMinorFeatures4_2D_COMPRESSION 0x04000000 +#define chipMinorFeatures4_PROBE 0x08000000 #define chipMinorFeatures4_UNK28 0x10000000 -#define chipMinorFeatures4_UNK29 0x20000000 +#define chipMinorFeatures4_2D_SUPER_TILE_VERSION 0x20000000 
#define chipMinorFeatures4_UNK30 0x40000000 #define chipMinorFeatures4_UNK31 0x80000000 #define chipMinorFeatures5_UNK0 0x00000001 #define chipMinorFeatures5_UNK1 0x00000002 #define chipMinorFeatures5_UNK2 0x00000004 #define chipMinorFeatures5_UNK3 0x00000008 -#define chipMinorFeatures5_UNK4 0x00000010 +#define chipMinorFeatures5_EEZ 0x00000010 #define chipMinorFeatures5_UNK5 0x00000020 #define chipMinorFeatures5_UNK6 0x00000040 #define chipMinorFeatures5_UNK7 0x00000080 #define chipMinorFeatures5_UNK8 0x00000100 #define chipMinorFeatures5_HALTI3 0x00000200 #define chipMinorFeatures5_UNK10 0x00000400 -#define chipMinorFeatures5_UNK11 0x00000800 +#define chipMinorFeatures5_2D_ONE_PASS_FILTER_TAP 0x00000800 #define chipMinorFeatures5_UNK12 0x00001000 -#define chipMinorFeatures5_UNK13 0x00002000 -#define chipMinorFeatures5_UNK14 0x00004000 +#define chipMinorFeatures5_SEPARATE_SRC_DST 0x00002000 +#define chipMinorFeatures5_HALTI4 0x00004000 #define chipMinorFeatures5_UNK15 0x00008000 -#define chipMinorFeatures5_UNK16 0x00010000 -#define chipMinorFeatures5_UNK17 0x00020000 +#define chipMinorFeatures5_ANDROID_ONLY 0x00010000 +#define chipMinorFeatures5_HAS_PRODUCTID 0x00020000 #define chipMinorFeatures5_UNK18 0x00040000 #define chipMinorFeatures5_UNK19 0x00080000 -#define chipMinorFeatures5_UNK20 0x00100000 +#define chipMinorFeatures5_PE_DITHER_FIX2 0x00100000 #define chipMinorFeatures5_UNK21 0x00200000 #define chipMinorFeatures5_UNK22 0x00400000 #define chipMinorFeatures5_UNK23 0x00800000 #define chipMinorFeatures5_UNK24 0x01000000 #define chipMinorFeatures5_UNK25 0x02000000 #define chipMinorFeatures5_UNK26 0x04000000 -#define chipMinorFeatures5_UNK27 0x08000000 -#define chipMinorFeatures5_UNK28 0x10000000 +#define chipMinorFeatures5_RS_DEPTHSTENCIL_NATIVE_SUPPORT 0x08000000 +#define chipMinorFeatures5_V2_MSAA_COMP_FIX 0x10000000 #define chipMinorFeatures5_UNK29 0x20000000 #define chipMinorFeatures5_UNK30 0x40000000 #define chipMinorFeatures5_UNK31 0x80000000 diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 5255278dde56..91e17aeee1da 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -495,6 +495,7 @@ static struct drm_driver etnaviv_drm_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, + .gem_prime_res_obj = etnaviv_gem_prime_res_obj, .gem_prime_pin = etnaviv_gem_prime_pin, .gem_prime_unpin = etnaviv_gem_prime_unpin, .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index e41f38667c1c..058389f93b69 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -80,6 +80,7 @@ void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj); void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int etnaviv_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj); struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int etnaviv_gem_prime_pin(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index d6fb724fc3cc..9a3bea738330 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ 
-411,16 +411,20 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); struct drm_device *dev = obj->dev; bool write = !!(op & ETNA_PREP_WRITE); - unsigned long remain = - op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout); - long lret; - - lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv, - write, true, remain); - if (lret < 0) - return lret; - else if (lret == 0) - return remain == 0 ? -EBUSY : -ETIMEDOUT; + int ret; + + if (op & ETNA_PREP_NOSYNC) { + if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv, + write)) + return -EBUSY; + } else { + unsigned long remain = etnaviv_timeout_to_jiffies(timeout); + + ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv, + write, true, remain); + if (ret <= 0) + return ret == 0 ? -ETIMEDOUT : ret; + } if (etnaviv_obj->flags & ETNA_BO_CACHED) { if (!etnaviv_obj->sgt) { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index c4a091e87426..e437fba1209d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -106,9 +106,10 @@ struct etnaviv_gem_submit { struct etnaviv_gpu *gpu; struct ww_acquire_ctx ticket; struct dma_fence *fence; + u32 flags; unsigned int nr_bos; struct etnaviv_gem_submit_bo bos[0]; - u32 flags; + /* No new members here, the previous one is variable-length! */ }; int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 367bf952f61a..e5da4f2300ba 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -150,3 +150,10 @@ fail: return ERR_PTR(ret); } + +struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + return etnaviv_obj->resv; +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index ee7069e93eda..6463fc2c736f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -172,7 +172,7 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; - bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT); + bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT); ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write, explicit); @@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream, if (ret) return ret; - if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) { - DRM_ERROR("relocation %u outside object", i); + if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) { + DRM_ERROR("relocation %u outside object\n", i); return -EINVAL; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 9a9c40717801..ada45fdd0eae 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -412,13 +412,19 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock) static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu) { - unsigned int fscale = 1 << (6 - gpu->freq_scale); - u32 clock; - - clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | - 
VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale); + if (gpu->identity.minor_features2 & + chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) { + clk_set_rate(gpu->clk_core, + gpu->base_rate_core >> gpu->freq_scale); + clk_set_rate(gpu->clk_shader, + gpu->base_rate_shader >> gpu->freq_scale); + } else { + unsigned int fscale = 1 << (6 - gpu->freq_scale); + u32 clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | + VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale); - etnaviv_gpu_load_clock(gpu, clock); + etnaviv_gpu_load_clock(gpu, clock); + } } static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) @@ -523,9 +529,10 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu) pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS); - /* Disable PA clock gating for GC400+ except for GC420 */ + /* Disable PA clock gating for GC400+ without bugfix except for GC420 */ if (gpu->identity.model >= chipModel_GC400 && - gpu->identity.model != chipModel_GC420) + gpu->identity.model != chipModel_GC420 && + !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12)) pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA; /* @@ -541,6 +548,11 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu) if (gpu->identity.revision < 0x5422) pmc |= BIT(15); /* Unknown bit */ + /* Disable TX clock gating on affected core revisions. */ + if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) || + etnaviv_is_model_rev(gpu, GC2000, 0x5108)) + pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX; + pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ; pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ; @@ -1736,11 +1748,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) DBG("clk_core: %p", gpu->clk_core); if (IS_ERR(gpu->clk_core)) gpu->clk_core = NULL; + gpu->base_rate_core = clk_get_rate(gpu->clk_core); gpu->clk_shader = devm_clk_get(&pdev->dev, "shader"); DBG("clk_shader: %p", gpu->clk_shader); if (IS_ERR(gpu->clk_shader)) gpu->clk_shader = NULL; + gpu->base_rate_shader = clk_get_rate(gpu->clk_shader); /* TODO: figure out max mapped size */ dev_set_drvdata(dev, gpu); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 9227a9740447..689cb8f3680c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -152,6 +152,8 @@ struct etnaviv_gpu { u32 hangcheck_dma_addr; struct work_struct recover_work; unsigned int freq_scale; + unsigned long base_rate_core; + unsigned long base_rate_shader; }; static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data) diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 1d185347c64c..305dc3d4ff77 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -75,6 +75,7 @@ config DRM_EXYNOS_DP config DRM_EXYNOS_HDMI bool "HDMI" depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON + select CEC_CORE if CEC_NOTIFIER help Choose this option if you want to use Exynos HDMI for DRM. 
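[Editor's note on the etnaviv clock hunk above: when the GPU advertises chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING, the updated etnaviv_gpu_update_clock() scales the externally supplied core/shader clocks with clk_set_rate(base_rate >> freq_scale); otherwise it keeps programming the internal divider, where fscale = 1 << (6 - freq_scale). The userspace sketch below is illustrative only: the 800 MHz base rate is an assumed example value, and the raw fscale number is printed without claiming anything about the real register encoding; it simply shows how both formulas respond to freq_scale.]

/* Illustrative userspace sketch of the two etnaviv scaling formulas.
 * Assumptions: the 800 MHz base rate is an example value, and fscale is
 * shown as the raw divider argument that would be passed to FSCALE_VAL(). */
#include <stdio.h>

int main(void)
{
	unsigned long base_rate = 800000000UL;	/* assumed example core clock */
	unsigned int freq_scale;

	for (freq_scale = 0; freq_scale <= 3; freq_scale++) {
		unsigned long scaled = base_rate >> freq_scale;	/* new clk_set_rate() path */
		unsigned int fscale = 1 << (6 - freq_scale);	/* legacy divider path */

		printf("freq_scale=%u: clk_set_rate(%lu Hz), fscale=%u\n",
		       freq_scale, scaled, fscale);
	}
	return 0;
}

[Each step of freq_scale halves the rate on the new path, while the legacy path halves the divider value written to the clock control register.]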
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 35a8dfc93836..242bd50faa26 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -453,7 +453,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev) struct component_match *match; pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls); match = exynos_drm_match_add(&pdev->dev); if (IS_ERR(match)) diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index a11b79596e2f..b6a46d9a016e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1651,8 +1651,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi) return ret; dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0); - if (!dsi->bridge_node) - return -EINVAL; return 0; } @@ -1687,9 +1685,11 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, return ret; } - bridge = of_drm_find_bridge(dsi->bridge_node); - if (bridge) - drm_bridge_attach(encoder, bridge, NULL); + if (dsi->bridge_node) { + bridge = of_drm_find_bridge(dsi->bridge_node); + if (bridge) + drm_bridge_attach(encoder, bridge, NULL); + } return mipi_dsi_host_register(&dsi->dsi_host); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index c77a5aced81a..73217c281c9a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -145,13 +145,19 @@ static struct drm_framebuffer * exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { + const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd); struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; struct drm_gem_object *obj; struct drm_framebuffer *fb; int i; int ret; - for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) { + for (i = 0; i < info->num_planes; i++) { + unsigned int height = (i == 0) ? 
mode_cmd->height : + DIV_ROUND_UP(mode_cmd->height, info->vsub); + unsigned long size = height * mode_cmd->pitches[i] + + mode_cmd->offsets[i]; + obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); if (!obj) { DRM_ERROR("failed to lookup gem object\n"); @@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, } exynos_gem[i] = to_exynos_gem(obj); + + if (size > exynos_gem[i]->size) { + i++; + ret = -EINVAL; + goto err; + } } fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i); @@ -181,8 +193,8 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) { struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - if (index >= MAX_FB_BUFFER) - return DMA_ERROR_CODE; + if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) + return 0; return exynos_fb->dma_addr[index]; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index e45720543a45..16bbee897e0d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -340,16 +340,10 @@ static int exynos_mic_bind(struct device *dev, struct device *master, void *data) { struct exynos_mic *mic = dev_get_drvdata(dev); - int ret; - mic->bridge.funcs = &mic_bridge_funcs; - mic->bridge.of_node = dev->of_node; mic->bridge.driver_private = mic; - ret = drm_bridge_add(&mic->bridge); - if (ret) - DRM_ERROR("mic: Failed to add MIC to the global bridge list\n"); - return ret; + return 0; } static void exynos_mic_unbind(struct device *dev, struct device *master, @@ -365,8 +359,6 @@ static void exynos_mic_unbind(struct device *dev, struct device *master, already_disabled: mutex_unlock(&mic_mutex); - - drm_bridge_remove(&mic->bridge); } static const struct component_ops exynos_mic_component_ops = { @@ -461,6 +453,15 @@ static int exynos_mic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mic); + mic->bridge.funcs = &mic_bridge_funcs; + mic->bridge.of_node = dev->of_node; + + ret = drm_bridge_add(&mic->bridge); + if (ret) { + DRM_ERROR("mic: Failed to add MIC to the global bridge list\n"); + return ret; + } + pm_runtime_enable(dev); ret = component_add(dev, &exynos_mic_component_ops); @@ -479,8 +480,13 @@ err: static int exynos_mic_remove(struct platform_device *pdev) { + struct exynos_mic *mic = platform_get_drvdata(pdev); + component_del(&pdev->dev, &exynos_mic_component_ops); pm_runtime_disable(&pdev->dev); + + drm_bridge_remove(&mic->bridge); + return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 06bfbe400cf1..d3b69d66736f 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1501,8 +1501,6 @@ static void hdmi_disable(struct drm_encoder *encoder) */ cancel_delayed_work(&hdata->hotplug_work); cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID); - - hdmiphy_disable(hdata); } static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { @@ -1676,7 +1674,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata) return hdmi_bridge_init(hdata); } -static struct of_device_id hdmi_match_types[] = { +static const struct of_device_id hdmi_match_types[] = { { .compatible = "samsung,exynos4210-hdmi", .data = &exynos4210_hdmi_driver_data, @@ -1934,8 +1932,7 @@ static int hdmi_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int exynos_hdmi_suspend(struct device *dev) +static int __maybe_unused exynos_hdmi_suspend(struct device *dev) { struct hdmi_context 
*hdata = dev_get_drvdata(dev); @@ -1944,7 +1941,7 @@ static int exynos_hdmi_suspend(struct device *dev) return 0; } -static int exynos_hdmi_resume(struct device *dev) +static int __maybe_unused exynos_hdmi_resume(struct device *dev) { struct hdmi_context *hdata = dev_get_drvdata(dev); int ret; @@ -1955,7 +1952,6 @@ static int exynos_hdmi_resume(struct device *dev) return 0; } -#endif static const struct dev_pm_ops exynos_hdmi_pm_ops = { SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL) diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 6bed4f3ffcd6..a998a8dd783c 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1094,28 +1094,28 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = { .atomic_check = mixer_atomic_check, }; -static struct mixer_drv_data exynos5420_mxr_drv_data = { +static const struct mixer_drv_data exynos5420_mxr_drv_data = { .version = MXR_VER_128_0_0_184, .is_vp_enabled = 0, }; -static struct mixer_drv_data exynos5250_mxr_drv_data = { +static const struct mixer_drv_data exynos5250_mxr_drv_data = { .version = MXR_VER_16_0_33_0, .is_vp_enabled = 0, }; -static struct mixer_drv_data exynos4212_mxr_drv_data = { +static const struct mixer_drv_data exynos4212_mxr_drv_data = { .version = MXR_VER_0_0_0_16, .is_vp_enabled = 1, }; -static struct mixer_drv_data exynos4210_mxr_drv_data = { +static const struct mixer_drv_data exynos4210_mxr_drv_data = { .version = MXR_VER_0_0_0_16, .is_vp_enabled = 1, .has_sclk = 1, }; -static struct of_device_id mixer_match_types[] = { +static const struct of_device_id mixer_match_types[] = { { .compatible = "samsung,exynos4210-mixer", .data = &exynos4210_mxr_drv_data, diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4577b0af6886..d1bd53b73738 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data) struct file_stats *stats = data; struct i915_vma *vma; + lockdep_assert_held(&obj->base.dev->struct_mutex); + stats->count++; stats->total += obj->base.size; if (!obj->bind_count) @@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data) struct drm_i915_gem_request *request; struct task_struct *task; + mutex_lock(&dev->struct_mutex); + memset(&stats, 0, sizeof(stats)); stats.file_priv = file->driver_priv; spin_lock(&file->table_lock); @@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data) * still alive (e.g. get_pid(current) => fork() => exit()). * Therefore, we need to protect this ->comm access using RCU. */ - mutex_lock(&dev->struct_mutex); request = list_first_entry_or_null(&file_priv->mm.request_list, struct drm_i915_gem_request, client_link); @@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) PIDTYPE_PID); print_file_stats(m, task ? 
task->comm : "<unknown>", stats); rcu_read_unlock(); + mutex_unlock(&dev->struct_mutex); } mutex_unlock(&dev->filelist_mutex); @@ -3083,7 +3087,7 @@ static void intel_connector_info(struct seq_file *m, connector->display_info.cea_rev); } - if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) + if (!intel_encoder) return; switch (connector->connector_type) { @@ -4576,7 +4580,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, sseu->slice_mask |= BIT(s); - if (IS_GEN9_BC(dev_priv)) + if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7dcac3bfb771..969bac8404f1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2434,8 +2434,9 @@ rebuild_st: * again with !__GFP_NORETRY. However, we still * want to fail this allocation rather than * trigger the out-of-memory killer and for - * this we want the future __GFP_MAYFAIL. + * this we want __GFP_RETRY_MAYFAIL. */ + gfp |= __GFP_RETRY_MAYFAIL; } } while (1); diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c index 152f16c11878..348b29a845c9 100644 --- a/drivers/gpu/drm/i915/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/i915_gem_clflush.c @@ -114,7 +114,7 @@ i915_clflush_notify(struct i915_sw_fence *fence, return NOTIFY_DONE; } -void i915_gem_clflush_object(struct drm_i915_gem_object *obj, +bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, unsigned int flags) { struct clflush *clflush; @@ -128,7 +128,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, */ if (!i915_gem_object_has_struct_page(obj)) { obj->cache_dirty = false; - return; + return false; } /* If the GPU is snooping the contents of the CPU cache, @@ -140,7 +140,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, * tracking. 
*/ if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent) - return; + return false; trace_i915_gem_object_clflush(obj); @@ -179,4 +179,5 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, } obj->cache_dirty = false; + return true; } diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h index 2455a7820937..f390247561b3 100644 --- a/drivers/gpu/drm/i915/i915_gem_clflush.h +++ b/drivers/gpu/drm/i915/i915_gem_clflush.h @@ -28,7 +28,7 @@ struct drm_i915_private; struct drm_i915_gem_object; -void i915_gem_clflush_object(struct drm_i915_gem_object *obj, +bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, unsigned int flags); #define I915_CLFLUSH_FORCE BIT(0) #define I915_CLFLUSH_SYNC BIT(1) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 39ed58a21fc1..e1e971ee2ed5 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -688,19 +688,19 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt, } static bool -needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, - struct intel_engine_cs *engine, - struct i915_gem_context *to) +needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine) { + struct i915_gem_context *from = engine->legacy_active_context; + if (!ppgtt) return false; /* Always load the ppgtt on first use */ - if (!engine->legacy_active_context) + if (!from) return true; /* Same context without new entries, skip */ - if (engine->legacy_active_context == to && + if ((!from->ppgtt || from->ppgtt == ppgtt) && !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) return false; @@ -744,7 +744,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) if (skip_rcs_switch(ppgtt, engine, to)) return 0; - if (needs_pd_load_pre(ppgtt, engine, to)) { + if (needs_pd_load_pre(ppgtt, engine)) { /* Older GENs and non render rings still want the load first, * "PP_DCLV followed by PP_DIR_BASE register through Load * Register Immediate commands in Ring Buffer before submitting @@ -841,7 +841,7 @@ int i915_switch_context(struct drm_i915_gem_request *req) struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt; - if (needs_pd_load_pre(ppgtt, engine, to)) { + if (needs_pd_load_pre(ppgtt, engine)) { int ret; trace_switch_mm(engine, to); @@ -852,6 +852,7 @@ int i915_switch_context(struct drm_i915_gem_request *req) ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); } + engine->legacy_active_context = to; return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 054b2e54cdaf..e9503f6d1100 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -560,9 +560,6 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, eb->args->flags |= __EXEC_HAS_RELOC; } - entry->flags |= __EXEC_OBJECT_HAS_PIN; - GEM_BUG_ON(eb_vma_misplaced(entry, vma)); - if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) { err = i915_vma_get_fence(vma); if (unlikely(err)) { @@ -574,6 +571,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, entry->flags |= __EXEC_OBJECT_HAS_FENCE; } + entry->flags |= __EXEC_OBJECT_HAS_PIN; + GEM_BUG_ON(eb_vma_misplaced(entry, vma)); + return 0; } @@ -1458,7 +1458,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) * to read. However, if the array is not writable the user loses * the updated relocation values. 
*/ - if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs)))) + if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs)))) return -EFAULT; do { @@ -1775,7 +1775,7 @@ out: } } - return err ?: have_copy; + return err; } static int eb_relocate(struct i915_execbuffer *eb) @@ -1825,7 +1825,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) int err; for (i = 0; i < count; i++) { - const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; + struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; struct i915_vma *vma = exec_to_vma(entry); struct drm_i915_gem_object *obj = vma->obj; @@ -1841,12 +1841,14 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) eb->request->capture_list = capture; } + if (unlikely(obj->cache_dirty && !obj->cache_coherent)) { + if (i915_gem_clflush_object(obj, 0)) + entry->flags &= ~EXEC_OBJECT_ASYNC; + } + if (entry->flags & EXEC_OBJECT_ASYNC) goto skip_flushes; - if (unlikely(obj->cache_dirty && !obj->cache_coherent)) - i915_gem_clflush_object(obj, 0); - err = i915_gem_request_await_object (eb->request, obj, entry->flags & EXEC_OBJECT_WRITE); if (err) @@ -2209,7 +2211,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_unlock; err = eb_relocate(&eb); - if (err) + if (err) { /* * If the user expects the execobject.offset and * reloc.presumed_offset to be an exact match, @@ -2218,8 +2220,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, * relocation. */ args->flags &= ~__EXEC_HAS_RELOC; - if (err < 0) goto err_vma; + } if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) { DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 7032c542a9b1..4dd4c2159a92 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req) goto err_unpin; } + ret = req->engine->emit_flush(req, EMIT_INVALIDATE); + if (ret) + goto err_unpin; + ret = req->engine->emit_bb_start(req, so->batch_offset, so->batch_size, I915_DISPATCH_SECURE); diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 7b7c84369d78..7579b9702c22 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -129,7 +129,7 @@ struct drm_i915_gem_request { * It is used by the driver to then queue the request for execution. */ struct i915_sw_fence submit; - wait_queue_t submitq; + wait_queue_entry_t submitq; wait_queue_head_t execute; /* A list of everyone we wait upon, and everyone who waits upon us. 
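[Editor's note on the i915 execbuffer hunk above: the access_ok() fix swaps sizeof(urelocs) for sizeof(*urelocs), i.e. the size of one relocation entry rather than the size of the pointer variable, so the checked range now covers the whole user array instead of a few bytes of it. A minimal userspace sketch of the difference follows; the reloc_entry layout is a hypothetical stand-in, not the real drm_i915_gem_relocation_entry definition.]

/* Hypothetical stand-in struct; only the sizeof arithmetic matters here. */
#include <stdio.h>
#include <stdint.h>

struct reloc_entry {
	uint32_t target_handle;
	uint32_t delta;
	uint64_t offset;
	uint64_t presumed_offset;
	uint32_t read_domains;
	uint32_t write_domains;
};

int main(void)
{
	struct reloc_entry *urelocs = NULL;
	size_t remain = 16;	/* number of entries still to validate */

	/* Buggy bound: 16 * sizeof(a pointer) = 128 bytes on a 64-bit build. */
	printf("remain * sizeof(urelocs)  = %zu bytes\n",
	       remain * sizeof(urelocs));
	/* Fixed bound: 16 * sizeof(one entry) = 512 bytes with this layout. */
	printf("remain * sizeof(*urelocs) = %zu bytes\n",
	       remain * sizeof(*urelocs));
	return 0;
}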
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 1032f98add11..77fb39808131 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock) return true; case MUTEX_TRYLOCK_FAILED: + *unlock = false; + preempt_disable(); do { cpu_relax(); if (mutex_trylock(&dev_priv->drm.struct_mutex)) { - case MUTEX_TRYLOCK_SUCCESS: *unlock = true; - return true; + break; } } while (!need_resched()); + preempt_enable(); + return *unlock; - return false; + case MUTEX_TRYLOCK_SUCCESS: + *unlock = true; + return true; } BUG(); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 9cd22f83b0cf..f33d90226704 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req) u32 *cs; int i; - cs = intel_ring_begin(req, n_flex_regs * 2 + 4); + cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4); if (IS_ERR(cs)) return PTR_ERR(cs); - *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1); + *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1); *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 474d23c0c0ce..f29540f922af 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -125,7 +125,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence, struct list_head *continuation) { wait_queue_head_t *x = &fence->wait; - wait_queue_t *pos, *next; + wait_queue_entry_t *pos, *next; unsigned long flags; debug_fence_deactivate(fence); @@ -133,31 +133,30 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence, /* * To prevent unbounded recursion as we traverse the graph of - * i915_sw_fences, we move the task_list from this, the next ready - * fence, to the tail of the original fence's task_list + * i915_sw_fences, we move the entry list from this, the next ready + * fence, to the tail of the original fence's entry list * (and so added to the list to be woken). 
*/ spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation); if (continuation) { - list_for_each_entry_safe(pos, next, &x->task_list, task_list) { + list_for_each_entry_safe(pos, next, &x->head, entry) { if (pos->func == autoremove_wake_function) pos->func(pos, TASK_NORMAL, 0, continuation); else - list_move_tail(&pos->task_list, continuation); + list_move_tail(&pos->entry, continuation); } } else { LIST_HEAD(extra); do { - list_for_each_entry_safe(pos, next, - &x->task_list, task_list) + list_for_each_entry_safe(pos, next, &x->head, entry) pos->func(pos, TASK_NORMAL, 0, &extra); if (list_empty(&extra)) break; - list_splice_tail_init(&extra, &x->task_list); + list_splice_tail_init(&extra, &x->head); } while (1); } spin_unlock_irqrestore(&x->lock, flags); @@ -222,9 +221,9 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence) i915_sw_fence_complete(fence); } -static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key) +static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key) { - list_del(&wq->task_list); + list_del(&wq->entry); __i915_sw_fence_complete(wq->private, key); if (wq->flags & I915_SW_FENCE_FLAG_ALLOC) @@ -235,7 +234,7 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void * static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence, const struct i915_sw_fence * const signaler) { - wait_queue_t *wq; + wait_queue_entry_t *wq; if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags)) return false; @@ -243,7 +242,7 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence, if (fence == signaler) return true; - list_for_each_entry(wq, &fence->wait.task_list, task_list) { + list_for_each_entry(wq, &fence->wait.head, entry) { if (wq->func != i915_sw_fence_wake) continue; @@ -256,12 +255,12 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence, static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence) { - wait_queue_t *wq; + wait_queue_entry_t *wq; if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags)) return; - list_for_each_entry(wq, &fence->wait.task_list, task_list) { + list_for_each_entry(wq, &fence->wait.head, entry) { if (wq->func != i915_sw_fence_wake) continue; @@ -288,7 +287,7 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence, static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *signaler, - wait_queue_t *wq, gfp_t gfp) + wait_queue_entry_t *wq, gfp_t gfp) { unsigned long flags; int pending; @@ -318,7 +317,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, pending |= I915_SW_FENCE_FLAG_ALLOC; } - INIT_LIST_HEAD(&wq->task_list); + INIT_LIST_HEAD(&wq->entry); wq->flags = pending; wq->func = i915_sw_fence_wake; wq->private = fence; @@ -327,7 +326,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, spin_lock_irqsave(&signaler->wait.lock, flags); if (likely(!i915_sw_fence_done(signaler))) { - __add_wait_queue_tail(&signaler->wait, wq); + __add_wait_queue_entry_tail(&signaler->wait, wq); pending = 1; } else { i915_sw_fence_wake(wq, 0, 0, NULL); @@ -340,7 +339,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *signaler, - wait_queue_t *wq) + wait_queue_entry_t *wq) { return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0); } diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h 
b/drivers/gpu/drm/i915/i915_sw_fence.h index 1d3b6051daaf..fe2ef4dadfc6 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -65,7 +65,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence); int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *after, - wait_queue_t *wq); + wait_queue_entry_t *wq); int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence, struct i915_sw_fence *after, gfp_t gfp); diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 532c709febbd..1cfe137cdc32 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -672,6 +672,11 @@ int i915_vma_unbind(struct i915_vma *vma) break; } + if (!ret) { + ret = i915_gem_active_retire(&vma->last_fence, + &vma->vm->i915->drm.struct_mutex); + } + __i915_vma_unpin(vma); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 4a673fc1a432..20cf272c97b1 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -284,12 +284,12 @@ static inline void __i915_vma_pin(struct i915_vma *vma) static inline void __i915_vma_unpin(struct i915_vma *vma) { - GEM_BUG_ON(!i915_vma_is_pinned(vma)); vma->flags--; } static inline void i915_vma_unpin(struct i915_vma *vma) { + GEM_BUG_ON(!i915_vma_is_pinned(vma)); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); __i915_vma_unpin(vma); } diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index eb638a1e69d2..42fb436f6cdc 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -15,13 +15,9 @@ static struct intel_dsm_priv { acpi_handle dhandle; } intel_dsm_priv; -static const u8 intel_dsm_guid[] = { - 0xd3, 0x73, 0xd8, 0x7e, - 0xd0, 0xc2, - 0x4f, 0x4e, - 0xa8, 0x54, - 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c -}; +static const guid_t intel_dsm_guid = + GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f, + 0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c); static char *intel_dsm_port_name(u8 id) { @@ -80,7 +76,7 @@ static void intel_dsm_platform_mux_info(void) int i; union acpi_object *pkg, *connector_count; - pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, intel_dsm_guid, + pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO, NULL, ACPI_TYPE_PACKAGE); if (!pkg) { @@ -118,7 +114,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev) if (!dhandle) return false; - if (!acpi_check_dsm(dhandle, intel_dsm_guid, INTEL_DSM_REVISION_ID, + if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID, 1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) { DRM_DEBUG_KMS("no _DSM method for intel device\n"); return false; diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 639d45c1dd2e..7ea7fd1e8856 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; uint8_t aux_channel, ddc_pin; /* Each DDI port can have more than one value on the "DVO Port" field, - * so look for all the possible values for each port and abort if more - * than one is found. */ + * so look for all the possible values for each port. 
+ */ int dvo_ports[][3] = { {DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, {DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, @@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, }; - /* Find the child device to use, abort if more than one found. */ + /* + * Find the first child device to reference the port, report if more + * than one found. + */ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { it = dev_priv->vbt.child_dev + i; @@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, if (it->common.dvo_port == dvo_ports[port][j]) { if (child) { - DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n", + DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n", port_name(port)); - return; + } else { + child = it; } - child = it; } } } diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 306c6b06b330..17c4ae7e4e7c 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset) } /* Program the max register to clamp values > 1.0. */ + i = lut_size - 1; I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), drm_color_lut_extract(lut[i].red, 16)); I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 80e96f1f49d2..d3b3252a8742 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1762,7 +1762,7 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, if (dev_priv->vbt.edp.low_vswing) { if (voltage == VOLTAGE_INFO_0_85V) { *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); - return cnl_ddi_translations_dp_0_85V; + return cnl_ddi_translations_edp_0_85V; } else if (voltage == VOLTAGE_INFO_0_95V) { *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); return cnl_ddi_translations_edp_0_95V; @@ -1896,8 +1896,8 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level) val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); val &= ~LOADGEN_SELECT; - if (((rate < 600000) && (width == 4) && (ln >= 1)) || - ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) { + if ((rate <= 600000 && width == 4 && ln >= 1) || + (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { val |= LOADGEN_SELECT; } I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dec9e58545a1..cc484b56eeaa 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3427,26 +3427,6 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv) intel_finish_page_flip_cs(dev_priv, crtc->pipe); } -static void intel_update_primary_planes(struct drm_device *dev) -{ - struct drm_crtc *crtc; - - for_each_crtc(dev, crtc) { - struct intel_plane *plane = to_intel_plane(crtc->primary); - struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - - if (plane_state->base.visible) { - trace_intel_update_plane(&plane->base, - to_intel_crtc(crtc)); - - plane->update_plane(plane, - to_intel_crtc_state(crtc->state), - plane_state); - } - } -} - static int __intel_display_resume(struct drm_device *dev, struct drm_atomic_state *state, @@ -3499,6 +3479,19 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) struct drm_atomic_state *state; int ret; + + /* reset doesn't 
touch the display */ + if (!i915.force_reset_modeset_test && + !gpu_reset_clobbers_display(dev_priv)) + return; + + /* We have a modeset vs reset deadlock, defensively unbreak it. + * + * FIXME: We can do a _lot_ better, this is just a first iteration. + */ + i915_gem_set_wedged(dev_priv); + DRM_DEBUG_DRIVER("Wedging GPU to avoid deadlocks with pending modeset updates\n"); + /* * Need mode_config.mutex so that we don't * trample ongoing ->detect() and whatnot. @@ -3512,12 +3505,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) drm_modeset_backoff(ctx); } - - /* reset doesn't touch the display, but flips might get nuked anyway, */ - if (!i915.force_reset_modeset_test && - !gpu_reset_clobbers_display(dev_priv)) - return; - /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. @@ -3547,6 +3534,14 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) struct drm_atomic_state *state = dev_priv->modeset_restore_state; int ret; + /* reset doesn't touch the display */ + if (!i915.force_reset_modeset_test && + !gpu_reset_clobbers_display(dev_priv)) + return; + + if (!state) + goto unlock; + /* * Flips in the rings will be nuked by the reset, * so complete all pending flips so that user space @@ -3558,22 +3553,10 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) /* reset doesn't touch the display */ if (!gpu_reset_clobbers_display(dev_priv)) { - if (!state) { - /* - * Flips in the rings have been nuked by the reset, - * so update the base address of all primary - * planes to the the last fb to make sure we're - * showing the correct fb after a reset. - * - * FIXME: Atomic will make this obsolete since we won't schedule - * CS-based flips (which might get lost in gpu resets) any more. 
- */ - intel_update_primary_planes(dev); - } else { - ret = __intel_display_resume(dev, state, ctx); + /* for testing only restore the display */ + ret = __intel_display_resume(dev, state, ctx); if (ret) DRM_ERROR("Restoring old state failed with %i\n", ret); - } } else { /* * The display has been reset as well, @@ -3597,8 +3580,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) intel_hpd_init(dev_priv); } - if (state) - drm_atomic_state_put(state); + drm_atomic_state_put(state); +unlock: drm_modeset_drop_locks(ctx); drm_modeset_acquire_fini(ctx); mutex_unlock(&dev->mode_config.mutex); @@ -9117,6 +9100,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, u64 power_domain_mask; bool active; + if (INTEL_GEN(dev_priv) >= 9) { + intel_crtc_init_scalers(crtc, pipe_config); + + pipe_config->scaler_state.scaler_id = -1; + pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); + } + power_domain = POWER_DOMAIN_PIPE(crtc->pipe); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; @@ -9145,13 +9135,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; - if (INTEL_GEN(dev_priv) >= 9) { - intel_crtc_init_scalers(crtc, pipe_config); - - pipe_config->scaler_state.scaler_id = -1; - pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); - } - power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { power_domain_mask |= BIT_ULL(power_domain); @@ -9540,7 +9523,16 @@ static void i9xx_update_cursor(struct intel_plane *plane, * On some platforms writing CURCNTR first will also * cause CURPOS to be armed by the CURBASE write. * Without the CURCNTR write the CURPOS write would - * arm itself. + * arm itself. Thus we always start the full update + * with a CURCNTR write. + * + * On other platforms CURPOS always requires the + * CURBASE write to arm the update. Additionally + * a write to any of the cursor register will cancel + * an already armed cursor update. Thus leaving out + * the CURBASE write after CURPOS could lead to a + * cursor that doesn't appear to move, or even change + * shape. Thus we always write CURBASE. * * CURCNTR and CUR_FBC_CTL are always * armed by the CURBASE write only.
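The comment above spells out the arming rules that motivate the one-line fix in the next hunk: CURCNTR and CUR_FBC_CTL are armed only by a CURBASE write, and a bare CURPOS write can either stay un-armed or cancel an already armed update, so CURBASE must always be written last. A condensed sketch of that ordering, assuming the register helpers used in the surrounding hunks; the function itself is hypothetical and omits the real driver's bookkeeping:

/*
 * Illustrative write order for a legacy (i9xx-style) cursor update.
 * "cntl", "pos" and "base" are assumed to have been computed by the
 * caller, as in the driver's own update path.
 */
static void example_i9xx_cursor_write_order(struct drm_i915_private *dev_priv,
					    enum pipe pipe,
					    u32 cntl, u32 pos, u32 base)
{
	/* Open the update; CURCNTR is itself only armed by CURBASE. */
	I915_WRITE_FW(CURCNTR(pipe), cntl);

	/* A position write on its own may stay un-armed or cancel a pending update. */
	I915_WRITE_FW(CURPOS(pipe), pos);

	/* Always finish with CURBASE: it is the write that arms everything. */
	I915_WRITE_FW(CURBASE(pipe), base);

	POSTING_READ_FW(CURBASE(pipe));
}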
@@ -9559,6 +9551,7 @@ static void i9xx_update_cursor(struct intel_plane *plane, plane->cursor.cntl = cntl; } else { I915_WRITE_FW(CURPOS(pipe), pos); + I915_WRITE_FW(CURBASE(pipe), base); } POSTING_READ_FW(CURBASE(pipe)); diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c index 6e09ceb71500..150a156f3b1e 100644 --- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c @@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector) struct intel_encoder *encoder = connector->encoder; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct mipi_dsi_device *dsi_device; - u8 data; + u8 data = 0; enum port port; /* FIXME: Need to take care of 16 bit brightness level */ diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index 7158c7ce9c09..91c07b0c8db9 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c @@ -306,7 +306,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv, if (!gpio_desc) { gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, - "panel", gpio_index, + NULL, gpio_index, value ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 7404cf2aac28..2afa4daa88e8 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1221,6 +1221,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) return ret; } +static u8 gtiir[] = { + [RCS] = 0, + [BCS] = 0, + [VCS] = 1, + [VCS2] = 1, + [VECS] = 3, +}; + static int gen8_init_common_ring(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; @@ -1245,9 +1253,22 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); - /* After a GPU reset, we may have requests to replay */ + GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir)); + + /* + * Clear any pending interrupt state. + * + * We do it twice out of paranoia that some of the IIR are double + * buffered, and if we only reset it once there may still be + * an interrupt pending. 
+ */ + I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), + GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); + I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), + GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); + /* After a GPU reset, we may have requests to replay */ submit = false; for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { if (!port_isset(&port[n])) diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 52b3a1fd4059..57ef5833c427 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -63,7 +63,6 @@ enum { }; /* Logical Rings */ -void intel_logical_ring_stop(struct intel_engine_cs *engine); void intel_logical_ring_cleanup(struct intel_engine_cs *engine); int logical_render_ring_init(struct intel_engine_cs *engine); int logical_xcs_ring_init(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 5abef482eacf..beb9baaf2f2e 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -210,8 +210,8 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port) struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - if (!IS_GEN9(dev_priv)) { - DRM_ERROR("LSPCON is supported on GEN9 only\n"); + if (!HAS_LSPCON(dev_priv)) { + DRM_ERROR("LSPCON is not supported on this platform\n"); return false; } diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 96c2cbd81869..593349be8b9d 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, if (i915.invert_brightness > 0 || dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { - return panel->backlight.max - val; + return panel->backlight.max - val + panel->backlight.min; } return val; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 48ea0fca1f72..40b224b44d1b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4463,8 +4463,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && (plane_bytes_per_line / 512 < 1)) selected_result = method2; - else if ((ddb_allocation && ddb_allocation / - fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1) + else if (ddb_allocation >= + fixed_16_16_to_u32_round_up(plane_blocks_per_line)) selected_result = min_fixed_16_16(method1, method2); else if (latency >= linetime_us) selected_result = min_fixed_16_16(method1, method2); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 627e2aa09766..8cdec455cf7d 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -206,7 +206,7 @@ struct drm_i915_private *mock_gem_device(void) mkwrite_device_info(i915)->ring_mask = BIT(0); i915->engine[RCS] = mock_engine(i915, "mock"); if (!i915->engine[RCS]) - goto err_dependencies; + goto err_priorities; i915->kernel_context = mock_context(i915, NULL); if (!i915->kernel_context) diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 49546222c6d3..6276bb834b4f 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -54,7 +54,7 @@ static const uint32_t 
ipu_plane_formats[] = { DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888, DRM_FORMAT_BGRA8888, - DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 636031a30e17..8aca20209cb8 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -237,7 +237,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) /* port@1 is the output port */ ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge); - if (ret) + if (ret && ret != -ENODEV) return ret; imxpd->dev = dev; diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index bf2e5be1ab30..e37b55a23a65 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -1,4 +1,5 @@ -mediatek-drm-y := mtk_disp_ovl.o \ +mediatek-drm-y := mtk_disp_color.o \ + mtk_disp_ovl.o \ mtk_disp_rdma.o \ mtk_drm_crtc.o \ mtk_drm_ddp.o \ diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c new file mode 100644 index 000000000000..ef79a6d55646 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <drm/drmP.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> + +#include "mtk_drm_crtc.h" +#include "mtk_drm_ddp_comp.h" + +#define DISP_COLOR_CFG_MAIN 0x0400 +#define DISP_COLOR_START_MT2701 0x0f00 +#define DISP_COLOR_START_MT8173 0x0c00 +#define DISP_COLOR_START(comp) ((comp)->data->color_offset) +#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50) +#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54) + +#define COLOR_BYPASS_ALL BIT(7) +#define COLOR_SEQ_SEL BIT(13) + +struct mtk_disp_color_data { + unsigned int color_offset; +}; + +/** + * struct mtk_disp_color - DISP_COLOR driver structure + * @ddp_comp - structure containing type enum and hardware resources + * @crtc - associated crtc to report irq events to + */ +struct mtk_disp_color { + struct mtk_ddp_comp ddp_comp; + struct drm_crtc *crtc; + const struct mtk_disp_color_data *data; +}; + +static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp) +{ + return container_of(comp, struct mtk_disp_color, ddp_comp); +} + +static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc) +{ + struct mtk_disp_color *color = comp_to_color(comp); + + writel(w, comp->regs + DISP_COLOR_WIDTH(color)); + writel(h, comp->regs + DISP_COLOR_HEIGHT(color)); +} + +static void mtk_color_start(struct mtk_ddp_comp *comp) +{ + struct mtk_disp_color *color = comp_to_color(comp); + + writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL, + comp->regs + DISP_COLOR_CFG_MAIN); + writel(0x1, comp->regs + DISP_COLOR_START(color)); +} + +static const struct mtk_ddp_comp_funcs mtk_disp_color_funcs = { + .config = 
mtk_color_config, + .start = mtk_color_start, +}; + +static int mtk_disp_color_bind(struct device *dev, struct device *master, + void *data) +{ + struct mtk_disp_color *priv = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + int ret; + + ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp); + if (ret < 0) { + dev_err(dev, "Failed to register component %s: %d\n", + dev->of_node->full_name, ret); + return ret; + } + + return 0; +} + +static void mtk_disp_color_unbind(struct device *dev, struct device *master, + void *data) +{ + struct mtk_disp_color *priv = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + + mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp); +} + +static const struct component_ops mtk_disp_color_component_ops = { + .bind = mtk_disp_color_bind, + .unbind = mtk_disp_color_unbind, +}; + +static int mtk_disp_color_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_disp_color *priv; + int comp_id; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_COLOR); + if (comp_id < 0) { + dev_err(dev, "Failed to identify by alias: %d\n", comp_id); + return comp_id; + } + + ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id, + &mtk_disp_color_funcs); + if (ret) { + dev_err(dev, "Failed to initialize component: %d\n", ret); + return ret; + } + + priv->data = of_device_get_match_data(dev); + + platform_set_drvdata(pdev, priv); + + ret = component_add(dev, &mtk_disp_color_component_ops); + if (ret) + dev_err(dev, "Failed to add component: %d\n", ret); + + return ret; +} + +static int mtk_disp_color_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &mtk_disp_color_component_ops); + + return 0; +} + +static const struct mtk_disp_color_data mt2701_color_driver_data = { + .color_offset = DISP_COLOR_START_MT2701, +}; + +static const struct mtk_disp_color_data mt8173_color_driver_data = { + .color_offset = DISP_COLOR_START_MT8173, +}; + +static const struct of_device_id mtk_disp_color_driver_dt_match[] = { + { .compatible = "mediatek,mt2701-disp-color", + .data = &mt2701_color_driver_data}, + { .compatible = "mediatek,mt8173-disp-color", + .data = &mt8173_color_driver_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_disp_color_driver_dt_match); + +struct platform_driver mtk_disp_color_driver = { + .probe = mtk_disp_color_probe, + .remove = mtk_disp_color_remove, + .driver = { + .name = "mediatek-disp-color", + .owner = THIS_MODULE, + .of_match_table = mtk_disp_color_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index a14d7d64d7b1..35bc5babdbf7 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -42,9 +42,12 @@ #define OVL_RDMA_MEM_GMC 0x40402020 #define OVL_CON_BYTE_SWAP BIT(24) +#define OVL_CON_MTX_YUV_TO_RGB (6 << 16) #define OVL_CON_CLRFMT_RGB (1 << 12) #define OVL_CON_CLRFMT_RGBA8888 (2 << 12) #define OVL_CON_CLRFMT_ARGB8888 (3 << 12) +#define OVL_CON_CLRFMT_UYVY (4 << 12) +#define OVL_CON_CLRFMT_YUYV (5 << 12) #define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \ 0 : OVL_CON_CLRFMT_RGB) #define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? 
\ @@ -176,6 +179,10 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ABGR8888: return OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP; + case DRM_FORMAT_UYVY: + return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB; + case DRM_FORMAT_YUYV: + return OVL_CON_CLRFMT_YUYV | OVL_CON_MTX_YUV_TO_RGB; } } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 6582e1f56d37..cb32c9369f3a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -559,6 +559,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr, sizeof(*mtk_crtc->ddp_comp), GFP_KERNEL); + if (!mtk_crtc->ddp_comp) + return -ENOMEM; mtk_crtc->mutex = mtk_disp_mutex_get(priv->mutex_dev, pipe); if (IS_ERR(mtk_crtc->mutex)) { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 8b52416b6e41..07d7ea2268ef 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -38,13 +38,6 @@ #define DISP_REG_UFO_START 0x0000 -#define DISP_COLOR_CFG_MAIN 0x0400 -#define DISP_COLOR_START_MT2701 0x0f00 -#define DISP_COLOR_START_MT8173 0x0c00 -#define DISP_COLOR_START(comp) ((comp)->data->color_offset) -#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50) -#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54) - #define DISP_AAL_EN 0x0000 #define DISP_AAL_SIZE 0x0030 @@ -55,9 +48,6 @@ #define LUT_10BIT_MASK 0x03ff -#define COLOR_BYPASS_ALL BIT(7) -#define COLOR_SEQ_SEL BIT(13) - #define OD_RELAYMODE BIT(0) #define UFO_BYPASS BIT(2) @@ -82,20 +72,6 @@ #define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) #define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0) -struct mtk_disp_color_data { - unsigned int color_offset; -}; - -struct mtk_disp_color { - struct mtk_ddp_comp ddp_comp; - const struct mtk_disp_color_data *data; -}; - -static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp) -{ - return container_of(comp, struct mtk_disp_color, ddp_comp); -} - void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, unsigned int CFG) { @@ -119,25 +95,6 @@ void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, } } -static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w, - unsigned int h, unsigned int vrefresh, - unsigned int bpc) -{ - struct mtk_disp_color *color = comp_to_color(comp); - - writel(w, comp->regs + DISP_COLOR_WIDTH(color)); - writel(h, comp->regs + DISP_COLOR_HEIGHT(color)); -} - -static void mtk_color_start(struct mtk_ddp_comp *comp) -{ - struct mtk_disp_color *color = comp_to_color(comp); - - writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL, - comp->regs + DISP_COLOR_CFG_MAIN); - writel(0x1, comp->regs + DISP_COLOR_START(color)); -} - static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, unsigned int bpc) @@ -229,11 +186,6 @@ static const struct mtk_ddp_comp_funcs ddp_gamma = { .stop = mtk_gamma_stop, }; -static const struct mtk_ddp_comp_funcs ddp_color = { - .config = mtk_color_config, - .start = mtk_color_start, -}; - static const struct mtk_ddp_comp_funcs ddp_od = { .config = mtk_od_config, .start = mtk_od_start, @@ -268,8 +220,8 @@ struct mtk_ddp_comp_match { static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal }, [DDP_COMPONENT_BLS] 
= { MTK_DISP_BLS, 0, NULL }, - [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, - [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, + [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL }, + [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL }, [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL }, [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL }, [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL }, @@ -286,22 +238,6 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, }; -static const struct mtk_disp_color_data mt2701_color_driver_data = { - .color_offset = DISP_COLOR_START_MT2701, -}; - -static const struct mtk_disp_color_data mt8173_color_driver_data = { - .color_offset = DISP_COLOR_START_MT8173, -}; - -static const struct of_device_id mtk_disp_color_driver_dt_match[] = { - { .compatible = "mediatek,mt2701-disp-color", - .data = &mt2701_color_driver_data}, - { .compatible = "mediatek,mt8173-disp-color", - .data = &mt8173_color_driver_data}, - {}, -}; - int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type) { @@ -324,23 +260,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node, enum mtk_ddp_comp_type type; struct device_node *larb_node; struct platform_device *larb_pdev; - const struct of_device_id *match; - struct mtk_disp_color *color; if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX) return -EINVAL; type = mtk_ddp_matches[comp_id].type; - if (type == MTK_DISP_COLOR) { - devm_kfree(dev, comp); - color = devm_kzalloc(dev, sizeof(*color), GFP_KERNEL); - if (!color) - return -ENOMEM; - - match = of_match_node(mtk_disp_color_driver_dt_match, node); - color->data = match->data; - comp = &color->ddp_comp; - } comp->id = comp_id; comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index f6c8ec4c7dbc..41d2cffe953e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -439,11 +439,12 @@ static int mtk_drm_probe(struct platform_device *pdev) private->comp_node[comp_id] = of_node_get(node); /* - * Currently only the OVL, RDMA, DSI, and DPI blocks have + * Currently only the COLOR, OVL, RDMA, DSI, and DPI blocks have * separate component platform drivers and initialize their own * DDP component structure. The others are initialized here. 
*/ - if (comp_type == MTK_DISP_OVL || + if (comp_type == MTK_DISP_COLOR || + comp_type == MTK_DISP_OVL || comp_type == MTK_DISP_RDMA || comp_type == MTK_DSI || comp_type == MTK_DPI) { @@ -566,6 +567,7 @@ static struct platform_driver mtk_drm_platform_driver = { static struct platform_driver * const mtk_drm_drivers[] = { &mtk_ddp_driver, + &mtk_disp_color_driver, &mtk_disp_ovl_driver, &mtk_disp_rdma_driver, &mtk_dpi_driver, @@ -576,33 +578,14 @@ static struct platform_driver * const mtk_drm_drivers[] = { static int __init mtk_drm_init(void) { - int ret; - int i; - - for (i = 0; i < ARRAY_SIZE(mtk_drm_drivers); i++) { - ret = platform_driver_register(mtk_drm_drivers[i]); - if (ret < 0) { - pr_err("Failed to register %s driver: %d\n", - mtk_drm_drivers[i]->driver.name, ret); - goto err; - } - } - - return 0; - -err: - while (--i >= 0) - platform_driver_unregister(mtk_drm_drivers[i]); - - return ret; + return platform_register_drivers(mtk_drm_drivers, + ARRAY_SIZE(mtk_drm_drivers)); } static void __exit mtk_drm_exit(void) { - int i; - - for (i = ARRAY_SIZE(mtk_drm_drivers) - 1; i >= 0; i--) - platform_driver_unregister(mtk_drm_drivers[i]); + platform_unregister_drivers(mtk_drm_drivers, + ARRAY_SIZE(mtk_drm_drivers)); } module_init(mtk_drm_init); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index aef8747d810b..c3378c452c0a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -59,6 +59,7 @@ struct mtk_drm_private { }; extern struct platform_driver mtk_ddp_driver; +extern struct platform_driver mtk_disp_color_driver; extern struct platform_driver mtk_disp_ovl_driver; extern struct platform_driver mtk_disp_rdma_driver; extern struct platform_driver mtk_dpi_driver; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index e405e89ed5e5..1a59b9ab4aa8 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -28,6 +28,8 @@ static const u32 formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565, + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, }; static void mtk_plane_reset(struct drm_plane *plane) diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index b5cc6e12334c..97253c8f813b 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -930,7 +930,7 @@ static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data) DRM_INFO("type is 0x02, try again\n"); break; default: - DRM_INFO("type(0x%x) cannot be non-recognite\n", type); + DRM_INFO("type(0x%x) not recognized\n", type); break; } diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 0a4ffd724146..71eb4fbbfc85 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1778,33 +1778,14 @@ static struct platform_driver * const mtk_hdmi_drivers[] = { static int __init mtk_hdmitx_init(void) { - int ret; - int i; - - for (i = 0; i < ARRAY_SIZE(mtk_hdmi_drivers); i++) { - ret = platform_driver_register(mtk_hdmi_drivers[i]); - if (ret < 0) { - pr_err("Failed to register %s driver: %d\n", - mtk_hdmi_drivers[i]->driver.name, ret); - goto err; - } - } - - return 0; - -err: - while (--i >= 0) - platform_driver_unregister(mtk_hdmi_drivers[i]); - - return ret; + return platform_register_drivers(mtk_hdmi_drivers, + ARRAY_SIZE(mtk_hdmi_drivers)); } static void __exit mtk_hdmitx_exit(void) { - int i; - - for (i = ARRAY_SIZE(mtk_hdmi_drivers) - 
1; i >= 0; i--) - platform_driver_unregister(mtk_hdmi_drivers[i]); + platform_unregister_drivers(mtk_hdmi_drivers, + ARRAY_SIZE(mtk_hdmi_drivers)); } module_init(mtk_hdmitx_init); diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h index 45cf363d25ad..a45bb22275a7 100644 --- a/drivers/gpu/drm/mga/mga_drv.h +++ b/drivers/gpu/drm/mga/mga_drv.h @@ -159,6 +159,8 @@ extern int mga_dma_bootstrap(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mga_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int mga_getparam(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int mga_dma_flush(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mga_dma_reset(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c index 729bfd56b55f..245fb2e359cf 100644 --- a/drivers/gpu/drm/mga/mga_ioc32.c +++ b/drivers/gpu/drm/mga/mga_ioc32.c @@ -61,46 +61,25 @@ static int compat_mga_init(struct file *file, unsigned int cmd, unsigned long arg) { drm_mga_init32_t init32; - drm_mga_init_t __user *init; - int err = 0, i; + drm_mga_init_t init; if (copy_from_user(&init32, (void __user *)arg, sizeof(init32))) return -EFAULT; - init = compat_alloc_user_space(sizeof(*init)); - if (!access_ok(VERIFY_WRITE, init, sizeof(*init)) - || __put_user(init32.func, &init->func) - || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset) - || __put_user(init32.chipset, &init->chipset) - || __put_user(init32.sgram, &init->sgram) - || __put_user(init32.maccess, &init->maccess) - || __put_user(init32.fb_cpp, &init->fb_cpp) - || __put_user(init32.front_offset, &init->front_offset) - || __put_user(init32.front_pitch, &init->front_pitch) - || __put_user(init32.back_offset, &init->back_offset) - || __put_user(init32.back_pitch, &init->back_pitch) - || __put_user(init32.depth_cpp, &init->depth_cpp) - || __put_user(init32.depth_offset, &init->depth_offset) - || __put_user(init32.depth_pitch, &init->depth_pitch) - || __put_user(init32.fb_offset, &init->fb_offset) - || __put_user(init32.mmio_offset, &init->mmio_offset) - || __put_user(init32.status_offset, &init->status_offset) - || __put_user(init32.warp_offset, &init->warp_offset) - || __put_user(init32.primary_offset, &init->primary_offset) - || __put_user(init32.buffers_offset, &init->buffers_offset)) - return -EFAULT; - - for (i = 0; i < MGA_NR_TEX_HEAPS; i++) { - err |= - __put_user(init32.texture_offset[i], - &init->texture_offset[i]); - err |= - __put_user(init32.texture_size[i], &init->texture_size[i]); - } - if (err) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init); + init.func = init32.func; + init.sarea_priv_offset = init32.sarea_priv_offset; + memcpy(&init.chipset, &init32.chipset, + offsetof(drm_mga_init_t, fb_offset) - + offsetof(drm_mga_init_t, chipset)); + init.fb_offset = init32.fb_offset; + init.mmio_offset = init32.mmio_offset; + init.status_offset = init32.status_offset; + init.warp_offset = init32.warp_offset; + init.primary_offset = init32.primary_offset; + init.buffers_offset = init32.buffers_offset; + + return drm_ioctl_kernel(file, mga_dma_init, &init, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); } typedef struct drm_mga_getparam32 { @@ -112,19 +91,14 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd, unsigned long arg) { drm_mga_getparam32_t getparam32; - drm_mga_getparam_t __user *getparam; + drm_mga_getparam_t getparam; if 
(copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32))) return -EFAULT; - getparam = compat_alloc_user_space(sizeof(*getparam)); - if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam)) - || __put_user(getparam32.param, &getparam->param) - || __put_user((void __user *)(unsigned long)getparam32.value, - &getparam->value)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); + getparam.param = getparam32.param; + getparam.value = compat_ptr(getparam32.value); + return drm_ioctl_kernel(file, mga_getparam, &getparam, DRM_AUTH); } typedef struct drm_mga_drm_bootstrap32 { @@ -141,48 +115,33 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd, unsigned long arg) { drm_mga_dma_bootstrap32_t dma_bootstrap32; - drm_mga_dma_bootstrap_t __user *dma_bootstrap; + drm_mga_dma_bootstrap_t dma_bootstrap; int err; if (copy_from_user(&dma_bootstrap32, (void __user *)arg, sizeof(dma_bootstrap32))) return -EFAULT; - dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap)); - if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap)) - || __put_user(dma_bootstrap32.texture_handle, - &dma_bootstrap->texture_handle) - || __put_user(dma_bootstrap32.texture_size, - &dma_bootstrap->texture_size) - || __put_user(dma_bootstrap32.primary_size, - &dma_bootstrap->primary_size) - || __put_user(dma_bootstrap32.secondary_bin_count, - &dma_bootstrap->secondary_bin_count) - || __put_user(dma_bootstrap32.secondary_bin_size, - &dma_bootstrap->secondary_bin_size) - || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode) - || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size)) - return -EFAULT; + dma_bootstrap.texture_handle = dma_bootstrap32.texture_handle; + dma_bootstrap.texture_size = dma_bootstrap32.texture_size; + dma_bootstrap.primary_size = dma_bootstrap32.primary_size; + dma_bootstrap.secondary_bin_count = dma_bootstrap32.secondary_bin_count; + dma_bootstrap.secondary_bin_size = dma_bootstrap32.secondary_bin_size; + dma_bootstrap.agp_mode = dma_bootstrap32.agp_mode; + dma_bootstrap.agp_size = dma_bootstrap32.agp_size; - err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP, - (unsigned long)dma_bootstrap); + err = drm_ioctl_kernel(file, mga_dma_bootstrap, &dma_bootstrap, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY); if (err) return err; - if (__get_user(dma_bootstrap32.texture_handle, - &dma_bootstrap->texture_handle) - || __get_user(dma_bootstrap32.texture_size, - &dma_bootstrap->texture_size) - || __get_user(dma_bootstrap32.primary_size, - &dma_bootstrap->primary_size) - || __get_user(dma_bootstrap32.secondary_bin_count, - &dma_bootstrap->secondary_bin_count) - || __get_user(dma_bootstrap32.secondary_bin_size, - &dma_bootstrap->secondary_bin_size) - || __get_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode) - || __get_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size)) - return -EFAULT; - + dma_bootstrap32.texture_handle = dma_bootstrap.texture_handle; + dma_bootstrap32.texture_size = dma_bootstrap.texture_size; + dma_bootstrap32.primary_size = dma_bootstrap.primary_size; + dma_bootstrap32.secondary_bin_count = dma_bootstrap.secondary_bin_count; + dma_bootstrap32.secondary_bin_size = dma_bootstrap.secondary_bin_size; + dma_bootstrap32.agp_mode = dma_bootstrap.agp_mode; + dma_bootstrap32.agp_size = dma_bootstrap.agp_size; if (copy_to_user((void __user *)arg, &dma_bootstrap32, sizeof(dma_bootstrap32))) return -EFAULT; @@ -190,10 +149,14 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned 
int cmd, return 0; } -drm_ioctl_compat_t *mga_compat_ioctls[] = { - [DRM_MGA_INIT] = compat_mga_init, - [DRM_MGA_GETPARAM] = compat_mga_getparam, - [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap, +static struct { + drm_ioctl_compat_t *fn; + char *name; +} mga_compat_ioctls[] = { +#define DRM_IOCTL32_DEF(n, f)[DRM_##n] = {.fn = f, .name = #n} + DRM_IOCTL32_DEF(MGA_INIT, compat_mga_init), + DRM_IOCTL32_DEF(MGA_GETPARAM, compat_mga_getparam), + DRM_IOCTL32_DEF(MGA_DMA_BOOTSTRAP, compat_mga_dma_bootstrap), }; /** @@ -208,19 +171,27 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = { long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); + struct drm_file *file_priv = filp->private_data; drm_ioctl_compat_t *fn = NULL; int ret; if (nr < DRM_COMMAND_BASE) return drm_compat_ioctl(filp, cmd, arg); - if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) - fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE]; - - if (fn != NULL) - ret = (*fn) (filp, cmd, arg); - else - ret = drm_ioctl(filp, cmd, arg); - + if (nr >= DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) + return drm_ioctl(filp, cmd, arg); + + fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE].fn; + if (!fn) + return drm_ioctl(filp, cmd, arg); + + DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", + task_pid_nr(current), + (long)old_encode_dev(file_priv->minor->kdev->devt), + file_priv->authenticated, + mga_compat_ioctls[nr - DRM_COMMAND_BASE].name); + ret = (*fn) (filp, cmd, arg); + if (ret) + DRM_DEBUG("ret = %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index 792f924496fc..e5f6b735f575 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c @@ -1005,7 +1005,7 @@ static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *fil return 0; } -static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) +int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_getparam_t *param = data; diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index adb411a078e8..f4b53588e071 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, if (IS_G200_SE(mdev)) { - if (mdev->unique_rev_id >= 0x02) { + if (mdev->unique_rev_id >= 0x04) { + WREG8(MGAREG_CRTCEXT_INDEX, 0x06); + WREG8(MGAREG_CRTCEXT_DATA, 0); + } else if (mdev->unique_rev_id >= 0x02) { u8 hi_pri_lvl; u32 bpp; u32 mb; @@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector, if (mga_vga_calculate_mode_bandwidth(mode, bpp) > (30100 * 1024)) return MODE_BANDWIDTH; + } else { + if (mga_vga_calculate_mode_bandwidth(mode, bpp) + > (55000 * 1024)) + return MODE_BANDWIDTH; } } else if (mdev->type == G200_WB) { if (mode->hdisplay > 1280) diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index b638d192ce5e..99d39b2aefa6 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -5,7 +5,7 @@ config DRM_MSM depends on ARCH_QCOM || (ARM && COMPILE_TEST) depends on OF && COMMON_CLK depends on MMU - select QCOM_MDT_LOADER + select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR select DRM_KMS_HELPER select DRM_PANEL diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 
b4b54f1c24bc..f9eae03aa1dc 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -15,7 +15,7 @@ #include <linux/cpumask.h> #include <linux/qcom_scm.h> #include <linux/dma-mapping.h> -#include <linux/of_reserved_mem.h> +#include <linux/of_address.h> #include <linux/soc/qcom/mdt_loader.h> #include "msm_gem.h" #include "msm_mmu.h" @@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu); #define GPU_PAS_ID 13 -#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER) - static int zap_shader_load_mdt(struct device *dev, const char *fwname) { const struct firmware *fw; + struct device_node *np; + struct resource r; phys_addr_t mem_phys; ssize_t mem_size; void *mem_region = NULL; int ret; + if (!IS_ENABLED(CONFIG_ARCH_QCOM)) + return -EINVAL; + + np = of_get_child_by_name(dev->of_node, "zap-shader"); + if (!np) + return -ENODEV; + + np = of_parse_phandle(np, "memory-region", 0); + if (!np) + return -EINVAL; + + ret = of_address_to_resource(np, 0, &r); + if (ret) + return ret; + + mem_phys = r.start; + mem_size = resource_size(&r); + /* Request the MDT file for the firmware */ ret = request_firmware(&fw, fwname, dev); if (ret) { @@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname) } /* Allocate memory for the firmware image */ - mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL); + mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC); if (!mem_region) { ret = -ENOMEM; goto out; @@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname) DRM_DEV_ERROR(dev, "Unable to authorize the image\n"); out: + if (mem_region) + memunmap(mem_region); + release_firmware(fw); return ret; } -#else -static int zap_shader_load_mdt(struct device *dev, const char *fwname) -{ - return -ENODEV; -} -#endif static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx) @@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, gpu->funcs->flush(gpu); } -struct a5xx_hwcg { +static const struct { u32 offset; u32 value; -}; - -static const struct a5xx_hwcg a530_hwcg[] = { +} a5xx_hwcg[] = { {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, @@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = { {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} }; -static const struct { - int (*test)(struct adreno_gpu *gpu); - const struct a5xx_hwcg *regs; - unsigned int count; -} a5xx_hwcg_regs[] = { - { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), }, -}; - -static void _a5xx_enable_hwcg(struct msm_gpu *gpu, - const struct a5xx_hwcg *regs, unsigned int count) +void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) { unsigned int i; - for (i = 0; i < count; i++) - gpu_write(gpu, regs[i].offset, regs[i].value); + for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++) + gpu_write(gpu, a5xx_hwcg[i].offset, + state ? a5xx_hwcg[i].value : 0); - gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00); - gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182); -} - -static void a5xx_enable_hwcg(struct msm_gpu *gpu) -{ - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) { - if (a5xx_hwcg_regs[i].test(adreno_gpu)) { - _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs, - a5xx_hwcg_regs[i].count); - return; - } - } + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 
0xAAA8AA00 : 0); + gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180); } static int a5xx_me_init(struct msm_gpu *gpu) @@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu) return ret; } -/* Set up a child device to "own" the zap shader */ -static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev) -{ - struct device_node *node; - int ret; - - if (dev->parent) - return 0; - - /* Find the sub-node for the zap shader */ - node = of_get_child_by_name(parent->of_node, "zap-shader"); - if (!node) { - DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n"); - return -ENODEV; - } - - dev->parent = parent; - dev->of_node = node; - dev_set_name(dev, "adreno_zap_shader"); - - ret = device_register(dev); - if (ret) { - DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n"); - goto out; - } - - ret = of_reserved_mem_device_init(dev); - if (ret) { - DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n"); - device_unregister(dev); - } - -out: - if (ret) - dev->parent = NULL; - - return ret; -} - static int a5xx_zap_shader_init(struct msm_gpu *gpu) { static bool loaded; @@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu) return -ENODEV; } - ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev); - - if (!ret) - ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev, - adreno_gpu->info->zapfw); + ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw); loaded = !ret; @@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); /* Enable HWCG */ - a5xx_enable_hwcg(gpu); + a5xx_set_hwcg(gpu, true); gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); @@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu) DBG("%s", gpu->name); - if (a5xx_gpu->zap_dev.parent) - device_unregister(&a5xx_gpu->zap_dev); - if (a5xx_gpu->pm4_bo) { if (a5xx_gpu->pm4_iova) msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace); @@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = { 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, - 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807, - 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0, - 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, - 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82, - 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, - 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, - 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, - 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145, - 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23, - 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43, - 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, - 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147, - 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, - 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268, - 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, - 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405, - 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3, - 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9, - 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, - 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A, - 
0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F, - 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0, - 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, - 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF, - ~0 + 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841, + 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28, + 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, + 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98, + 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585, + 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, + 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, + 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545, + 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0, + 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, + 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, + 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9, + 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201, + 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A, + 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F, + 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, + 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947, + 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, + 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68, + 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, + 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, + 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3, + 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F, + 0xB9A0, 0xB9BF, ~0 }; static void a5xx_dump(struct msm_gpu *gpu) @@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) { seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A5XX_RBBM_STATUS)); + + /* + * Temporarily disable hardware clock gating before going into + * adreno_show to avoid issues while reading the registers + */ + a5xx_set_hwcg(gpu, false); adreno_show(gpu, m); + a5xx_set_hwcg(gpu, true); } #endif diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 6638bc85645d..1137092241d5 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -36,8 +36,6 @@ struct a5xx_gpu { uint32_t gpmu_dwords; uint32_t lm_leakage; - - struct device zap_dev; }; #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) @@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, } bool a5xx_idle(struct msm_gpu *gpu); +void a5xx_set_hwcg(struct msm_gpu *gpu, bool state); #endif /* __A5XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index f1ab2703674a..7414c6bbd582 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) *value = adreno_gpu->base.fast_rate; return 0; case MSM_PARAM_TIMESTAMP: - if (adreno_gpu->funcs->get_timestamp) - return adreno_gpu->funcs->get_timestamp(gpu, value); + if (adreno_gpu->funcs->get_timestamp) { + int ret; + + pm_runtime_get_sync(&gpu->pdev->dev); + ret = adreno_gpu->funcs->get_timestamp(gpu, value); + pm_runtime_put_autosuspend(&gpu->pdev->dev); + + return ret; + } return -EINVAL; default: DBG("%s: 
invalid param: %u", gpu->name, param); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 9e9c5696bc03..c7b612c3d771 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, struct msm_dsi_phy_clk_request *clk_req) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + int ret; + + ret = dsi_calc_clk_rate(msm_host); + if (ret) { + pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); + return; + } clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; clk_req->escclk_rate = msm_host->esc_clk_rate; @@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, struct drm_display_mode *mode) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - int ret; if (msm_host->mode) { drm_mode_destroy(msm_host->dev, msm_host->mode); @@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, return -ENOMEM; } - ret = dsi_calc_clk_rate(msm_host); - if (ret) { - pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); - return ret; - } - return 0; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index cb5415d6c04b..735a87a699fa 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc) struct mdp5_ctl *ctl = mdp5_cstate->ctl; uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; unsigned long flags; - enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; - enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; + enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; + enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; int i, plane_cnt = 0; bool bg_alpha_enabled = false; u32 mixer_op_mode = 0; @@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, if (!handle) { DBG("Cursor off"); cursor_enable = false; + mdp5_enable(mdp5_kms); goto set_cursor; } @@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, get_roi(crtc, &roi_w, &roi_h); + mdp5_enable(mdp5_kms); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); @@ -804,6 +807,7 @@ set_cursor: crtc_flush(crtc, flush_mask); end: + mdp5_disable(mdp5_kms); if (old_bo) { drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); /* enable vblank to complete cursor work: */ @@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) get_roi(crtc, &roi_w, &roi_h); + mdp5_enable(mdp5_kms); + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | @@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) crtc_flush(crtc, flush_mask); + mdp5_disable(mdp5_kms); + return 0; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 97f3294fbfc6..70bef51245af 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) struct mdp5_interface *intf = mdp5_encoder->intf; if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) - mdp5_cmd_encoder_disable(encoder); + 
mdp5_cmd_encoder_enable(encoder); else mdp5_vid_encoder_enable(encoder); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 5d13fa5381ee..1c603aef3c59 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp, const char *name, bool mandatory) { struct device *dev = &pdev->dev; - struct clk *clk = devm_clk_get(dev, name); + struct clk *clk = msm_clk_get(pdev, name); if (IS_ERR(clk) && mandatory) { dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); return PTR_ERR(clk); @@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) } /* mandatory clocks: */ - ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true); + ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true); + ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true); + ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true); if (ret) goto fail; - ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true); + ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true); if (ret) goto fail; /* optional clocks: */ - get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false); + get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); /* we need to set a default rate before enabling. Set a safe * rate first, then figure out hw revision, and then set a diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index fe3a4de1a433..61f39c86dd09 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, struct mdp5_hw_pipe *right_hwpipe; const struct mdp_format *format; uint32_t nplanes, config = 0; - struct phase_step step = { 0 }; - struct pixel_ext pe = { 0 }; + struct phase_step step = { { 0 } }; + struct pixel_ext pe = { { 0 } }; uint32_t hdecm = 0, vdecm = 0; uint32_t pix_format; unsigned int rotation; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 65f35544c1ec..a0c60e738db8 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj, struct page **pages; vma = add_vma(obj, aspace); - if (IS_ERR(vma)) - return PTR_ERR(vma); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto unlock; + } pages = get_pages(obj); if (IS_ERR(pages)) { @@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, fail: del_vma(vma); - +unlock: mutex_unlock(&msm_obj->lock); return ret; } @@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, if (use_vram) { struct msm_gem_vma *vma; struct page **pages; + struct msm_gem_object *msm_obj = to_msm_bo(obj); + + mutex_lock(&msm_obj->lock); vma = add_vma(obj, NULL); + mutex_unlock(&msm_obj->lock); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto fail; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 6bfca7470141..8a75c0bd8a78 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds) { struct msm_gem_submit *submit; - 
uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) + - (nr_cmds * sizeof(submit->cmd[0])); + uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) + + ((u64)nr_cmds * sizeof(submit->cmd[0])); if (sz > SIZE_MAX) return NULL; @@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret) goto out; - if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) { + if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { ret = submit_fence_sync(submit); if (ret) goto out; diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index c36321bc8714..d34e331554f3 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -42,7 +42,7 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma, struct sg_table *sgt) { - if (!vma->iova) + if (!aspace || !vma->iova) return; if (aspace->mmu) { diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 39468c218027..7459ef9943ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -60,15 +60,13 @@ bool nouveau_is_v1_dsm(void) { } #ifdef CONFIG_VGA_SWITCHEROO -static const char nouveau_dsm_muid[] = { - 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, - 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, -}; +static const guid_t nouveau_dsm_muid = + GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48, + 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4); -static const char nouveau_op_dsm_muid[] = { - 0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47, - 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0, -}; +static const guid_t nouveau_op_dsm_muid = + GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B, + 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0); static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result) { @@ -86,7 +84,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t * args_buff[i] = (arg >> i * 8) & 0xFF; *result = 0; - obj = acpi_evaluate_dsm_typed(handle, nouveau_op_dsm_muid, 0x00000100, + obj = acpi_evaluate_dsm_typed(handle, &nouveau_op_dsm_muid, 0x00000100, func, &argv4, ACPI_TYPE_BUFFER); if (!obj) { acpi_handle_info(handle, "failed to evaluate _DSM\n"); @@ -138,7 +136,7 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg) .integer.value = arg, }; - obj = acpi_evaluate_dsm_typed(handle, nouveau_dsm_muid, 0x00000102, + obj = acpi_evaluate_dsm_typed(handle, &nouveau_dsm_muid, 0x00000102, func, &argv4, ACPI_TYPE_INTEGER); if (!obj) { acpi_handle_info(handle, "failed to evaluate _DSM\n"); @@ -259,7 +257,7 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out if (!acpi_has_method(dhandle, "_DSM")) return; - supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102, + supports_mux = acpi_check_dsm(dhandle, &nouveau_dsm_muid, 0x00000102, 1 << NOUVEAU_DSM_POWER); optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 147b22163f9f..dab78c660dd6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1158,8 +1158,6 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg) return -ENODEV; if (WARN_ON(msg->size > 16)) return -E2BIG; - if (msg->size == 0) - return msg->size; ret = nvkm_i2c_aux_acquire(aux); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c 
b/drivers/gpu/drm/nouveau/nouveau_display.c index 8d1df5678eaa..f362c9fa8b3b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -409,7 +409,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) struct nouveau_display *disp = nouveau_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct drm_connector *connector; - struct drm_crtc *crtc; if (!suspend) { if (drm_drv_uses_atomic_modeset(dev)) @@ -418,10 +417,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) drm_crtc_force_disable_all(dev); } - /* Make sure that drm and hw vblank irqs get properly disabled. */ - drm_for_each_crtc(crtc, dev) - drm_crtc_vblank_off(crtc); - /* disable flip completion events */ nvif_notify_put(&drm->flip); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index e3132a2ce34d..2bc0dc985214 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -3674,15 +3674,24 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) drm_mode_connector_attach_encoder(connector, encoder); if (dcbe->type == DCB_OUTPUT_DP) { + struct nv50_disp *disp = nv50_disp(encoder->dev); struct nvkm_i2c_aux *aux = nvkm_i2c_aux_find(i2c, dcbe->i2c_index); if (aux) { - nv_encoder->i2c = &nv_connector->aux.ddc; + if (disp->disp->oclass < GF110_DISP) { + /* HW has no support for address-only + * transactions, so we're required to + * use custom I2C-over-AUX code. + */ + nv_encoder->i2c = &aux->i2c; + } else { + nv_encoder->i2c = &nv_connector->aux.ddc; + } nv_encoder->aux = aux; } /*TODO: Use DP Info Table to check for support. */ - if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) { + if (disp->disp->oclass >= GF110_DISP) { ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16, nv_connector->base.base.id, &nv_encoder->dp.mstm); @@ -3931,6 +3940,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name, asyh->clr.mask, asyh->set.mask); + if (crtc_state->active && !asyh->state.active) + drm_crtc_vblank_off(crtc); if (asyh->clr.mask) { nv50_head_flush_clr(head, asyh, atom->flush_disable); @@ -4016,11 +4027,13 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) nv50_head_flush_set(head, asyh); interlock_core = 1; } - } - for_each_crtc_in_state(state, crtc, crtc_state, i) { - if (crtc->state->event) - drm_crtc_vblank_get(crtc); + if (asyh->state.active) { + if (!crtc_state->active) + drm_crtc_vblank_on(crtc); + if (asyh->state.event) + drm_crtc_vblank_get(crtc); + } } /* Update plane(s). 
*/ @@ -4067,12 +4080,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) if (crtc->state->event) { unsigned long flags; /* Get correct count/ts if racing with vblank irq */ - drm_accurate_vblank_count(crtc); + if (crtc->state->active) + drm_accurate_vblank_count(crtc); spin_lock_irqsave(&crtc->dev->event_lock, flags); drm_crtc_send_vblank_event(crtc, crtc->state->event); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); crtc->state->event = NULL; - drm_crtc_vblank_put(crtc); + if (crtc->state->active) + drm_crtc_vblank_put(crtc); } } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c index c7c84d34d97e..88582af8bd89 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c @@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine) /* Create output path objects for each VBIOS display path. */ i = -1; while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) { + if (ver < 0x40) /* No support for chipsets prior to NV50. */ + break; if (dcbE.type == DCB_OUTPUT_UNUSED) continue; if (dcbE.type == DCB_OUTPUT_EOL) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h index a24312fb0228..a1e8bf48b778 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h @@ -22,6 +22,7 @@ struct nvkm_ior { unsigned proto_evo:4; enum nvkm_ior_proto { CRT, + TV, TMDS, LVDS, DP, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h index 19c635663399..6ea19466f436 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h @@ -22,7 +22,7 @@ struct nv50_disp { u8 type[3]; } pior; - struct nv50_disp_chan *chan[17]; + struct nv50_disp_chan *chan[21]; }; void nv50_disp_super_1(struct nv50_disp *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index 85aff85394ac..be9e7f8c3b23 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -62,6 +62,7 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type) case 0: switch (outp->info.type) { case DCB_OUTPUT_ANALOG: *type = DAC; return CRT; + case DCB_OUTPUT_TV : *type = DAC; return TV; case DCB_OUTPUT_TMDS : *type = SOR; return TMDS; case DCB_OUTPUT_LVDS : *type = SOR; return LVDS; case DCB_OUTPUT_DP : *type = SOR; return DP; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c index c794b2c2d21e..6d8f21290aa2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c @@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base) if (bar->bar[0].mem) { addr = nvkm_memory_addr(bar->bar[0].mem) >> 12; - nvkm_wr32(device, 0x001714, 0xc0000000 | addr); + nvkm_wr32(device, 0x001714, 0x80000000 | addr); } return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild index 48f01e40b8fc..b768e66a472b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild @@ -25,6 +25,7 @@ nvkm-y += nvkm/subdev/i2c/bit.o nvkm-y += nvkm/subdev/i2c/aux.o nvkm-y += nvkm/subdev/i2c/auxg94.o +nvkm-y += nvkm/subdev/i2c/auxgf119.o nvkm-y += nvkm/subdev/i2c/auxgm200.o nvkm-y += 
nvkm/subdev/i2c/anx9805.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c index d172e42dd228..4c1f547da463 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c @@ -117,6 +117,10 @@ int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type, u32 addr, u8 *data, u8 *size) { + if (!*size && !aux->func->address_only) { + AUX_ERR(aux, "address-only transaction dropped"); + return -ENOSYS; + } return aux->func->xfer(aux, retry, type, addr, data, size); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h index 27a4a39c87f0..9587ab456d9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h @@ -3,6 +3,7 @@ #include "pad.h" struct nvkm_i2c_aux_func { + bool address_only; int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type, u32 addr, u8 *data, u8 *size); int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw, @@ -17,7 +18,12 @@ void nvkm_i2c_aux_del(struct nvkm_i2c_aux **); int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type, u32 addr, u8 *data, u8 *size); +int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *, + int, u8, struct nvkm_i2c_aux **); + int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); +int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *); +int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); #define AUX_MSG(b,l,f,a...) do { \ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c index ab8cb196c34e..c8ab1b5741a3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c @@ -72,7 +72,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux) return 0; } -static int +int g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, u8 type, u32 addr, u8 *data, u8 *size) { @@ -105,9 +105,9 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, } ctrl = nvkm_rd32(device, 0x00e4e4 + base); - ctrl &= ~0x0001f0ff; + ctrl &= ~0x0001f1ff; ctrl |= type << 12; - ctrl |= *size - 1; + ctrl |= (*size ? (*size - 1) : 0x00000100); nvkm_wr32(device, 0x00e4e0 + base, addr); /* (maybe) retry transaction a number of times on failure... */ @@ -160,14 +160,10 @@ out: return ret < 0 ? 
ret : (stat & 0x000f0000) >> 16; } -static const struct nvkm_i2c_aux_func -g94_i2c_aux_func = { - .xfer = g94_i2c_aux_xfer, -}; - int -g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, - struct nvkm_i2c_aux **paux) +g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func, + struct nvkm_i2c_pad *pad, int index, u8 drive, + struct nvkm_i2c_aux **paux) { struct g94_i2c_aux *aux; @@ -175,8 +171,20 @@ g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, return -ENOMEM; *paux = &aux->base; - nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base); + nvkm_i2c_aux_ctor(func, pad, index, &aux->base); aux->ch = drive; aux->base.intr = 1 << aux->ch; return 0; } + +static const struct nvkm_i2c_aux_func +g94_i2c_aux = { + .xfer = g94_i2c_aux_xfer, +}; + +int +g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, + struct nvkm_i2c_aux **paux) +{ + return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c new file mode 100644 index 000000000000..dab40cd8fe3a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c @@ -0,0 +1,35 @@ +/* + * Copyright 2017 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "aux.h" + +static const struct nvkm_i2c_aux_func +gf119_i2c_aux = { + .address_only = true, + .xfer = g94_i2c_aux_xfer, +}; + +int +gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, + struct nvkm_i2c_aux **paux) +{ + return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c index ee091fa79628..7ef60895f43a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c @@ -105,9 +105,9 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, } ctrl = nvkm_rd32(device, 0x00d954 + base); - ctrl &= ~0x0001f0ff; + ctrl &= ~0x0001f1ff; ctrl |= type << 12; - ctrl |= *size - 1; + ctrl |= (*size ? (*size - 1) : 0x00000100); nvkm_wr32(device, 0x00d950 + base, addr); /* (maybe) retry transaction a number of times on failure... 
*/ @@ -162,6 +162,7 @@ out: static const struct nvkm_i2c_aux_func gm200_i2c_aux_func = { + .address_only = true, .xfer = gm200_i2c_aux_xfer, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c index d53212f1aa52..3bc4d0310076 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c @@ -28,7 +28,7 @@ static const struct nvkm_i2c_pad_func gf119_i2c_pad_s_func = { .bus_new_4 = gf119_i2c_bus_new, - .aux_new_6 = g94_i2c_aux_new, + .aux_new_6 = gf119_i2c_aux_new, .mode = g94_i2c_pad_mode, }; @@ -41,7 +41,7 @@ gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad) static const struct nvkm_i2c_pad_func gf119_i2c_pad_x_func = { .bus_new_4 = gf119_i2c_bus_new, - .aux_new_6 = g94_i2c_aux_new, + .aux_new_6 = gf119_i2c_aux_new, }; int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c index e3e2f5e83815..f44682d62f75 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c @@ -81,10 +81,9 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version) { struct nvkm_subdev *subdev = &mxm->subdev; struct nvkm_device *device = subdev->device; - static char muid[] = { - 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C, - 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65 - }; + static guid_t muid = + GUID_INIT(0x4004A400, 0x917D, 0x4CF2, + 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65); u32 mxms_args[] = { 0x00000000 }; union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, @@ -105,7 +104,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version) * unless you pass in exactly the version it supports.. */ rev = (version & 0xf0) << 4 | (version & 0x0f); - obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4); + obj = acpi_evaluate_dsm(handle, &muid, rev, 0x00000010, &argv4); if (!obj) { nvkm_debug(subdev, "DSM MXMS failed\n"); return false; diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index a5d3cd3ecb5f..4acbb944bcd2 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -105,7 +105,6 @@ radeon-y += \ vce_v2_0.o \ radeon_kfd.o -radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o radeon-$(CONFIG_ACPI) += radeon_acpi.o diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index fa4f8f008e4d..e67ed383e11b 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -31,6 +31,7 @@ #include "radeon_asic.h" #include "atom.h" #include <linux/backlight.h> +#include <linux/dmi.h> extern int atom_debug; @@ -2184,9 +2185,17 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx) goto assigned; } - /* on DCE32 and encoder can driver any block so just crtc id */ + /* + * On DCE32 any encoder can drive any block so usually just use crtc id, + * but Apple thinks different at least on iMac10,1, so there use linkb, + * otherwise the internal eDP panel will stay dark. + */ if (ASIC_IS_DCE32(rdev)) { - enc_idx = radeon_crtc->crtc_id; + if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1")) + enc_idx = (dig->linkb) ? 
1 : 0; + else + enc_idx = radeon_crtc->crtc_id; + goto assigned; } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 4074805034da..3cb6c55b268d 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -9268,8 +9268,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev, u32 tmp, wm_mask; if (radeon_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); /* watermark for high clocks */ if ((rdev->pm.pm_method == PM_METHOD_DPM) && diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 44527e679d31..24fe66c89dfb 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2163,8 +2163,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, fixed20_12 a, b, c; if (radeon_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); priority_a_cnt = 0; priority_b_cnt = 0; dram_channels = evergreen_get_number_of_dram_channels(rdev); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 68be1bfa22b9..5008f3d4cccc 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -377,7 +377,7 @@ struct radeon_fence { unsigned ring; bool is_vm_update; - wait_queue_t fence_wake; + wait_queue_entry_t fence_wake; }; int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 432480ff9d22..3178ba0c537c 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev) rdev->pdev->subsystem_vendor == 0x103c && rdev->pdev->subsystem_device == 0x280a) return; + /* quirk for rs4xx Toshiba Sattellite L20-183 latop to make it resume + * - it hangs on resume inside the dynclk 1 table. 
+ */ + if (rdev->family == CHIP_RS400 && + rdev->pdev->subsystem_vendor == 0x1179 && + rdev->pdev->subsystem_device == 0xff31) + return; /* DYN CLK 1 */ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 6ecf42783d4b..997131d58c7f 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -113,7 +113,6 @@ static inline bool radeon_is_atpx_hybrid(void) { return false; } #endif #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) -#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) struct radeon_px_quirk { u32 chip_vendor; @@ -136,8 +135,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { * https://bugzilla.kernel.org/show_bug.cgi?id=51381 */ { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, - /* macbook pro 8.2 */ - { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, + /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU + * https://bugs.freedesktop.org/show_bug.cgi?id=101491 + */ + { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, { 0, 0, 0, 0, 0 }, }; @@ -1241,25 +1242,17 @@ static void radeon_check_arguments(struct radeon_device *rdev) static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); - struct radeon_device *rdev = dev->dev_private; if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF) return; if (state == VGA_SWITCHEROO_ON) { - unsigned d3_delay = dev->pdev->d3_delay; - pr_info("radeon: switched on\n"); /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP)) - dev->pdev->d3_delay = 20; - radeon_resume_kms(dev, true, true); - dev->pdev->d3_delay = d3_delay; - dev->switch_power_state = DRM_SWITCH_POWER_ON; drm_kms_helper_poll_enable(dev); } else { diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index b23c771f4216..74abd161237b 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -38,6 +38,7 @@ #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/vga_switcheroo.h> +#include <linux/compat.h> #include <drm/drm_gem.h> #include <drm/drm_fb_helper.h> @@ -150,8 +151,6 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj); struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *); void *radeon_gem_prime_vmap(struct drm_gem_object *obj); void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); -extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, - unsigned long arg); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) @@ -509,6 +508,21 @@ long radeon_drm_ioctl(struct file *filp, return ret; } +#ifdef CONFIG_COMPAT +static long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + + ret = radeon_drm_ioctl(filp, cmd, arg); + + return ret; +} +#endif + static const struct dev_pm_ops radeon_pm_ops = { .suspend = radeon_pmops_suspend, .resume = radeon_pmops_resume, diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index ef09f0a63754..e86f2bd38410 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ 
b/drivers/gpu/drm/radeon/radeon_fence.c @@ -158,7 +158,7 @@ int radeon_fence_emit(struct radeon_device *rdev, * for the fence locking itself, so unlocked variants are used for * fence_signal, and remove_wait_queue. */ -static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key) +static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key) { struct radeon_fence *fence; u64 seq; diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c deleted file mode 100644 index 0b98ea134579..000000000000 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c +++ /dev/null @@ -1,424 +0,0 @@ -/** - * \file radeon_ioc32.c - * - * 32-bit ioctl compatibility routines for the Radeon DRM. - * - * \author Paul Mackerras <paulus@samba.org> - * - * Copyright (C) Paul Mackerras 2005 - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- */ -#include <linux/compat.h> - -#include <drm/drmP.h> -#include <drm/radeon_drm.h> -#include "radeon_drv.h" - -typedef struct drm_radeon_init32 { - int func; - u32 sarea_priv_offset; - int is_pci; - int cp_mode; - int gart_size; - int ring_size; - int usec_timeout; - - unsigned int fb_bpp; - unsigned int front_offset, front_pitch; - unsigned int back_offset, back_pitch; - unsigned int depth_bpp; - unsigned int depth_offset, depth_pitch; - - u32 fb_offset; - u32 mmio_offset; - u32 ring_offset; - u32 ring_rptr_offset; - u32 buffers_offset; - u32 gart_textures_offset; -} drm_radeon_init32_t; - -static int compat_radeon_cp_init(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_radeon_init32_t init32; - drm_radeon_init_t __user *init; - - if (copy_from_user(&init32, (void __user *)arg, sizeof(init32))) - return -EFAULT; - - init = compat_alloc_user_space(sizeof(*init)); - if (!access_ok(VERIFY_WRITE, init, sizeof(*init)) - || __put_user(init32.func, &init->func) - || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset) - || __put_user(init32.is_pci, &init->is_pci) - || __put_user(init32.cp_mode, &init->cp_mode) - || __put_user(init32.gart_size, &init->gart_size) - || __put_user(init32.ring_size, &init->ring_size) - || __put_user(init32.usec_timeout, &init->usec_timeout) - || __put_user(init32.fb_bpp, &init->fb_bpp) - || __put_user(init32.front_offset, &init->front_offset) - || __put_user(init32.front_pitch, &init->front_pitch) - || __put_user(init32.back_offset, &init->back_offset) - || __put_user(init32.back_pitch, &init->back_pitch) - || __put_user(init32.depth_bpp, &init->depth_bpp) - || __put_user(init32.depth_offset, &init->depth_offset) - || __put_user(init32.depth_pitch, &init->depth_pitch) - || __put_user(init32.fb_offset, &init->fb_offset) - || __put_user(init32.mmio_offset, &init->mmio_offset) - || __put_user(init32.ring_offset, &init->ring_offset) - || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset) - || __put_user(init32.buffers_offset, &init->buffers_offset) - || __put_user(init32.gart_textures_offset, - &init->gart_textures_offset)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init); -} - -typedef struct drm_radeon_clear32 { - unsigned int flags; - unsigned int clear_color; - unsigned int clear_depth; - unsigned int color_mask; - unsigned int depth_mask; /* misnamed field: should be stencil */ - u32 depth_boxes; -} drm_radeon_clear32_t; - -static int compat_radeon_cp_clear(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_radeon_clear32_t clr32; - drm_radeon_clear_t __user *clr; - - if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32))) - return -EFAULT; - - clr = compat_alloc_user_space(sizeof(*clr)); - if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr)) - || __put_user(clr32.flags, &clr->flags) - || __put_user(clr32.clear_color, &clr->clear_color) - || __put_user(clr32.clear_depth, &clr->clear_depth) - || __put_user(clr32.color_mask, &clr->color_mask) - || __put_user(clr32.depth_mask, &clr->depth_mask) - || __put_user((void __user *)(unsigned long)clr32.depth_boxes, - &clr->depth_boxes)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr); -} - -typedef struct drm_radeon_stipple32 { - u32 mask; -} drm_radeon_stipple32_t; - -static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_radeon_stipple32_t __user *argp = (void __user *)arg; - drm_radeon_stipple_t __user *request; - u32 mask; - - if 
(get_user(mask, &argp->mask)) - return -EFAULT; - - request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) - || __put_user((unsigned int __user *)(unsigned long)mask, - &request->mask)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request); -} - -typedef struct drm_radeon_tex_image32 { - unsigned int x, y; /* Blit coordinates */ - unsigned int width, height; - u32 data; -} drm_radeon_tex_image32_t; - -typedef struct drm_radeon_texture32 { - unsigned int offset; - int pitch; - int format; - int width; /* Texture image coordinates */ - int height; - u32 image; -} drm_radeon_texture32_t; - -static int compat_radeon_cp_texture(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_radeon_texture32_t req32; - drm_radeon_texture_t __user *request; - drm_radeon_tex_image32_t img32; - drm_radeon_tex_image_t __user *image; - - if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) - return -EFAULT; - if (req32.image == 0) - return -EINVAL; - if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image, - sizeof(img32))) - return -EFAULT; - - request = compat_alloc_user_space(sizeof(*request) + sizeof(*image)); - if (!access_ok(VERIFY_WRITE, request, - sizeof(*request) + sizeof(*image))) - return -EFAULT; - image = (drm_radeon_tex_image_t __user *) (request + 1); - - if (__put_user(req32.offset, &request->offset) - || __put_user(req32.pitch, &request->pitch) - || __put_user(req32.format, &request->format) - || __put_user(req32.width, &request->width) - || __put_user(req32.height, &request->height) - || __put_user(image, &request->image) - || __put_user(img32.x, &image->x) - || __put_user(img32.y, &image->y) - || __put_user(img32.width, &image->width) - || __put_user(img32.height, &image->height) - || __put_user((const void __user *)(unsigned long)img32.data, - &image->data)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request); -} - -typedef struct drm_radeon_vertex2_32 { - int idx; /* Index of vertex buffer */ - int discard; /* Client finished with buffer? 
*/ - int nr_states; - u32 state; - int nr_prims; - u32 prim; -} drm_radeon_vertex2_32_t; - -static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_radeon_vertex2_32_t req32; - drm_radeon_vertex2_t __user *request; - - if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) - return -EFAULT; - - request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) - || __put_user(req32.idx, &request->idx) - || __put_user(req32.discard, &request->discard) - || __put_user(req32.nr_states, &request->nr_states) - || __put_user((void __user *)(unsigned long)req32.state, - &request->state) - || __put_user(req32.nr_prims, &request->nr_prims) - || __put_user((void __user *)(unsigned long)req32.prim, - &request->prim)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request); -} - -typedef struct drm_radeon_cmd_buffer32 { - int bufsz; - u32 buf; - int nbox; - u32 boxes; -} drm_radeon_cmd_buffer32_t; - -static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_radeon_cmd_buffer32_t req32; - drm_radeon_cmd_buffer_t __user *request; - - if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) - return -EFAULT; - - request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) - || __put_user(req32.bufsz, &request->bufsz) - || __put_user((void __user *)(unsigned long)req32.buf, - &request->buf) - || __put_user(req32.nbox, &request->nbox) - || __put_user((void __user *)(unsigned long)req32.boxes, - &request->boxes)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request); -} - -typedef struct drm_radeon_getparam32 { - int param; - u32 value; -} drm_radeon_getparam32_t; - -static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_radeon_getparam32_t req32; - drm_radeon_getparam_t __user *request; - - if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) - return -EFAULT; - - request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) - || __put_user(req32.param, &request->param) - || __put_user((void __user *)(unsigned long)req32.value, - &request->value)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request); -} - -typedef struct drm_radeon_mem_alloc32 { - int region; - int alignment; - int size; - u32 region_offset; /* offset from start of fb or GART */ -} drm_radeon_mem_alloc32_t; - -static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_radeon_mem_alloc32_t req32; - drm_radeon_mem_alloc_t __user *request; - - if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) - return -EFAULT; - - request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) - || __put_user(req32.region, &request->region) - || __put_user(req32.alignment, &request->alignment) - || __put_user(req32.size, &request->size) - || __put_user((int __user *)(unsigned long)req32.region_offset, - &request->region_offset)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request); -} - -typedef struct drm_radeon_irq_emit32 { - u32 irq_seq; -} drm_radeon_irq_emit32_t; - -static int compat_radeon_irq_emit(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_radeon_irq_emit32_t req32; - 
drm_radeon_irq_emit_t __user *request; - - if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) - return -EFAULT; - - request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) - || __put_user((int __user *)(unsigned long)req32.irq_seq, - &request->irq_seq)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request); -} - -/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */ -#if defined (CONFIG_X86_64) || defined(CONFIG_IA64) -typedef struct drm_radeon_setparam32 { - int param; - u64 value; -} __attribute__((packed)) drm_radeon_setparam32_t; - -static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, - unsigned long arg) -{ - drm_radeon_setparam32_t req32; - drm_radeon_setparam_t __user *request; - - if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) - return -EFAULT; - - request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) - || __put_user(req32.param, &request->param) - || __put_user((void __user *)(unsigned long)req32.value, - &request->value)) - return -EFAULT; - - return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request); -} -#else -#define compat_radeon_cp_setparam NULL -#endif /* X86_64 || IA64 */ - -static drm_ioctl_compat_t *radeon_compat_ioctls[] = { - [DRM_RADEON_CP_INIT] = compat_radeon_cp_init, - [DRM_RADEON_CLEAR] = compat_radeon_cp_clear, - [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple, - [DRM_RADEON_TEXTURE] = compat_radeon_cp_texture, - [DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2, - [DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf, - [DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam, - [DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam, - [DRM_RADEON_ALLOC] = compat_radeon_mem_alloc, - [DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit, -}; - -/** - * Called whenever a 32-bit process running under a 64-bit kernel - * performs an ioctl on /dev/dri/card<n>. - * - * \param filp file pointer. - * \param cmd command. - * \param arg user argument. - * \return zero on success or negative number on failure. 
- */ -long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - unsigned int nr = DRM_IOCTL_NR(cmd); - drm_ioctl_compat_t *fn = NULL; - int ret; - - if (nr < DRM_COMMAND_BASE) - return drm_compat_ioctl(filp, cmd, arg); - - if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) - fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; - - if (fn != NULL) - ret = (*fn) (filp, cmd, arg); - else - ret = drm_ioctl(filp, cmd, arg); - - return ret; -} - -long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - unsigned int nr = DRM_IOCTL_NR(cmd); - int ret; - - if (nr < DRM_COMMAND_BASE) - return drm_compat_ioctl(filp, cmd, arg); - - ret = radeon_drm_ioctl(filp, cmd, arg); - - return ret; -} diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 699fe7f9b8bf..a2ab6dcdf4a2 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -184,7 +184,6 @@ void radeon_kfd_device_init(struct radeon_device *rdev) if (rdev->kfd) { struct kgd2kfd_shared_resources gpu_resources = { .compute_vmid_bitmap = 0xFF00, - .num_mec = 1, .num_pipe_per_mec = 4, .num_queue_per_pipe = 8 }; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 7431eb4a11b7..d34d1cf33895 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -621,7 +621,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, } /* TODO: is this still necessary on NI+ ? */ - if ((cmd == 0 || cmd == 1 || cmd == 0x3) && + if ((cmd == 0 || cmd == 0x3) && (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", start, end); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index c88a80e1e3ad..1907c950d76f 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -2308,8 +2308,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev, fixed20_12 a, b, c; if (radeon_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); priority_a_cnt = 0; priority_b_cnt = 0; diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 50c41c0a50ef..dcc539ba85d6 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -5,6 +5,10 @@ config DRM_ROCKCHIP select DRM_KMS_HELPER select DRM_PANEL select VIDEOMODE_HELPERS + select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP + select DRM_DW_HDMI if ROCKCHIP_DW_HDMI + select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI + select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC help Choose this option if you have a Rockchip soc chipset. This driver provides kernel mode setting and buffer @@ -12,10 +16,10 @@ config DRM_ROCKCHIP 2D or 3D acceleration; acceleration is performed by other IP found on the SoC. +if DRM_ROCKCHIP + config ROCKCHIP_ANALOGIX_DP bool "Rockchip specific extensions for Analogix DP driver" - depends on DRM_ROCKCHIP - select DRM_ANALOGIX_DP help This selects support for Rockchip SoC specific extensions for the Analogix Core DP driver. 
If you want to enable DP @@ -23,9 +27,7 @@ config ROCKCHIP_ANALOGIX_DP config ROCKCHIP_CDN_DP bool "Rockchip cdn DP" - depends on DRM_ROCKCHIP - depends on EXTCON - select SND_SOC_HDMI_CODEC if SND_SOC + depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m) help This selects support for Rockchip SoC specific extensions for the cdn DP driver. If you want to enable Dp on @@ -34,8 +36,6 @@ config ROCKCHIP_CDN_DP config ROCKCHIP_DW_HDMI bool "Rockchip specific extensions for Synopsys DW HDMI" - depends on DRM_ROCKCHIP - select DRM_DW_HDMI help This selects support for Rockchip SoC specific extensions for the Synopsys DesignWare HDMI driver. If you want to @@ -44,8 +44,6 @@ config ROCKCHIP_DW_HDMI config ROCKCHIP_DW_MIPI_DSI bool "Rockchip specific extensions for Synopsys DW MIPI DSI" - depends on DRM_ROCKCHIP - select DRM_MIPI_DSI help This selects support for Rockchip SoC specific extensions for the Synopsys DesignWare HDMI driver. If you want to @@ -54,8 +52,9 @@ config ROCKCHIP_DW_MIPI_DSI config ROCKCHIP_INNO_HDMI bool "Rockchip specific extensions for Innosilicon HDMI" - depends on DRM_ROCKCHIP help This selects support for Rockchip SoC specific extensions for the Innosilicon HDMI driver. If you want to enable HDMI on RK3036 based SoC, you should select this option. + +endif diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 14fa1f8351e8..9b0b0588bbed 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -1195,7 +1195,7 @@ static int cdn_dp_probe(struct platform_device *pdev) continue; port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); - if (!dp) + if (!port) return -ENOMEM; port->extcon = extcon; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 47905faf5586..c7e96b82cf63 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -45,13 +45,13 @@ struct rockchip_crtc_state { * * @crtc: array of enabled CRTCs, used to map from "pipe" to drm_crtc. * @num_pipe: number of pipes for this device. + * @mm_lock: protect drm_mm on multi-threads. 
*/ struct rockchip_drm_private { struct drm_fb_helper fbdev_helper; struct drm_gem_object *fbdev_bo; struct drm_atomic_state *state; struct iommu_domain *domain; - /* protect drm_mm on multi-threads */ struct mutex mm_lock; struct drm_mm mm; struct list_head psr_list; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index df9e57064f19..b74ac717e56a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -29,12 +29,11 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj) ssize_t ret; mutex_lock(&private->mm_lock); - ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm, rk_obj->base.size, PAGE_SIZE, 0, 0); - mutex_unlock(&private->mm_lock); + if (ret < 0) { DRM_ERROR("out of I/O virtual memory: %zd\n", ret); return ret; @@ -56,7 +55,9 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj) return 0; err_remove_node: + mutex_lock(&private->mm_lock); drm_mm_remove_node(&rk_obj->mm); + mutex_unlock(&private->mm_lock); return ret; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 5d450332c2fd..2900f1410d95 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop) static int vop_enable(struct drm_crtc *crtc) { struct vop *vop = to_vop(crtc); - int ret; + int ret, i; ret = pm_runtime_get_sync(vop->dev); if (ret < 0) { @@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc) } memcpy(vop->regs, vop->regsbak, vop->len); + /* + * We need to make sure that all windows are disabled before we + * enable the crtc. Otherwise we might try to scan from a destroyed + * buffer later. + */ + for (i = 0; i < vop->data->win_size; i++) { + struct vop_win *vop_win = &vop->win[i]; + const struct vop_win_data *win = vop_win->data; + + spin_lock(&vop->reg_lock); + VOP_WIN_SET(vop, win, enable, 0); + spin_unlock(&vop->reg_lock); + } + vop_cfg_done(vop); /* @@ -566,28 +580,11 @@ err_put_pm_runtime: static void vop_crtc_disable(struct drm_crtc *crtc) { struct vop *vop = to_vop(crtc); - int i; WARN_ON(vop->event); rockchip_drm_psr_deactivate(&vop->crtc); - /* - * We need to make sure that all windows are disabled before we - * disable that crtc. Otherwise we might try to scan from a destroyed - * buffer later. - */ - for (i = 0; i < vop->data->win_size; i++) { - struct vop_win *vop_win = &vop->win[i]; - const struct vop_win_data *win = vop_win->data; - - spin_lock(&vop->reg_lock); - VOP_WIN_SET(vop, win, enable, 0); - spin_unlock(&vop->reg_lock); - } - - vop_cfg_done(vop); - drm_crtc_vblank_off(crtc); /* @@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane, * Src.x1 can be odd when do clip, but yuv plane start point * need align with 2 pixel. 
*/ - if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) + if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) { + DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n"); return -EINVAL; + } return 0; } @@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane, spin_lock(&vop->reg_lock); VOP_WIN_SET(vop, win, format, format); - VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2); + VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4)); VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); if (is_yuv_support(fb->format->format)) { int hsub = drm_format_horz_chroma_subsampling(fb->format->format); @@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane, offset += (src->y1 >> 16) * fb->pitches[1] / vsub; dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1]; - VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2); + VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4)); VOP_WIN_SET(vop, win, uv_mst, dma_addr); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index 9979fd0c2282..27eefbfcf3d0 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h @@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h, act_height = (src_h + vskiplines - 1) / vskiplines; + if (act_height == dst_h) + return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines; + return GET_SCL_FT_BILI_DN(act_height, dst_h); } diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig index 2c4817fb0890..8fe5b184b4e8 100644 --- a/drivers/gpu/drm/stm/Kconfig +++ b/drivers/gpu/drm/stm/Kconfig @@ -7,7 +7,6 @@ config DRM_STM select DRM_PANEL select VIDEOMODE_HELPERS select FB_PROVIDE_GET_FB_UNMAPPED_AREA - default y help Enable support for the on-chip display controller on diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index ac15cc65af36..518f4b69ea53 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -562,18 +562,6 @@ fail: #ifdef CONFIG_DRM_TEGRA_STAGING -static struct tegra_drm_context * -tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id) -{ - struct tegra_drm_context *context; - - mutex_lock(&file->lock); - context = idr_find(&file->contexts, id); - mutex_unlock(&file->lock); - - return context; -} - static int tegra_gem_create(struct drm_device *drm, void *data, struct drm_file *file) { @@ -662,7 +650,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv, if (err < 0) return err; - err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL); + err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL); if (err < 0) { client->ops->close_channel(context); return err; @@ -717,7 +705,7 @@ static int tegra_close_channel(struct drm_device *drm, void *data, mutex_lock(&fpriv->lock); - context = tegra_drm_file_get_context(fpriv, args->context); + context = idr_find(&fpriv->contexts, args->context); if (!context) { err = -EINVAL; goto unlock; @@ -742,7 +730,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data, mutex_lock(&fpriv->lock); - context = tegra_drm_file_get_context(fpriv, args->context); + context = idr_find(&fpriv->contexts, args->context); if (!context) { err = -ENODEV; goto unlock; @@ -771,7 +759,7 @@ static int tegra_submit(struct drm_device *drm, void *data, mutex_lock(&fpriv->lock); - context = tegra_drm_file_get_context(fpriv, args->context); + context = idr_find(&fpriv->contexts, 
args->context); if (!context) { err = -ENODEV; goto unlock; @@ -796,7 +784,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data, mutex_lock(&fpriv->lock); - context = tegra_drm_file_get_context(fpriv, args->context); + context = idr_find(&fpriv->contexts, args->context); if (!context) { err = -ENODEV; goto unlock; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a6d7fcb99c0b..22b57020790d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1353,7 +1353,6 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) mem_type); return ret; } - dma_fence_put(man->move); man->use_type = false; man->has_type = false; @@ -1369,6 +1368,9 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) ret = (*man->func->takedown)(man); } + dma_fence_put(man->move); + man->move = NULL; + return ret; } EXPORT_SYMBOL(ttm_bo_clean_mm); diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 403bbd5f99a9..a12cc7ea99b6 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -520,6 +520,34 @@ static void vc4_crtc_disable(struct drm_crtc *crtc) SCALER_DISPSTATX_EMPTY); } +static void vc4_crtc_update_dlist(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); + + if (crtc->state->event) { + unsigned long flags; + + crtc->state->event->pipe = drm_crtc_index(crtc); + + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + spin_lock_irqsave(&dev->event_lock, flags); + vc4_crtc->event = crtc->state->event; + crtc->state->event = NULL; + + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + vc4_state->mm.start); + + spin_unlock_irqrestore(&dev->event_lock, flags); + } else { + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + vc4_state->mm.start); + } +} + static void vc4_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -530,6 +558,12 @@ static void vc4_crtc_enable(struct drm_crtc *crtc) require_hvs_enabled(dev); + /* Enable vblank irq handling before crtc is started otherwise + * drm_crtc_get_vblank() fails in vc4_crtc_update_dlist(). + */ + drm_crtc_vblank_on(crtc); + vc4_crtc_update_dlist(crtc); + /* Turn on the scaler, which will wait for vstart to start * compositing. */ @@ -541,9 +575,6 @@ static void vc4_crtc_enable(struct drm_crtc *crtc) /* Turn on the pixel valve, which will emit the vstart signal. */ CRTC_WRITE(PV_V_CONTROL, CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN); - - /* Enable vblank irq handling after crtc is started. 
*/ - drm_crtc_vblank_on(crtc); } static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc, @@ -598,7 +629,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); struct drm_plane *plane; bool debug_dump_regs = false; @@ -620,25 +650,15 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); - if (crtc->state->event) { - unsigned long flags; - - crtc->state->event->pipe = drm_crtc_index(crtc); - - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - - spin_lock_irqsave(&dev->event_lock, flags); - vc4_crtc->event = crtc->state->event; - crtc->state->event = NULL; - - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), - vc4_state->mm.start); - - spin_unlock_irqrestore(&dev->event_lock, flags); - } else { - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), - vc4_state->mm.start); - } + /* Only update DISPLIST if the CRTC was already running and is not + * being disabled. + * vc4_crtc_enable() takes care of updating the dlist just after + * re-enabling VBLANK interrupts and before enabling the engine. + * If the CRTC is being disabled, there's no point in updating this + * information. + */ + if (crtc->state->active && old_state->active) + vc4_crtc_update_dlist(crtc); if (debug_dump_regs) { DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 35bf781e418e..c7056322211c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -30,49 +30,49 @@ #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_page_alloc.h> -static struct ttm_place vram_placement_flags = { +static const struct ttm_place vram_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED }; -static struct ttm_place vram_ne_placement_flags = { +static const struct ttm_place vram_ne_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; -static struct ttm_place sys_placement_flags = { +static const struct ttm_place sys_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED }; -static struct ttm_place sys_ne_placement_flags = { +static const struct ttm_place sys_ne_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; -static struct ttm_place gmr_placement_flags = { +static const struct ttm_place gmr_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED }; -static struct ttm_place gmr_ne_placement_flags = { +static const struct ttm_place gmr_ne_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT }; -static struct ttm_place mob_placement_flags = { +static const struct ttm_place mob_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED }; -static struct ttm_place mob_ne_placement_flags = { +static const struct ttm_place mob_ne_placement_flags = { .fpfn = 0, .lpfn = 0, .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT @@ -85,7 +85,7 @@ struct ttm_placement vmw_vram_placement = { .busy_placement = &vram_placement_flags }; -static struct ttm_place vram_gmr_placement_flags[] = { +static const struct ttm_place 
vram_gmr_placement_flags[] = { { .fpfn = 0, .lpfn = 0, @@ -97,7 +97,7 @@ static struct ttm_place vram_gmr_placement_flags[] = { } }; -static struct ttm_place gmr_vram_placement_flags[] = { +static const struct ttm_place gmr_vram_placement_flags[] = { { .fpfn = 0, .lpfn = 0, @@ -116,7 +116,7 @@ struct ttm_placement vmw_vram_gmr_placement = { .busy_placement = &gmr_placement_flags }; -static struct ttm_place vram_gmr_ne_placement_flags[] = { +static const struct ttm_place vram_gmr_ne_placement_flags[] = { { .fpfn = 0, .lpfn = 0, @@ -165,7 +165,7 @@ struct ttm_placement vmw_sys_ne_placement = { .busy_placement = &sys_ne_placement_flags }; -static struct ttm_place evictable_placement_flags[] = { +static const struct ttm_place evictable_placement_flags[] = { { .fpfn = 0, .lpfn = 0, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 99a7f4ab7d97..86178796de6c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -779,8 +779,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, if (ret) return ret; - header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL, - &header->handle); + header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL, + &header->handle); if (!header->cb_header) { ret = -ENOMEM; goto out_no_cb_header; @@ -790,7 +790,6 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, cb_hdr = header->cb_header; offset = header->node.start << PAGE_SHIFT; header->cmd = man->map + offset; - memset(cb_hdr, 0, sizeof(*cb_hdr)); if (man->using_mob) { cb_hdr->flags = SVGA_CB_FLAG_MOB; cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; @@ -827,8 +826,8 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE)) return -ENOMEM; - dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL, - &header->handle); + dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL, + &header->handle); if (!dheader) return -ENOMEM; @@ -837,7 +836,6 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, cb_hdr = &dheader->cb_header; header->cb_header = cb_hdr; header->cmd = dheader->cmd; - memset(dheader, 0, sizeof(*dheader)); cb_hdr->status = SVGA_CB_STATUS_NONE; cb_hdr->flags = SVGA_CB_FLAG_NONE; cb_hdr->ptr.pa = (u64)header->handle + diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 13db8a2851ed..36c7b6c839c0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, int ret; cres = kzalloc(sizeof(*cres), GFP_KERNEL); - if (unlikely(cres == NULL)) + if (unlikely(!cres)) return -ENOMEM; cres->hash.key = user_key | (res_type << 24); @@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv) int ret; man = kzalloc(sizeof(*man), GFP_KERNEL); - if (man == NULL) + if (!man) return ERR_PTR(-ENOMEM); man->dev_priv = dev_priv; @@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) list_for_each_entry_safe(entry, next, &man->list, head) vmw_cmdbuf_res_free(man, entry); + drm_ht_remove(&man->resources); kfree(man); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index bcc6d4136c87..4212b3e673bc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -210,8 +210,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, for (i 
= 0; i < SVGA_COTABLE_DX10_MAX; ++i) { uctx->cotables[i] = vmw_cotable_alloc(dev_priv, &uctx->res, i); - if (unlikely(uctx->cotables[i] == NULL)) { - ret = -ENOMEM; + if (unlikely(IS_ERR(uctx->cotables[i]))) { + ret = PTR_ERR(uctx->cotables[i]); goto out_cotables; } } @@ -777,7 +777,7 @@ static int vmw_context_define(struct drm_device *dev, void *data, } ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (unlikely(ctx == NULL)) { + if (unlikely(!ctx)) { ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_user_context_size); ret = -ENOMEM; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 6c026d75c180..d87861bbe971 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -584,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, return ERR_PTR(ret); vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); - if (unlikely(vcotbl == NULL)) { + if (unlikely(!vcotbl)) { ret = -ENOMEM; goto out_no_alloc; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 4a641555b960..4436d53ae16c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -227,7 +227,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { DRM_AUTH | DRM_RENDER_ALLOW), }; -static struct pci_device_id vmw_pci_id_list[] = { +static const struct pci_device_id vmw_pci_id_list[] = { {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, {0, 0, 0} }; @@ -630,7 +630,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) char host_log[100] = {0}; dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); - if (unlikely(dev_priv == NULL)) { + if (unlikely(!dev_priv)) { DRM_ERROR("Failed allocating a device private struct.\n"); return -ENOMEM; } @@ -1035,7 +1035,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) int ret = -ENOMEM; vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); - if (unlikely(vmw_fp == NULL)) + if (unlikely(!vmw_fp)) return ret; vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); @@ -1196,7 +1196,7 @@ static int vmw_master_create(struct drm_device *dev, struct vmw_master *vmaster; vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); - if (unlikely(vmaster == NULL)) + if (unlikely(!vmaster)) return -ENOMEM; vmw_master_init(vmaster); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index c7b53d987f06..2cfb3c93f42a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -264,7 +264,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, } node = kzalloc(sizeof(*node), GFP_KERNEL); - if (unlikely(node == NULL)) { + if (unlikely(!node)) { DRM_ERROR("Failed to allocate a resource validation " "entry.\n"); return -ENOMEM; @@ -452,7 +452,7 @@ static int vmw_resource_relocation_add(struct list_head *list, struct vmw_resource_relocation *rel; rel = kmalloc(sizeof(*rel), GFP_KERNEL); - if (unlikely(rel == NULL)) { + if (unlikely(!rel)) { DRM_ERROR("Failed to allocate a resource relocation.\n"); return -ENOMEM; } @@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - return capable(CAP_SYS_ADMIN) ? 
: -EINVAL; + return -EINVAL; } static int vmw_cmd_ok(struct vmw_private *dev_priv, @@ -2584,7 +2584,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, /** * vmw_cmd_dx_ia_set_vertex_buffers - Validate an - * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command. + * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 6b2708b4eafe..b8bc5bc7de7e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -284,7 +284,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) { struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL); - if (unlikely(fman == NULL)) + if (unlikely(!fman)) return NULL; fman->dev_priv = dev_priv; @@ -541,7 +541,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman, int ret; fence = kzalloc(sizeof(*fence), GFP_KERNEL); - if (unlikely(fence == NULL)) + if (unlikely(!fence)) return -ENOMEM; ret = vmw_fence_obj_init(fman, fence, seqno, @@ -606,7 +606,7 @@ int vmw_user_fence_create(struct drm_file *file_priv, return ret; ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); - if (unlikely(ufence == NULL)) { + if (unlikely(!ufence)) { ret = -ENOMEM; goto out_no_object; } @@ -966,7 +966,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv, struct vmw_fence_manager *fman = fman_from_fence(fence); eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); - if (unlikely(eaction == NULL)) + if (unlikely(!eaction)) return -ENOMEM; eaction->event = event; @@ -1002,7 +1002,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv, int ret; event = kzalloc(sizeof(*event), GFP_KERNEL); - if (unlikely(event == NULL)) { + if (unlikely(!event)) { DRM_ERROR("Failed to allocate an event.\n"); ret = -ENOMEM; goto out_no_space; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index c1900f4390a4..d2b03d4a3c86 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -121,7 +121,7 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, struct vmwgfx_gmrid_man *gman = kzalloc(sizeof(*gman), GFP_KERNEL); - if (unlikely(gman == NULL)) + if (unlikely(!gman)) return -ENOMEM; spin_lock_init(&gman->lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 3d94ea67a825..61e06f0e8cd3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, hotspot_x = du->hotspot_x; hotspot_y = du->hotspot_y; + + if (plane->fb) { + hotspot_x += plane->fb->hot_x; + hotspot_y += plane->fb->hot_y; + } + du->cursor_surface = vps->surf; du->cursor_dmabuf = vps->dmabuf; @@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, vmw_cursor_update_position(dev_priv, true, du->cursor_x + hotspot_x, du->cursor_y + hotspot_y); + + du->core_hotspot_x = hotspot_x - du->hotspot_x; + du->core_hotspot_y = hotspot_y - du->hotspot_y; } else { DRM_ERROR("Failed to update cursor image\n"); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 941bcfd131ff..b17f08fc50d3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -320,14 +320,14 @@ int 
vmw_otables_setup(struct vmw_private *dev_priv) if (dev_priv->has_dx) { *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL); - if (*otables == NULL) + if (!(*otables)) return -ENOMEM; dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); } else { *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables), GFP_KERNEL); - if (*otables == NULL) + if (!(*otables)) return -ENOMEM; dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); @@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages) { struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); - if (unlikely(mob == NULL)) + if (unlikely(!mob)) return NULL; mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 6063c9636d4a..97000996b8dc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, reply_len = ebx; reply = kzalloc(reply_len + 1, GFP_KERNEL); - if (reply == NULL) { + if (!reply) { DRM_ERROR("Cannot allocate memory for reply\n"); return -ENOMEM; } @@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param, msg_len = strlen(guest_info_param) + strlen("info-get ") + 1; msg = kzalloc(msg_len, GFP_KERNEL); - if (msg == NULL) { + if (!msg) { DRM_ERROR("Cannot allocate memory to get %s", guest_info_param); return -ENOMEM; } @@ -400,7 +400,7 @@ int vmw_host_log(const char *log) msg_len = strlen(log) + strlen("log ") + 1; msg = kzalloc(msg_len, GFP_KERNEL); - if (msg == NULL) { + if (!msg) { DRM_ERROR("Cannot allocate memory for log message\n"); return -ENOMEM; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 7d591f653dfa..a96f90f017d1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, int ret; user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); - if (unlikely(user_bo == NULL)) { + if (unlikely(!user_bo)) { DRM_ERROR("Failed to allocate a buffer.\n"); return -ENOMEM; } @@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, } backup = kzalloc(sizeof(*backup), GFP_KERNEL); - if (unlikely(backup == NULL)) + if (unlikely(!backup)) return -ENOMEM; ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 68f135c5b0d8..9b832f136813 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -751,7 +751,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, } ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); - if (unlikely(ushader == NULL)) { + if (unlikely(!ushader)) { ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_user_shader_size); ret = -ENOMEM; @@ -821,7 +821,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, } shader = kzalloc(sizeof(*shader), GFP_KERNEL); - if (unlikely(shader == NULL)) { + if (unlikely(!shader)) { ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_size); ret = -ENOMEM; @@ -981,7 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, /* Allocate and pin a DMA buffer */ buf = kzalloc(sizeof(*buf), GFP_KERNEL); - if (unlikely(buf == NULL)) + if (unlikely(!buf)) return -ENOMEM; ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement, diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 50be1f034f9e..5284e8d2f7ba 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) * something arbitrarily large and we will reject any layout * that doesn't fit prim_bb_mem later */ - dev->mode_config.max_width = 16384; - dev->mode_config.max_height = 16384; + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; } vmw_kms_create_implicit_placement_property(dev_priv, false); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 5c1c711a21af..778272514164 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -172,7 +172,7 @@ static int host1x_probe(struct platform_device *pdev) host->rst = devm_reset_control_get(&pdev->dev, "host1x"); if (IS_ERR(host->rst)) { - err = PTR_ERR(host->clk); + err = PTR_ERR(host->rst); dev_err(&pdev->dev, "failed to get reset: %d\n", err); return err; } @@ -186,8 +186,13 @@ static int host1x_probe(struct platform_device *pdev) return -ENOMEM; err = iommu_attach_device(host->domain, &pdev->dev); - if (err) + if (err == -ENODEV) { + iommu_domain_free(host->domain); + host->domain = NULL; + goto skip_iommu; + } else if (err) { goto fail_free_domain; + } geometry = &host->domain->geometry; @@ -198,6 +203,7 @@ static int host1x_probe(struct platform_device *pdev) host->iova_end = geometry->aperture_end; } +skip_iommu: err = host1x_channel_list_init(&host->channel_list, host->info->nb_channels); if (err) { diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 92f1452dad57..76875f6299b8 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -417,7 +417,7 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { struct vga_device *vgadev, *conflict; unsigned long flags; - wait_queue_t wait; + wait_queue_entry_t wait; int rc = 0; vga_check_first_use();
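
The vmwgfx_context.c hunk above switches the cotable check from a NULL test to IS_ERR()/PTR_ERR(), because vmw_cotable_alloc() reports failure through ERR_PTR()-encoded pointers rather than NULL, so the old check could let an error pointer pass as a valid table. Below is a minimal userspace sketch of that error-pointer convention; the names mirror include/linux/err.h for readability, but this is an illustration under that assumption, not the kernel implementation.

/*
 * Minimal userspace sketch of the kernel error-pointer convention.
 * Illustrative only; names mirror include/linux/err.h.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;		/* e.g. -ENOMEM encoded in the pointer */
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for an allocator that, like vmw_cotable_alloc(),
 * never returns NULL on failure but an ERR_PTR() instead. */
static void *cotable_alloc(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return malloc(64);
}

int main(void)
{
	void *tbl = cotable_alloc(1);

	/* A bare NULL check here would wrongly treat the error pointer as
	 * a valid object; IS_ERR()/PTR_ERR() is the matching pattern. */
	if (IS_ERR(tbl)) {
		printf("allocation failed: %ld\n", PTR_ERR(tbl));
		return 1;
	}

	free(tbl);
	return 0;
}

With this convention a plain if (!tbl) never fires on failure, which is the bug class the hunk removes by propagating PTR_ERR() instead of assuming -ENOMEM.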
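
The ttm_bo_clean_mm() hunks above move dma_fence_put(man->move) from before the eviction/takedown path to after it and clear the pointer, so the manager's reference on the last move fence is dropped only once nothing in the cleanup can still depend on it. The following is a small self-contained sketch of that release-ordering idea in plain C; the names are hypothetical and this is not the TTM or dma-fence API.

/* Sketch of "release the shared object only after the teardown step that
 * may still use it, then clear the pointer". Hypothetical names. */
#include <stdio.h>
#include <stdlib.h>

struct fence {
	int refcount;
	int signaled;
};

static struct fence *fence_get(struct fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

static void fence_put(struct fence *f)
{
	if (f && --f->refcount == 0) {
		printf("fence freed\n");
		free(f);
	}
}

struct manager {
	struct fence *move;	/* last move fence, may still be in flight */
};

static void wait_for_idle(struct manager *man)
{
	/* Teardown may still dereference man->move here; if the reference
	 * had already been dropped, this would be a use-after-free. */
	if (man->move && !man->move->signaled)
		printf("waiting on move fence %p\n", (void *)man->move);
}

static void clean_mm(struct manager *man)
{
	wait_for_idle(man);	/* still uses man->move */
	fence_put(man->move);	/* drop the reference only afterwards */
	man->move = NULL;	/* and clear it, as the patch does */
}

int main(void)
{
	struct manager man = { .move = NULL };
	struct fence *f = calloc(1, sizeof(*f));

	man.move = fence_get(f);	/* manager holds one reference */
	clean_mm(&man);
	return 0;
}

Dropping the reference before wait_for_idle(), as the old ordering did, would let the teardown touch freed memory; clearing man->move afterwards also makes any later use of the stale pointer easier to catch.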