Diffstat (limited to 'drivers/gpu')
129 files changed, 2176 insertions, 1071 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 4f4e7fa001c1..c4fd57d8b717 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -161,7 +161,7 @@ config DRM_LOAD_EDID_FIRMWARE monitor are unable to provide appropriate EDID data. Since this feature is provided as a workaround for broken hardware, the default case is N. Details and instructions how to build your own - EDID data are given in Documentation/driver-api/edid.rst. + EDID data are given in Documentation/admin-guide/edid.rst. config DRM_DP_CEC bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 3f2b695cf19e..53b4126373a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -186,7 +186,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s * disabled. The memory must be pinned and mapped to the hardware when * this is called in hqd_load functions, so it should never fault in * the first place. This resolves a circular lock dependency involving - * four locks, including the DQM lock and mmap_sem. + * four locks, including the DQM lock and mmap_lock. */ #define read_user_wptr(mmptr, wptr, dst) \ ({ \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 0b7e78748540..c6944739183a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -237,7 +237,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data); - /* read_user_ptr may take the mm->mmap_sem. + /* read_user_ptr may take the mm->mmap_lock. * release srbm_mutex to avoid circular dependency between * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index ccd635b812b5..2f4bdc80a6b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -224,7 +224,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data); - /* read_user_ptr may take the mm->mmap_sem. + /* read_user_ptr may take the mm->mmap_lock. * release srbm_mutex to avoid circular dependency between * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex. 
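The comment updates above follow the tree-wide mmap_sem -> mmap_lock rename; the hunks below convert the open-coded rwsem calls to the wrapper API from <linux/mmap_lock.h>. A minimal, hedged sketch of the correspondence (function name hypothetical):

#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

static void mmap_lock_sketch(struct mm_struct *mm)
{
        mmap_read_lock(mm);                 /* was: down_read(&mm->mmap_sem) */
        /* ... walk or inspect VMAs ... */
        mmap_read_unlock(mm);               /* was: up_read(&mm->mmap_sem) */

        if (mmap_write_lock_killable(mm))   /* was: down_write_killable(...) */
                return;
        /* ... modify the address space ... */
        mmap_write_unlock(mm);              /* was: up_write(&mm->mmap_sem) */
}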
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 68e6e1bc8f3a..b91b5171270f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1393,9 +1393,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( * concurrently and the queues are actually stopped */ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { - down_write(&current->mm->mmap_sem); + mmap_write_lock(current->mm); is_invalid_userptr = atomic_read(&mem->invalid); - up_write(&current->mm->mmap_sem); + mmap_write_unlock(current->mm); } mutex_lock(&mem->lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 682a514f1794..d7e17e34fee1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -192,7 +192,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) /* only need to skip on ATPX */ if (amdgpu_device_supports_boco(dev) && !amdgpu_is_atpx_hybrid()) - dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_allow(dev->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index eff1f73302de..e59c01a83dac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -859,18 +859,6 @@ struct amdgpu_ttm_tt { }; #ifdef CONFIG_DRM_AMDGPU_USERPTR -/* flags used by HMM internal, not related to CPU/GPU PTE flags */ -static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = { - (1 << 0), /* HMM_PFN_VALID */ - (1 << 1), /* HMM_PFN_WRITE */ -}; - -static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = { - 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */ - 0, /* HMM_PFN_NONE */ - 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */ -}; - /** * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user * memory and start HMM tracking CPU page table update @@ -909,23 +897,20 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) goto out; } range->notifier = &bo->notifier; - range->flags = hmm_range_flags; - range->values = hmm_range_values; - range->pfn_shift = PAGE_SHIFT; range->start = bo->notifier.interval_tree.start; range->end = bo->notifier.interval_tree.last + 1; - range->default_flags = hmm_range_flags[HMM_PFN_VALID]; + range->default_flags = HMM_PFN_REQ_FAULT; if (!amdgpu_ttm_tt_is_readonly(ttm)) - range->default_flags |= range->flags[HMM_PFN_WRITE]; + range->default_flags |= HMM_PFN_REQ_WRITE; - range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns), - GFP_KERNEL); - if (unlikely(!range->pfns)) { + range->hmm_pfns = kvmalloc_array(ttm->num_pages, + sizeof(*range->hmm_pfns), GFP_KERNEL); + if (unlikely(!range->hmm_pfns)) { r = -ENOMEM; goto out_free_ranges; } - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma(mm, start); if (unlikely(!vma || start < vma->vm_start)) { r = -EFAULT; @@ -936,36 +921,32 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) r = -EPERM; goto out_unlock; } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); retry: range->notifier_seq = mmu_interval_read_begin(&bo->notifier); - down_read(&mm->mmap_sem); + mmap_read_lock(mm); r = hmm_range_fault(range); - up_read(&mm->mmap_sem); - if (unlikely(r <= 0)) 
{ + mmap_read_unlock(mm); + if (unlikely(r)) { /* * FIXME: This timeout should encompass the retry from * mmu_interval_read_retry() as well. */ - if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout)) + if (r == -EBUSY && !time_after(jiffies, timeout)) goto retry; goto out_free_pfns; } - for (i = 0; i < ttm->num_pages; i++) { - /* FIXME: The pages cannot be touched outside the notifier_lock */ - pages[i] = hmm_device_entry_to_page(range, range->pfns[i]); - if (unlikely(!pages[i])) { - pr_err("Page fault failed for pfn[%lu] = 0x%llx\n", - i, range->pfns[i]); - r = -ENOMEM; - - goto out_free_pfns; - } - } + /* + * Due to default_flags, all pages are HMM_PFN_VALID or + * hmm_range_fault() fails. FIXME: The pages cannot be touched outside + * the notifier_lock, and mmu_interval_read_retry() must be done first. + */ + for (i = 0; i < ttm->num_pages; i++) + pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]); gtt->range = range; mmput(mm); @@ -973,9 +954,9 @@ retry: return 0; out_unlock: - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); out_free_pfns: - kvfree(range->pfns); + kvfree(range->hmm_pfns); out_free_ranges: kfree(range); out: @@ -1000,7 +981,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n", gtt->userptr, ttm->num_pages); - WARN_ONCE(!gtt->range || !gtt->range->pfns, + WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns, "No user pages to check\n"); if (gtt->range) { @@ -1010,7 +991,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) */ r = mmu_interval_read_retry(gtt->range->notifier, gtt->range->notifier_seq); - kvfree(gtt->range->pfns); + kvfree(gtt->range->hmm_pfns); kfree(gtt->range); gtt->range = NULL; } @@ -1101,8 +1082,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) for (i = 0; i < ttm->num_pages; i++) { if (ttm->pages[i] != - hmm_device_entry_to_page(gtt->range, - gtt->range->pfns[i])) + hmm_pfn_to_page(gtt->range->hmm_pfns[i])) break; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 15476fca8fa6..a9583b95fcc1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -901,7 +901,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid, memset(&memory_exception_data, 0, sizeof(memory_exception_data)); - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma(mm, address); memory_exception_data.gpu_id = dev->id; @@ -924,7 +924,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid, memory_exception_data.failure.NoExecute = 0; } - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); mmput(mm); pr_debug("notpresent %d, noexecute %d, readonly %d\n", diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index cde5e4c7caa1..f0587d94294d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1073,7 +1073,7 @@ void kfd_dec_compute_active(struct kfd_dev *dev); /* Check with device cgroup if @kfd device is accessible */ static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd) { -#if defined(CONFIG_CGROUP_DEVICE) +#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) struct drm_device *ddev = kfd->ddev; return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f42e7e67ddba..7ced9f87be97 100644 --- 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4925,7 +4925,6 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) static int amdgpu_dm_connector_late_register(struct drm_connector *connector) { -#if defined(CONFIG_DEBUG_FS) struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); int r; @@ -4938,6 +4937,7 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector) return r; } +#if defined(CONFIG_DEBUG_FS) connector_debugfs_init(amdgpu_dm_connector); #endif diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index dd9ed71ed942..5fc25c3f445c 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -313,7 +313,7 @@ static void __exit armada_drm_exit(void) } module_exit(armada_drm_exit); -MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>"); +MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>"); MODULE_DESCRIPTION("Armada DRM Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:armada-drm"); diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c index 2bc6e4f85171..9af39ec958db 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c @@ -485,6 +485,9 @@ static int anx6345_get_modes(struct drm_connector *connector) num_modes += drm_add_edid_modes(connector, anx6345->edid); + /* Driver currently supports only 6bpc */ + connector->display_info.bpc = 6; + unlock: if (power_off) anx6345_poweroff(anx6345); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index dd56996fe9c7..d0db1acf11d7 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -630,7 +630,7 @@ static struct platform_driver snd_dw_hdmi_driver = { module_platform_driver(snd_dw_hdmi_driver); -MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>"); +MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>"); MODULE_DESCRIPTION("Synopsis Designware HDMI AHB ALSA interface"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index b90cca361afe..1e26b89628f9 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3386,8 +3386,12 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, drm_dp_queue_down_tx(mgr, txmsg); ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); - if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) - ret = -EIO; + if (ret > 0) { + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) + ret = -EIO; + else + ret = size; + } kfree(txmsg); fail_put: @@ -4238,6 +4242,7 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, if (pos->vcpi) { drm_dp_mst_put_port_malloc(port); pos->vcpi = 0; + pos->pbn = 0; } return 0; @@ -5442,7 +5447,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) { struct drm_dp_mst_port *immediate_upstream_port; struct drm_dp_mst_port *fec_port; - struct drm_dp_desc desc = { 0 }; + struct drm_dp_desc desc = { }; u8 endpoint_fec; u8 endpoint_dsc; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 3bd95c4b02eb..fed653f13c26 100644 --- a/drivers/gpu/drm/drm_edid.c +++ 
b/drivers/gpu/drm/drm_edid.c @@ -191,10 +191,11 @@ static const struct edid_quirk { { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP }, - /* Oculus Rift DK1, DK2, and CV1 VR Headsets */ + /* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */ { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP }, { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP }, { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP }, + { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP }, /* Windows Mixed Reality Headsets */ { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP }, @@ -5128,7 +5129,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d struct drm_display_mode *mode; unsigned pixel_clock = (timings->pixel_clock[0] | (timings->pixel_clock[1] << 8) | - (timings->pixel_clock[2] << 16)); + (timings->pixel_clock[2] << 16)) + 1; unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1; unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1; unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 02fc24026872..170aa7689110 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -307,13 +307,13 @@ static void drm_fb_helper_sysrq(int dummy1) schedule_work(&drm_fb_helper_restore_work); } -static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { +static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { .handler = drm_fb_helper_sysrq, .help_msg = "force-fb(V)", .action_msg = "Restore framebuffer console", }; #else -static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; +static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; #endif static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 7194e67e78bd..2f12b8c1d01c 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -569,9 +569,6 @@ ssize_t drm_read(struct file *filp, char __user *buffer, struct drm_device *dev = file_priv->minor->dev; ssize_t ret; - if (!access_ok(buffer, count)) - return -EFAULT; - ret = mutex_lock_interruptible(&file_priv->event_read_lock); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c index 7f386adcf872..910108ccaae1 100644 --- a/drivers/gpu/drm/drm_hdcp.c +++ b/drivers/gpu/drm/drm_hdcp.c @@ -241,8 +241,12 @@ static int drm_hdcp_request_srm(struct drm_device *drm_dev, ret = request_firmware_direct(&fw, (const char *)fw_name, drm_dev->dev); - if (ret < 0) + if (ret < 0) { + *revoked_ksv_cnt = 0; + *revoked_ksv_list = NULL; + ret = 0; goto exit; + } if (fw->size && fw->data) ret = drm_hdcp_srm_update(fw->data, fw->size, revoked_ksv_list, @@ -287,6 +291,8 @@ int drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs, ret = drm_hdcp_request_srm(drm_dev, &revoked_ksv_list, &revoked_ksv_cnt); + if (ret) + return ret; /* revoked_ksv_cnt will be zero when above function failed */ for (i = 0; i < revoked_ksv_cnt; i++) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 73e31dd4e442..328502aafaf7 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -741,7 +741,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { * }; * * Please make sure that you follow all the best practices from - * ``Documentation/ioctl/botching-up-ioctls.rst``. Note that drm_ioctl() + * ``Documentation/process/botching-up-ioctls.rst``. 
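The drm_edid.c hunk above adds one to the decoded DisplayID detailed-timing pixel clock because the field stores the rate in 10 kHz units minus one. A hedged decode sketch with a hypothetical raw value:

        unsigned int raw = timings->pixel_clock[0] |
                           (timings->pixel_clock[1] << 8) |
                           (timings->pixel_clock[2] << 16);  /* e.g. raw = 15399 */
        unsigned int clock_khz = (raw + 1) * 10;  /* (15399 + 1) * 10 = 154000 kHz */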
Note that drm_ioctl() * automatically zero-extends structures, hence make sure you can add more stuff * at the end, i.e. don't put a variable sized array there. * diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c index ca520028b2cb..f4e6184d1877 100644 --- a/drivers/gpu/drm/drm_scatter.c +++ b/drivers/gpu/drm/drm_scatter.c @@ -43,15 +43,6 @@ #define DEBUG_SCATTER 0 -static inline void *drm_vmalloc_dma(unsigned long size) -{ -#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) - return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL)); -#else - return vmalloc_32(size); -#endif -} - static void drm_sg_cleanup(struct drm_sg_mem * entry) { struct page *page; @@ -126,7 +117,7 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data, return -ENOMEM; } - entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT); + entry->virtual = vmalloc_32(pages << PAGE_SHIFT); if (!entry->virtual) { kfree(entry->busaddr); kfree(entry->pagelist); diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 56197ae0b2f9..4391e242356d 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -37,6 +37,7 @@ #include <linux/pci.h> #include <linux/seq_file.h> #include <linux/vmalloc.h> +#include <linux/pgtable.h> #if defined(__ia64__) #include <linux/efi.h> @@ -44,7 +45,6 @@ #endif #include <linux/mem_encrypt.h> -#include <asm/pgtable.h> #include <drm/drm_agpsupport.h> #include <drm/drm_device.h> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 27c948f5dfeb..f9afe11c50f0 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -726,7 +726,7 @@ static void __exit etnaviv_exit(void) module_exit(etnaviv_exit); MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>"); -MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>"); +MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>"); MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>"); MODULE_DESCRIPTION("etnaviv DRM Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index 648cf0207309..706af0304ca4 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -154,8 +154,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit) file_size += sizeof(*iter.hdr) * n_obj; /* Allocate the file in vmalloc memory, it's likely to be big */ - iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, - PAGE_KERNEL); + iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | + __GFP_NORETRY); if (!iter.start) { mutex_unlock(&gpu->mmu_context->lock); dev_warn(gpu->dev, "failed to allocate devcoredump file\n"); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index dc9ef302f517..701f3995f621 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -661,7 +661,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj) struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr; int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT; - might_lock_read(&current->mm->mmap_sem); + might_lock_read(&current->mm->mmap_lock); if (userptr->mm != current->mm) return -EPERM; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 3b0afa156d92..54def341c1db 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ 
b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -238,8 +238,10 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit) } if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && - submit->bos[i].va != mapping->iova) + submit->bos[i].va != mapping->iova) { + etnaviv_gem_mapping_unreference(mapping); return -EINVAL; + } atomic_inc(&etnaviv_obj->gpu_active); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index e6795bafcbb9..75f9db8f7bec 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -453,7 +453,7 @@ static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu, if (!(gpu->identity.features & meta->feature)) continue; - if (meta->nr_domains < (index - offset)) { + if (index - offset >= meta->nr_domains) { offset += meta->nr_domains; continue; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index f5d59d18cd5b..30c229fcb404 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -1921,11 +1921,6 @@ get_engines(struct i915_gem_context *ctx, } user = u64_to_user_ptr(args->value); - if (!access_ok(user, size)) { - err = -EFAULT; - goto err_free; - } - if (put_user(0, &user->extensions)) { err = -EFAULT; goto err_free; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 1d646f519070..db8eb1c6afe9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -3055,7 +3055,8 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, * And this range already got effectively checked earlier * when we did the "copy_from_user()" above. 
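A worked check of the etnaviv_perfmon.c bound fix above: with meta->nr_domains == 4 and index - offset == 4, the old test (4 < 4) was false, so an out-of-bounds domains[4] would have been used; the new test (4 >= 4) correctly advances to the next meta block. A hedged sketch of the corrected scan (loop bounds and array names hypothetical):

        for (i = 0; i < num_meta; i++) {
                const struct etnaviv_pm_domain_meta *meta = &doms_meta[i];

                if (index - offset >= meta->nr_domains) {
                        offset += meta->nr_domains; /* index is past this block */
                        continue;
                }
                return &meta->domains[index - offset]; /* in range by the test above */
        }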
*/ - if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list))) + if (!user_write_access_begin(user_exec_list, + count * sizeof(*user_exec_list))) goto end; for (i = 0; i < args->buffer_count; i++) { @@ -3069,7 +3070,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, end_user); } end_user: - user_access_end(); + user_write_access_end(); end:; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 70f5f82da288..fe45bd4d63a5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -93,7 +93,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; - if (down_write_killable(&mm->mmap_sem)) { + if (mmap_write_lock_killable(mm)) { addr = -EINTR; goto err; } @@ -103,7 +103,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); else addr = -ENOMEM; - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); if (IS_ERR_VALUE(addr)) goto err; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 7ffd7afeb7a5..c31a6744daee 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -200,10 +200,10 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm) if (IS_ERR(mn)) err = PTR_ERR(mn); - down_write(&mm->mm->mmap_sem); + mmap_write_lock(mm->mm); mutex_lock(&mm->i915->mm_lock); if (mm->mn == NULL && !err) { - /* Protected by mmap_sem (write-lock) */ + /* Protected by mmap_lock (write-lock) */ err = __mmu_notifier_register(&mn->mn, mm->mm); if (!err) { /* Protected by mm_lock */ @@ -217,7 +217,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm) err = 0; } mutex_unlock(&mm->i915->mm_lock); - up_write(&mm->mm->mmap_sem); + mmap_write_unlock(mm->mm); if (mn && !IS_ERR(mn)) kfree(mn); @@ -468,10 +468,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) if (mmget_not_zero(mm)) { while (pinned < npages) { if (!locked) { - down_read(&mm->mmap_sem); + mmap_read_lock(mm); locked = 1; } - ret = get_user_pages_remote + ret = pin_user_pages_remote (work->task, mm, obj->userptr.ptr + pinned * PAGE_SIZE, npages - pinned, @@ -483,7 +483,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) pinned += ret; } if (locked) - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); mmput(mm); } } @@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) } mutex_unlock(&obj->mm.lock); - release_pages(pvec, pinned); + unpin_user_pages(pvec, pinned); kvfree(pvec); i915_gem_object_put(obj); @@ -522,8 +522,8 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj) /* Spawn a worker so that we can acquire the * user pages without holding our mutex. Access - * to the user pages requires mmap_sem, and we have - * a strict lock ordering of mmap_sem, struct_mutex - + * to the user pages requires mmap_lock, and we have + * a strict lock ordering of mmap_lock, struct_mutex - * we already hold struct_mutex here and so cannot * call gup without encountering a lock inversion. 
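The i915 userptr hunks above switch from get_user_pages_remote()/release_pages() to pin_user_pages_remote()/unpin_user_pages(). A hedged, minimal sketch of the pairing rule for the pin API (function and variable names hypothetical, FOLL_WRITE assumed for a writable mapping):

#include <linux/mm.h>

static long pin_sketch(unsigned long uaddr, struct page **pages, int n)
{
        long pinned = pin_user_pages_fast(uaddr, n, FOLL_WRITE, pages);

        if (pinned <= 0)
                return pinned;                   /* fault or nothing pinned */
        /* ... set up DMA to the pinned pages ... */
        unpin_user_pages(pages, pinned);         /* never put_page() these */
        return pinned;
}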
* @@ -564,6 +564,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) struct sg_table *pages; bool active; int pinned; + unsigned int gup_flags = 0; /* If userspace should engineer that these pages are replaced in * the vma between us binding this page into the GTT and completion @@ -598,11 +599,22 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); - if (pvec) /* defer to worker if malloc fails */ - pinned = __get_user_pages_fast(obj->userptr.ptr, - num_pages, - !i915_gem_object_is_readonly(obj), - pvec); + /* + * Using __get_user_pages_fast() with a read-only + * access is questionable. A read-only page may be + * COW-broken, and then this might end up giving + * the wrong side of the COW.. + * + * We may or may not care. + */ + if (pvec) { + /* defer to worker if malloc fails */ + if (!i915_gem_object_is_readonly(obj)) + gup_flags |= FOLL_WRITE; + pinned = pin_user_pages_fast_only(obj->userptr.ptr, + num_pages, gup_flags, + pvec); + } } active = false; @@ -620,7 +632,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) __i915_gem_userptr_set_active(obj, true); if (IS_ERR(pages)) - release_pages(pvec, pinned); + unpin_user_pages(pvec, pinned); kvfree(pvec); return PTR_ERR_OR_ZERO(pages); @@ -675,7 +687,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, } mark_page_accessed(page); - put_page(page); + unpin_user_page(page); } obj->mm.dirty = false; diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c index 9272bef57092..debaf7b18ab5 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c @@ -66,7 +66,7 @@ static void *mock_dmabuf_vmap(struct dma_buf *dma_buf) { struct mock_dmabuf *mock = to_mock(dma_buf); - return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL); + return vm_map_ram(mock->pages, mock->npages, 0); } static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index a83df2f84eb9..a1696e9ce4b6 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -208,14 +208,41 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | SKL_FUSE_PG_DIST_STATUS(SKL_PG2); - vgpu_vreg_t(vgpu, LCPLL1_CTL) |= - LCPLL_PLL_ENABLE | - LCPLL_PLL_LOCK; - vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE; - + /* + * Only 1 PIPE enabled in current vGPU display and PIPE_A is + * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A, + * TRANSCODER_A can be enabled. PORT_x depends on the input of + * setup_virtual_dp_monitor, we can bind DPLL0 to any PORT_x + * so we fixed to DPLL0 here. + * Setup DPLL0: DP link clk 1620 MHz, non SSC, DP Mode + */ + vgpu_vreg_t(vgpu, DPLL_CTRL1) = + DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0); + vgpu_vreg_t(vgpu, DPLL_CTRL1) |= + DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0); + vgpu_vreg_t(vgpu, LCPLL1_CTL) = + LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK; + vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0); + /* + * Golden M/N are calculated based on: + * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID), + * DP link clk 1620 MHz and non-constant_n. + * TODO: calculate DP link symbol clk and stream clk m/n. 
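Restating the copy-on-write caveat from the i915_gem_userptr hunk above: a fast pin without FOLL_WRITE may return the pre-COW page, so a later userspace write lands in a different physical page than the one the GPU keeps reading. Hence the hunk requests FOLL_WRITE for everything except truly read-only objects; a hedged restatement of that flag choice:

        unsigned int gup_flags = 0;

        /* write access unless the object is read-only; avoids the
         * wrong-side-of-the-COW problem described above */
        if (!i915_gem_object_is_readonly(obj))
                gup_flags |= FOLL_WRITE;
        pinned = pin_user_pages_fast_only(obj->userptr.ptr, num_pages,
                                          gup_flags, pvec);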
+ */ + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; + vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; + vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; + vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, DPLL_CTRL2) &= + ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B); vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | @@ -236,6 +263,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, DPLL_CTRL2) &= + ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C); vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | @@ -256,6 +289,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { + vgpu_vreg_t(vgpu, DPLL_CTRL2) &= + ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D); vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 3a9bd8e4d8db..0fb1df71c637 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -416,7 +416,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) { struct i915_page_directory * const pd = i915_pd_entry(ppgtt->pd, i); - + /* skip now as current i915 ppgtt alloc won't allocate + top level pdp for non 4-level table, won't impact + shadow ppgtt. 
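A hedged back-of-the-envelope check of the "golden" data M/N values programmed above, assuming data M/N = (pixel clock x bpp) / (link symbol clock x lanes x 8) and a 162 MHz symbol clock for the 1.62 GHz link rate:

        /*
         * data M / data N = (154000 kHz * 24 bpp) / (162000 kHz * 4 lanes * 8)
         *                 = 3696000 / 5184000 ~= 0.71296
         * 0x5b425e / 0x800000 = 5980766 / 8388608 ~= 0.71296, which matches,
         * with TU size 64 programmed via (63 << TU_SIZE_SHIFT) above.
         */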
*/ + if (!pd) + break; px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; } } diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 8e45ca3d2ede..55b97c3a3dde 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -47,20 +47,16 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd, unsigned long arg) { struct drm_i915_getparam32 req32; - drm_i915_getparam_t __user *request; + struct drm_i915_getparam req; if (copy_from_user(&req32, (void __user *)arg, sizeof(req32))) return -EFAULT; - request = compat_alloc_user_space(sizeof(*request)); - if (!access_ok(request, sizeof(*request)) || - __put_user(req32.param, &request->param) || - __put_user((void __user *)(unsigned long)req32.value, - &request->value)) - return -EFAULT; + req.param = req32.param; + req.value = compat_ptr(req32.value); - return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM, - (unsigned long)request); + return drm_ioctl_kernel(file, i915_getparam_ioctl, &req, + DRM_RENDER_ALLOW); } static drm_ioctl_compat_t *i915_compat_ioctls[] = { diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c index b6376b25ef63..43039dc8c607 100644 --- a/drivers/gpu/drm/i915/i915_mm.c +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -25,7 +25,6 @@ #include <linux/mm.h> #include <linux/io-mapping.h> -#include <asm/pgtable.h> #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index f35712d04ba4..25329b7600c9 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3415,10 +3415,10 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf, /* Similar to perf's kernel.perf_paranoid_cpu sysctl option * we check a dev.i915.perf_stream_paranoid sysctl option * to determine if it's ok to access system wide OA counters - * without CAP_SYS_ADMIN privileges. + * without CAP_PERFMON or CAP_SYS_ADMIN privileges. */ if (privileged_op && - i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { + i915_perf_stream_paranoid && !perfmon_capable()) { DRM_DEBUG("Insufficient privileges to open i915 perf stream\n"); ret = -EACCES; goto err_ctx; @@ -3612,9 +3612,8 @@ static int read_properties_unlocked(struct i915_perf *perf, } else oa_freq_hz = 0; - if (oa_freq_hz > i915_oa_max_sample_rate && - !capable(CAP_SYS_ADMIN)) { - DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n", + if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) { + DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n", i915_oa_max_sample_rate); return -EACCES; } @@ -3677,7 +3676,7 @@ static int read_properties_unlocked(struct i915_perf *perf, * buffered data written by the GPU besides periodic OA metrics. * * Note we copy the properties from userspace outside of the i915 perf - * mutex to avoid an awkward lockdep with mmap_sem. + * mutex to avoid an awkward lockdep with mmap_lock. * * Most of the implementation details are handled by * i915_perf_open_ioctl_locked() after taking the &perf->lock @@ -3897,9 +3896,6 @@ static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf, if (!n_regs) return NULL; - if (!access_ok(regs, n_regs * sizeof(u32) * 2)) - return ERR_PTR(-EFAULT); - /* No is_valid function means we're not allowing any register to be programmed. 
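The access_ok() removals above and in the i915_query.c hunks below lean on the checked accessors, which validate the user range themselves. A hedged sketch of the resulting pattern (struct and pointer names hypothetical):

        u32 flags;

        /* get_user()/copy_from_user() perform the access_ok() range
         * check internally, so no separate pre-check is needed */
        if (get_user(flags, &uptr->flags))
                return -EFAULT;
        if (copy_from_user(&buf, udata, sizeof(buf)))
                return -EFAULT;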
*/ GEM_BUG_ON(!is_valid); if (!is_valid) @@ -4000,7 +3996,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { + if (i915_perf_stream_paranoid && !perfmon_capable()) { DRM_DEBUG("Insufficient privileges to add i915 OA config\n"); return -EACCES; } @@ -4147,7 +4143,7 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, return -ENOTSUPP; } - if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { + if (i915_perf_stream_paranoid && !perfmon_capable()) { DRM_DEBUG("Insufficient privileges to remove i915 OA config\n"); return -EACCES; } diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index ef25ce6e395e..e75c528ebbe0 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -25,10 +25,6 @@ static int copy_query_item(void *query_hdr, size_t query_sz, query_sz)) return -EFAULT; - if (!access_ok(u64_to_user_ptr(query_item->data_ptr), - total_length)) - return -EFAULT; - return 0; } @@ -72,20 +68,20 @@ static int query_topology_info(struct drm_i915_private *dev_priv, topo.eu_offset = slice_length + subslice_length; topo.eu_stride = sseu->eu_stride; - if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr), + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr), &topo, sizeof(topo))) return -EFAULT; - if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)), + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)), &sseu->slice_mask, slice_length)) return -EFAULT; - if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo) + slice_length), sseu->subslice_mask, subslice_length)) return -EFAULT; - if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + + if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo) + slice_length + subslice_length), sseu->eu_mask, eu_length)) @@ -131,14 +127,14 @@ query_engine_info(struct drm_i915_private *i915, info.engine.engine_instance = engine->uabi_instance; info.capabilities = engine->uabi_capabilities; - if (__copy_to_user(info_ptr, &info, sizeof(info))) + if (copy_to_user(info_ptr, &info, sizeof(info))) return -EFAULT; query.num_engines++; info_ptr++; } - if (__copy_to_user(query_ptr, &query, sizeof(query))) + if (copy_to_user(query_ptr, &query, sizeof(query))) return -EFAULT; return len; @@ -158,10 +154,6 @@ static int can_copy_perf_config_registers_or_number(u32 user_n_regs, if (user_n_regs < kernel_n_regs) return -EINVAL; - if (!access_ok(u64_to_user_ptr(user_regs_ptr), - 2 * sizeof(u32) * kernel_n_regs)) - return -EFAULT; - return 0; } @@ -170,6 +162,7 @@ static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel u64 user_regs_ptr, u32 *user_n_regs) { + u32 __user *p = u64_to_user_ptr(user_regs_ptr); u32 r; if (*user_n_regs == 0) { @@ -179,25 +172,19 @@ static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel *user_n_regs = kernel_n_regs; - for (r = 0; r < kernel_n_regs; r++) { - u32 __user *user_reg_ptr = - u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2); - u32 __user *user_val_ptr = - u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 + - sizeof(u32)); - int ret; - - ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr), - user_reg_ptr); - if (ret) - return -EFAULT; + if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs)) + return -EFAULT; - ret = __put_user(kernel_regs[r].value, 
user_val_ptr); - if (ret) - return -EFAULT; + for (r = 0; r < kernel_n_regs; r++, p += 2) { + unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr), + p, Efault); + unsafe_put_user(kernel_regs[r].value, p + 1, Efault); } - + user_write_access_end(); return 0; +Efault: + user_write_access_end(); + return -EFAULT; } static int query_perf_config_data(struct drm_i915_private *i915, @@ -233,10 +220,7 @@ static int query_perf_config_data(struct drm_i915_private *i915, return -EINVAL; } - if (!access_ok(user_query_config_ptr, total_size)) - return -EFAULT; - - if (__get_user(flags, &user_query_config_ptr->flags)) + if (get_user(flags, &user_query_config_ptr->flags)) return -EFAULT; if (flags != 0) @@ -249,7 +233,7 @@ static int query_perf_config_data(struct drm_i915_private *i915, BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid)); memset(&uuid, 0, sizeof(uuid)); - if (__copy_from_user(uuid, user_query_config_ptr->uuid, + if (copy_from_user(uuid, user_query_config_ptr->uuid, sizeof(user_query_config_ptr->uuid))) return -EFAULT; @@ -263,7 +247,7 @@ static int query_perf_config_data(struct drm_i915_private *i915, } rcu_read_unlock(); } else { - if (__get_user(config_id, &user_query_config_ptr->config)) + if (get_user(config_id, &user_query_config_ptr->config)) return -EFAULT; oa_config = i915_perf_get_oa_config(perf, config_id); @@ -271,8 +255,7 @@ static int query_perf_config_data(struct drm_i915_private *i915, if (!oa_config) return -ENOENT; - if (__copy_from_user(&user_config, user_config_ptr, - sizeof(user_config))) { + if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) { ret = -EFAULT; goto out; } @@ -318,8 +301,7 @@ static int query_perf_config_data(struct drm_i915_private *i915, memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid)); - if (__copy_to_user(user_config_ptr, &user_config, - sizeof(user_config))) { + if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) { ret = -EFAULT; goto out; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6c076a24eb82..7717581350bd 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -186,7 +186,7 @@ typedef struct { #define INVALID_MMIO_REG _MMIO(0) -static inline u32 i915_mmio_reg_offset(i915_reg_t reg) +static __always_inline u32 i915_mmio_reg_offset(i915_reg_t reg) { return reg.reg; } diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index ad719c9602af..9cb2d7548daa 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -549,7 +549,7 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm) * becaue the HDA driver may require us to enable the audio power * domain during system suspend. 
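The copy_perf_config_registers_or_number() rewrite completed above batches user stores inside a single access window instead of one __put_user() per word. A hedged, generic sketch of that unsafe_put_user() pattern (function name hypothetical):

static int write_reg_pairs_sketch(u32 __user *p, const u32 *vals, u32 n)
{
        u32 i;

        if (!user_write_access_begin(p, 2 * sizeof(u32) * n))
                return -EFAULT;
        for (i = 0; i < n; i++, p += 2) {
                unsafe_put_user(vals[2 * i], p, Efault);          /* register */
                unsafe_put_user(vals[2 * i + 1], p + 1, Efault);  /* value */
        }
        user_write_access_end();
        return 0;
Efault:
        user_write_access_end();
        return -EFAULT;
}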
*/ - dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP); + dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE); pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */ pm_runtime_mark_last_busy(kdev); diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c index 632d72177123..55b49a31729b 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c @@ -330,8 +330,8 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc, if (!drm_atomic_crtc_needs_modeset(state)) return 0; - if (state->mode.hdisplay > priv->soc_info->max_height || - state->mode.vdisplay > priv->soc_info->max_width) + if (state->mode.hdisplay > priv->soc_info->max_width || + state->mode.vdisplay > priv->soc_info->max_height) return -EINVAL; rate = clk_round_rate(priv->pix_clk, @@ -476,7 +476,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder, static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg) { - struct ingenic_drm *priv = arg; + struct ingenic_drm *priv = drm_device_get_priv(arg); unsigned int state; regmap_read(priv->map, JZ_REG_LCD_STATE, &state); @@ -824,6 +824,7 @@ static const struct of_device_id ingenic_drm_of_match[] = { { .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info }, { /* sentinel */ }, }; +MODULE_DEVICE_TABLE(of, ingenic_drm_of_match); static struct platform_driver ingenic_drm_driver = { .driver = { diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index fa5ffc4fe823..c420f5a3d33b 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -11,6 +11,7 @@ config DRM_MEDIATEK select DRM_MIPI_DSI select DRM_PANEL select MEMORY + select MTK_MMSYS select MTK_SMI select VIDEOMODE_HELPERS help diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c index 6fb0d6983a4a..3ae9c810845b 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_color.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c @@ -119,7 +119,10 @@ static int mtk_disp_color_probe(struct platform_device *pdev) ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id, &mtk_disp_color_funcs); if (ret) { - dev_err(dev, "Failed to initialize component: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to initialize component: %d\n", + ret); + return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 891d80c73e04..28651bc579bc 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -386,7 +386,10 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id, &mtk_disp_ovl_funcs); if (ret) { - dev_err(dev, "Failed to initialize component: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to initialize component: %d\n", + ret); + return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 0cb848d64206..e04319fedf46 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -294,7 +294,10 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev) ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id, &mtk_disp_rdma_funcs); if (ret) { - dev_err(dev, "Failed to initialize component: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to initialize component: %d\n", + ret); + return ret; } diff --git 
a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 945c3ac92998..d4f0fb7ad312 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -739,21 +739,27 @@ static int mtk_dpi_probe(struct platform_device *pdev) dpi->engine_clk = devm_clk_get(dev, "engine"); if (IS_ERR(dpi->engine_clk)) { ret = PTR_ERR(dpi->engine_clk); - dev_err(dev, "Failed to get engine clock: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get engine clock: %d\n", ret); + return ret; } dpi->pixel_clk = devm_clk_get(dev, "pixel"); if (IS_ERR(dpi->pixel_clk)) { ret = PTR_ERR(dpi->pixel_clk); - dev_err(dev, "Failed to get pixel clock: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get pixel clock: %d\n", ret); + return ret; } dpi->tvd_clk = devm_clk_get(dev, "pll"); if (IS_ERR(dpi->tvd_clk)) { ret = PTR_ERR(dpi->tvd_clk); - dev_err(dev, "Failed to get tvdpll clock: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get tvdpll clock: %d\n", ret); + return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index fe85e487e477..fe46c4bac64d 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -6,6 +6,7 @@ #include <linux/clk.h> #include <linux/pm_runtime.h> #include <linux/soc/mediatek/mtk-cmdq.h> +#include <linux/soc/mediatek/mtk-mmsys.h> #include <asm/barrier.h> #include <soc/mediatek/smi.h> @@ -28,7 +29,7 @@ * @enabled: records whether crtc_enable succeeded * @planes: array of 4 drm_plane structures, one for each overlay plane * @pending_planes: whether any plane has pending changes to be applied - * @config_regs: memory mapped mmsys configuration register space + * @mmsys_dev: pointer to the mmsys device for configuration registers * @mutex: handle to one of the ten disp_mutex streams * @ddp_comp_nr: number of components in ddp_comp * @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc @@ -50,7 +51,7 @@ struct mtk_drm_crtc { u32 cmdq_event; #endif - void __iomem *config_regs; + struct device *mmsys_dev; struct mtk_disp_mutex *mutex; unsigned int ddp_comp_nr; struct mtk_ddp_comp **ddp_comp; @@ -300,9 +301,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n"); for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) { - mtk_ddp_add_comp_to_path(mtk_crtc->config_regs, - mtk_crtc->ddp_comp[i]->id, - mtk_crtc->ddp_comp[i + 1]->id); + mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev, + mtk_crtc->ddp_comp[i]->id, + mtk_crtc->ddp_comp[i + 1]->id); mtk_disp_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id); } @@ -360,9 +361,9 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) mtk_crtc->ddp_comp[i]->id); mtk_disp_mutex_disable(mtk_crtc->mutex); for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) { - mtk_ddp_remove_comp_from_path(mtk_crtc->config_regs, - mtk_crtc->ddp_comp[i]->id, - mtk_crtc->ddp_comp[i + 1]->id); + mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev, + mtk_crtc->ddp_comp[i]->id, + mtk_crtc->ddp_comp[i + 1]->id); mtk_disp_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id); } @@ -766,7 +767,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, if (!mtk_crtc) return -ENOMEM; - mtk_crtc->config_regs = priv->config_regs; + mtk_crtc->mmsys_dev = priv->mmsys_dev; mtk_crtc->ddp_comp_nr = path_len; mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr, sizeof(*mtk_crtc->ddp_comp), diff --git 
a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 13035c906035..014c1bbe1df2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c @@ -13,26 +13,6 @@ #include "mtk_drm_ddp.h" #include "mtk_drm_ddp_comp.h" -#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040 -#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044 -#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048 -#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c -#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050 -#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084 -#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088 -#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4 -#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8 -#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac -#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8 -#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4 -#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8 -#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100 - -#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030 -#define DISP_REG_CONFIG_OUT_SEL 0x04c -#define DISP_REG_CONFIG_DSI_SEL 0x050 -#define DISP_REG_CONFIG_DPI_SEL 0x064 - #define MT2701_DISP_MUTEX0_MOD0 0x2c #define MT2701_DISP_MUTEX0_SOF0 0x30 @@ -94,48 +74,6 @@ #define MUTEX_SOF_DSI2 5 #define MUTEX_SOF_DSI3 6 -#define OVL0_MOUT_EN_COLOR0 0x1 -#define OD_MOUT_EN_RDMA0 0x1 -#define OD1_MOUT_EN_RDMA1 BIT(16) -#define UFOE_MOUT_EN_DSI0 0x1 -#define COLOR0_SEL_IN_OVL0 0x1 -#define OVL1_MOUT_EN_COLOR1 0x1 -#define GAMMA_MOUT_EN_RDMA1 0x1 -#define RDMA0_SOUT_DPI0 0x2 -#define RDMA0_SOUT_DPI1 0x3 -#define RDMA0_SOUT_DSI1 0x1 -#define RDMA0_SOUT_DSI2 0x4 -#define RDMA0_SOUT_DSI3 0x5 -#define RDMA1_SOUT_DPI0 0x2 -#define RDMA1_SOUT_DPI1 0x3 -#define RDMA1_SOUT_DSI1 0x1 -#define RDMA1_SOUT_DSI2 0x4 -#define RDMA1_SOUT_DSI3 0x5 -#define RDMA2_SOUT_DPI0 0x2 -#define RDMA2_SOUT_DPI1 0x3 -#define RDMA2_SOUT_DSI1 0x1 -#define RDMA2_SOUT_DSI2 0x4 -#define RDMA2_SOUT_DSI3 0x5 -#define DPI0_SEL_IN_RDMA1 0x1 -#define DPI0_SEL_IN_RDMA2 0x3 -#define DPI1_SEL_IN_RDMA1 (0x1 << 8) -#define DPI1_SEL_IN_RDMA2 (0x3 << 8) -#define DSI0_SEL_IN_RDMA1 0x1 -#define DSI0_SEL_IN_RDMA2 0x4 -#define DSI1_SEL_IN_RDMA1 0x1 -#define DSI1_SEL_IN_RDMA2 0x4 -#define DSI2_SEL_IN_RDMA1 (0x1 << 16) -#define DSI2_SEL_IN_RDMA2 (0x4 << 16) -#define DSI3_SEL_IN_RDMA1 (0x1 << 16) -#define DSI3_SEL_IN_RDMA2 (0x4 << 16) -#define COLOR1_SEL_IN_OVL1 0x1 - -#define OVL_MOUT_EN_RDMA 0x1 -#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8 -#define BLS_TO_DPI_RDMA1_TO_DSI 0x2 -#define DSI_SEL_IN_BLS 0x0 -#define DPI_SEL_IN_BLS 0x0 -#define DSI_SEL_IN_RDMA 0x1 struct mtk_disp_mutex { int id; @@ -246,200 +184,6 @@ static const struct mtk_ddp_data mt8173_ddp_driver_data = { .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0, }; -static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, - enum mtk_ddp_comp_id next, - unsigned int *addr) -{ - unsigned int value; - - if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) { - *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN; - value = OVL0_MOUT_EN_COLOR0; - } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) { - *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN; - value = OVL_MOUT_EN_RDMA; - } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) { - *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN; - value = OD_MOUT_EN_RDMA0; - } else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) { - *addr = DISP_REG_CONFIG_DISP_UFOE_MOUT_EN; - value = UFOE_MOUT_EN_DSI0; - } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) { - *addr = 
DISP_REG_CONFIG_DISP_OVL1_MOUT_EN; - value = OVL1_MOUT_EN_COLOR1; - } else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) { - *addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN; - value = GAMMA_MOUT_EN_RDMA1; - } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) { - *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN; - value = OD1_MOUT_EN_RDMA1; - } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { - *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; - value = RDMA0_SOUT_DPI0; - } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) { - *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; - value = RDMA0_SOUT_DPI1; - } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) { - *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; - value = RDMA0_SOUT_DSI1; - } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { - *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; - value = RDMA0_SOUT_DSI2; - } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) { - *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; - value = RDMA0_SOUT_DSI3; - } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { - *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN; - value = RDMA1_SOUT_DSI1; - } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) { - *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN; - value = RDMA1_SOUT_DSI2; - } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) { - *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN; - value = RDMA1_SOUT_DSI3; - } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) { - *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN; - value = RDMA1_SOUT_DPI0; - } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { - *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN; - value = RDMA1_SOUT_DPI1; - } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) { - *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT; - value = RDMA2_SOUT_DPI0; - } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { - *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT; - value = RDMA2_SOUT_DPI1; - } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { - *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT; - value = RDMA2_SOUT_DSI1; - } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { - *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT; - value = RDMA2_SOUT_DSI2; - } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) { - *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT; - value = RDMA2_SOUT_DSI3; - } else { - value = 0; - } - - return value; -} - -static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, - enum mtk_ddp_comp_id next, - unsigned int *addr) -{ - unsigned int value; - - if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) { - *addr = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN; - value = COLOR0_SEL_IN_OVL0; - } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) { - *addr = DISP_REG_CONFIG_DPI_SEL_IN; - value = DPI0_SEL_IN_RDMA1; - } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { - *addr = DISP_REG_CONFIG_DPI_SEL_IN; - value = DPI1_SEL_IN_RDMA1; - } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) { - *addr = DISP_REG_CONFIG_DSIE_SEL_IN; - value = DSI0_SEL_IN_RDMA1; - } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { - *addr = DISP_REG_CONFIG_DSIO_SEL_IN; - value = DSI1_SEL_IN_RDMA1; - } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) { - *addr = DISP_REG_CONFIG_DSIE_SEL_IN; - value = 
DSI2_SEL_IN_RDMA1; - } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) { - *addr = DISP_REG_CONFIG_DSIO_SEL_IN; - value = DSI3_SEL_IN_RDMA1; - } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) { - *addr = DISP_REG_CONFIG_DPI_SEL_IN; - value = DPI0_SEL_IN_RDMA2; - } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { - *addr = DISP_REG_CONFIG_DPI_SEL_IN; - value = DPI1_SEL_IN_RDMA2; - } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) { - *addr = DISP_REG_CONFIG_DSIE_SEL_IN; - value = DSI0_SEL_IN_RDMA2; - } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { - *addr = DISP_REG_CONFIG_DSIO_SEL_IN; - value = DSI1_SEL_IN_RDMA2; - } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { - *addr = DISP_REG_CONFIG_DSIE_SEL_IN; - value = DSI2_SEL_IN_RDMA2; - } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) { - *addr = DISP_REG_CONFIG_DSIE_SEL_IN; - value = DSI3_SEL_IN_RDMA2; - } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) { - *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN; - value = COLOR1_SEL_IN_OVL1; - } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) { - *addr = DISP_REG_CONFIG_DSI_SEL; - value = DSI_SEL_IN_BLS; - } else { - value = 0; - } - - return value; -} - -static void mtk_ddp_sout_sel(void __iomem *config_regs, - enum mtk_ddp_comp_id cur, - enum mtk_ddp_comp_id next) -{ - if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) { - writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1, - config_regs + DISP_REG_CONFIG_OUT_SEL); - } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DPI0) { - writel_relaxed(BLS_TO_DPI_RDMA1_TO_DSI, - config_regs + DISP_REG_CONFIG_OUT_SEL); - writel_relaxed(DSI_SEL_IN_RDMA, - config_regs + DISP_REG_CONFIG_DSI_SEL); - writel_relaxed(DPI_SEL_IN_BLS, - config_regs + DISP_REG_CONFIG_DPI_SEL); - } -} - -void mtk_ddp_add_comp_to_path(void __iomem *config_regs, - enum mtk_ddp_comp_id cur, - enum mtk_ddp_comp_id next) -{ - unsigned int addr, value, reg; - - value = mtk_ddp_mout_en(cur, next, &addr); - if (value) { - reg = readl_relaxed(config_regs + addr) | value; - writel_relaxed(reg, config_regs + addr); - } - - mtk_ddp_sout_sel(config_regs, cur, next); - - value = mtk_ddp_sel_in(cur, next, &addr); - if (value) { - reg = readl_relaxed(config_regs + addr) | value; - writel_relaxed(reg, config_regs + addr); - } -} - -void mtk_ddp_remove_comp_from_path(void __iomem *config_regs, - enum mtk_ddp_comp_id cur, - enum mtk_ddp_comp_id next) -{ - unsigned int addr, value, reg; - - value = mtk_ddp_mout_en(cur, next, &addr); - if (value) { - reg = readl_relaxed(config_regs + addr) & ~value; - writel_relaxed(reg, config_regs + addr); - } - - value = mtk_ddp_sel_in(cur, next, &addr); - if (value) { - reg = readl_relaxed(config_regs + addr) & ~value; - writel_relaxed(reg, config_regs + addr); - } -} - struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id) { struct mtk_ddp *ddp = dev_get_drvdata(dev); @@ -628,7 +372,8 @@ static int mtk_ddp_probe(struct platform_device *pdev) if (!ddp->data->no_clk) { ddp->clk = devm_clk_get(dev, NULL); if (IS_ERR(ddp->clk)) { - dev_err(dev, "Failed to get clock\n"); + if (PTR_ERR(ddp->clk) != -EPROBE_DEFER) + dev_err(dev, "Failed to get clock\n"); return PTR_ERR(ddp->clk); } } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h index 827be424a148..6b691a57be4a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h +++ 
b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h @@ -12,13 +12,6 @@ struct regmap; struct device; struct mtk_disp_mutex; -void mtk_ddp_add_comp_to_path(void __iomem *config_regs, - enum mtk_ddp_comp_id cur, - enum mtk_ddp_comp_id next); -void mtk_ddp_remove_comp_from_path(void __iomem *config_regs, - enum mtk_ddp_comp_id cur, - enum mtk_ddp_comp_id next); - struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id); int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex); void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index ce570283b55f..6bd369434d9d 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -10,6 +10,7 @@ #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/pm_runtime.h> +#include <linux/soc/mediatek/mtk-mmsys.h> #include <linux/dma-mapping.h> #include <drm/drm_atomic.h> @@ -418,11 +419,22 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = { { } }; +static const struct of_device_id mtk_drm_of_ids[] = { + { .compatible = "mediatek,mt2701-mmsys", + .data = &mt2701_mmsys_driver_data}, + { .compatible = "mediatek,mt2712-mmsys", + .data = &mt2712_mmsys_driver_data}, + { .compatible = "mediatek,mt8173-mmsys", + .data = &mt8173_mmsys_driver_data}, + { } +}; + static int mtk_drm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct device_node *phandle = dev->parent->of_node; + const struct of_device_id *of_id; struct mtk_drm_private *private; - struct resource *mem; struct device_node *node; struct component_match *match = NULL; int ret; @@ -433,18 +445,20 @@ static int mtk_drm_probe(struct platform_device *pdev) return -ENOMEM; private->data = of_device_get_match_data(dev); - - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - private->config_regs = devm_ioremap_resource(dev, mem); - if (IS_ERR(private->config_regs)) { - ret = PTR_ERR(private->config_regs); - dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n", - ret); - return ret; + private->mmsys_dev = dev->parent; + if (!private->mmsys_dev) { + dev_err(dev, "Failed to get MMSYS device\n"); + return -ENODEV; } + of_id = of_match_node(mtk_drm_of_ids, phandle); + if (!of_id) + return -ENODEV; + + private->data = of_id->data; + /* Iterate over sibling DISP function blocks */ - for_each_child_of_node(dev->of_node->parent, node) { + for_each_child_of_node(phandle->parent, node) { const struct of_device_id *of_id; enum mtk_ddp_comp_type comp_type; int comp_id; @@ -578,22 +592,11 @@ static int mtk_drm_sys_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend, mtk_drm_sys_resume); -static const struct of_device_id mtk_drm_of_ids[] = { - { .compatible = "mediatek,mt2701-mmsys", - .data = &mt2701_mmsys_driver_data}, - { .compatible = "mediatek,mt2712-mmsys", - .data = &mt2712_mmsys_driver_data}, - { .compatible = "mediatek,mt8173-mmsys", - .data = &mt8173_mmsys_driver_data}, - { } -}; - static struct platform_driver mtk_drm_platform_driver = { .probe = mtk_drm_probe, .remove = mtk_drm_remove, .driver = { .name = "mediatek-drm", - .of_match_table = mtk_drm_of_ids, .pm = &mtk_drm_pm_ops, }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index 17bc99b9f5d4..b5be63e53176 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -39,7 +39,7 @@ struct mtk_drm_private { struct device_node 
*mutex_node; struct device *mutex_dev; - void __iomem *config_regs; + struct device *mmsys_dev; struct device_node *comp_node[DDP_COMPONENT_ID_MAX]; struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX]; const struct mtk_mmsys_driver_data *data; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index a9a25087112f..270bf22c98fe 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -1186,14 +1186,18 @@ static int mtk_dsi_probe(struct platform_device *pdev) dsi->engine_clk = devm_clk_get(dev, "engine"); if (IS_ERR(dsi->engine_clk)) { ret = PTR_ERR(dsi->engine_clk); - dev_err(dev, "Failed to get engine clock: %d\n", ret); + + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get engine clock: %d\n", ret); goto err_unregister_host; } dsi->digital_clk = devm_clk_get(dev, "digital"); if (IS_ERR(dsi->digital_clk)) { ret = PTR_ERR(dsi->digital_clk); - dev_err(dev, "Failed to get digital clock: %d\n", ret); + + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get digital clock: %d\n", ret); goto err_unregister_host; } diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 7bc086ec74f7..5feb760617cb 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1470,7 +1470,9 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, ret = mtk_hdmi_get_all_clk(hdmi, np); if (ret) { - dev_err(dev, "Failed to get clocks: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get clocks: %d\n", ret); + return ret; } diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 621f6de0f076..4c5aafcec799 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -440,9 +440,7 @@ static int __maybe_unused meson_drv_pm_resume(struct device *dev) if (priv->afbcd.ops) priv->afbcd.ops->init(priv); - drm_mode_config_helper_resume(priv->drm); - - return 0; + return drm_mode_config_helper_resume(priv->drm); } static int compare_of(struct device *dev, void *data) diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 5be963e9db05..24a12c453095 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -1034,10 +1034,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, return PTR_ERR(dw_plat_data->regm); irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "Failed to get hdmi top irq\n"); + if (irq < 0) return irq; - } ret = devm_request_threaded_irq(dev, irq, dw_hdmi_top_irq, dw_hdmi_top_thread_irq, IRQF_SHARED, diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 1579cf0d828f..42f8aae28b31 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -65,6 +65,7 @@ msm-y := \ disp/dpu1/dpu_hw_lm.o \ disp/dpu1/dpu_hw_pingpong.o \ disp/dpu1/dpu_hw_sspp.o \ + disp/dpu1/dpu_hw_dspp.o \ disp/dpu1/dpu_hw_top.o \ disp/dpu1/dpu_hw_util.o \ disp/dpu1/dpu_hw_vbif.o \ diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 1f83bc18d500..60f6472a3e58 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -401,6 +401,21 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu) return state; } +static struct msm_gem_address_space * +a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) +{ + struct msm_mmu *mmu = 
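
The clock lookups in the MediaTek hunks above all gain the same guard: -EPROBE_DEFER is an expected, transient result of probe ordering, so it should not be logged as an error. A sketch of the pattern, with a hypothetical helper name (later kernels fold this into a single dev_err_probe() call):

```c
static int get_required_clk(struct device *dev, const char *name,
			    struct clk **clk)
{
	*clk = devm_clk_get(dev, name);
	if (IS_ERR(*clk)) {
		int ret = PTR_ERR(*clk);

		/* deferral is expected; only complain about real errors */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get %s clock: %d\n",
				name, ret);
		return ret;
	}

	return 0;
}
```
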
msm_gpummu_new(&pdev->dev, gpu); + struct msm_gem_address_space *aspace; + + aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, + SZ_16M + 0xfff * SZ_64K); + + if (IS_ERR(aspace) && !IS_ERR(mmu)) + mmu->funcs->destroy(mmu); + + return aspace; +} + /* Register offset defines for A2XX - copy of A3XX */ static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = { REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE), @@ -429,6 +444,7 @@ static const struct adreno_gpu_funcs funcs = { #endif .gpu_state_get = a2xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, + .create_address_space = a2xx_create_address_space, }, }; diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index b67f88872726..0a5ea9f56cb8 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -441,6 +441,7 @@ static const struct adreno_gpu_funcs funcs = { #endif .gpu_state_get = a3xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, + .create_address_space = adreno_iommu_create_address_space, }, }; diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 253d8d85daad..b9b26b2bf9c5 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -66,19 +66,22 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu) } } - for (i = 0; i < 4; i++) { - gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i), - 0x00000922); - } + /* No CCU for A405 */ + if (!adreno_is_a405(adreno_gpu)) { + for (i = 0; i < 4; i++) { + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i), + 0x00000922); + } - for (i = 0; i < 4; i++) { - gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i), - 0x00000000); - } + for (i = 0; i < 4; i++) { + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i), + 0x00000000); + } - for (i = 0; i < 4; i++) { - gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i), - 0x00000001); + for (i = 0; i < 4; i++) { + gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i), + 0x00000001); + } } gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222); @@ -137,7 +140,9 @@ static int a4xx_hw_init(struct msm_gpu *gpu) uint32_t *ptr, len; int i, ret; - if (adreno_is_a420(adreno_gpu)) { + if (adreno_is_a405(adreno_gpu)) { + gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003); + } else if (adreno_is_a420(adreno_gpu)) { gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F); gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4); gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001); @@ -440,6 +445,52 @@ static const unsigned int a4xx_registers[] = { ~0 /* sentinel */ }; +static const unsigned int a405_registers[] = { + /* RBBM */ + 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026, + 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066, + 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF, + /* CP */ + 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B, + 0x0578, 0x058F, + /* VSC */ + 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51, + /* GRAS */ + 0x0C80, 0x0C81, 0x0C88, 0x0C8F, + /* RB */ + 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2, + /* PC */ + 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23, + /* VFD */ + 0x0E40, 0x0E4A, + /* VPC */ + 0x0E60, 0x0E61, 0x0E63, 0x0E68, + /* UCHE */ + 0x0E80, 0x0E84, 0x0E88, 0x0E95, + /* GRAS CTX 0 */ + 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E, + /* PC CTX 0 */ + 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7, + /* VFD CTX 0 */ + 0x2200, 0x2204, 
0x2208, 0x22A9, + /* GRAS CTX 1 */ + 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E, + /* PC CTX 1 */ + 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7, + /* VFD CTX 1 */ + 0x2600, 0x2604, 0x2608, 0x26A9, + /* VBIF version 0x20050000*/ + 0x3000, 0x3007, 0x302C, 0x302C, 0x3030, 0x3030, 0x3034, 0x3036, + 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040, 0x3049, 0x3049, + 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068, 0x306C, 0x306D, + 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094, 0x3098, 0x3098, + 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8, 0x30D0, 0x30D0, + 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100, 0x3108, 0x3108, + 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120, 0x3124, 0x3125, + 0x3129, 0x3129, 0x340C, 0x340C, 0x3410, 0x3410, + ~0 /* sentinel */ +}; + static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu) { struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL); @@ -532,6 +583,7 @@ static const struct adreno_gpu_funcs funcs = { #endif .gpu_state_get = a4xx_gpu_state_get, .gpu_state_put = adreno_gpu_state_put, + .create_address_space = adreno_iommu_create_address_space, }, .get_timestamp = a4xx_get_timestamp, }; @@ -563,13 +615,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) gpu->perfcntrs = NULL; gpu->num_perfcntrs = 0; - adreno_gpu->registers = a4xx_registers; - adreno_gpu->reg_offsets = a4xx_register_offsets; - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) goto fail; + adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers : + a4xx_registers; + adreno_gpu->reg_offsets = a4xx_register_offsets; + /* if needed, allocate gmem: */ if (adreno_is_a4xx(adreno_gpu)) { ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu, diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 724024a2243a..d95970a73fb4 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1404,6 +1404,10 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu) { u64 busy_cycles, busy_time; + /* Only read the gpu busy if the hardware is already active */ + if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0) + return 0; + busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO, REG_A5XX_RBBM_PERFCTR_RBBM_0_HI); @@ -1412,6 +1416,8 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu) gpu->devfreq.busy_cycles = busy_cycles; + pm_runtime_put(&gpu->pdev->dev); + if (WARN_ON(busy_time > ~0LU)) return ~0LU; @@ -1439,6 +1445,7 @@ static const struct adreno_gpu_funcs funcs = { .gpu_busy = a5xx_gpu_busy, .gpu_state_get = a5xx_gpu_state_get, .gpu_state_put = a5xx_gpu_state_put, + .create_address_space = adreno_iommu_create_address_space, }, .get_timestamp = a5xx_get_timestamp, }; diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h index ed78fee2a262..47840b73cdda 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h @@ -1047,6 +1047,8 @@ enum a6xx_tex_type { #define REG_A6XX_CP_MISC_CNTL 0x00000840 +#define REG_A6XX_CP_APRIV_CNTL 0x00000844 + #define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1 #define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2 @@ -1764,6 +1766,8 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val) #define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010 +#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011 + #define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f #define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037 @@ -2418,6 +2422,16 @@ 
static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val) #define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604 +#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608 + +#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609 + +#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a + +#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b + +#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c + #define REG_A6XX_TPL1_PERFCTR_TP_SEL_0 0x0000b610 #define REG_A6XX_TPL1_PERFCTR_TP_SEL_1 0x0000b611 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index c4e71abbdd53..096be97ce9f9 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -2,14 +2,16 @@ /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */ #include <linux/clk.h> -#include <linux/dma-mapping.h> #include <linux/interconnect.h> #include <linux/pm_domain.h> #include <linux/pm_opp.h> #include <soc/qcom/cmd-db.h> +#include <drm/drm_gem.h> #include "a6xx_gpu.h" #include "a6xx_gmu.xml.h" +#include "msm_gem.h" +#include "msm_mmu.h" static void a6xx_gmu_fault(struct a6xx_gmu *gmu) { @@ -127,8 +129,6 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) if (ret) dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); - gmu->freq = gmu->gpu_freqs[index]; - /* * Eventually we will want to scale the path vote with the frequency but * for now leave it at max so that the performance is nominal. @@ -151,8 +151,21 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq) break; gmu->current_perf_index = perf_index; + gmu->freq = gmu->gpu_freqs[perf_index]; + + /* + * This can get called from devfreq while the hardware is idle. Don't + * bring up the power if it isn't already active + */ + if (pm_runtime_get_if_in_use(gmu->dev) == 0) + return; - __a6xx_gmu_set_freq(gmu, perf_index); + if (gmu->legacy) + __a6xx_gmu_set_freq(gmu, perf_index); + else + a6xx_hfi_set_freq(gmu, perf_index); + + pm_runtime_put(gmu->dev); } unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu) @@ -196,6 +209,12 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) u32 val; gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); + + /* Set the log wptr index + * note: downstream saves the value in poweroff and restores it here + */ + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0); + gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, @@ -232,8 +251,13 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) switch (state) { case GMU_OOB_GPU_SET: - request = GMU_OOB_GPU_SET_REQUEST; - ack = GMU_OOB_GPU_SET_ACK; + if (gmu->legacy) { + request = GMU_OOB_GPU_SET_REQUEST; + ack = GMU_OOB_GPU_SET_ACK; + } else { + request = GMU_OOB_GPU_SET_REQUEST_NEW; + ack = GMU_OOB_GPU_SET_ACK_NEW; + } name = "GPU_SET"; break; case GMU_OOB_BOOT_SLUMBER: @@ -272,6 +296,13 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) /* Clear a pending OOB state in the GMU */ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) { + if (!gmu->legacy) { + WARN_ON(state != GMU_OOB_GPU_SET); + gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, + 1 << GMU_OOB_GPU_SET_CLEAR_NEW); + return; + } + switch (state) { case GMU_OOB_GPU_SET: gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, @@ -294,6 +325,9 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu) int ret; u32 val; + if (!gmu->legacy) + return 0; + gmu_write(gmu, 
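
a6xx_gmu_set_freq() above (and the gpu_busy hooks elsewhere in this series) guard against devfreq calling in while the hardware is suspended: pm_runtime_get_if_in_use() only takes a usage reference when the device is already powered, returning 0 otherwise, so the callback can bail out instead of waking the GPU just to service a counter read. A sketch of the shape, where read_hw_counter() stands in for the real register accesses:

```c
static u64 sample_busy_cycles(struct device *dev)
{
	u64 cycles;

	/* Only touch the hardware if it is already active */
	if (pm_runtime_get_if_in_use(dev) == 0)
		return 0;	/* device idle: nothing to report */

	cycles = read_hw_counter();	/* hypothetical register read */

	pm_runtime_put(dev);

	return cycles;
}
```
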
REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val, @@ -313,6 +347,9 @@ static void a6xx_sptprac_disable(struct a6xx_gmu *gmu) u32 val; int ret; + if (!gmu->legacy) + return; + /* Make sure retention is on */ gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11)); @@ -356,6 +393,11 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) if (gmu->idle_level < GMU_IDLE_STATE_SPTP) a6xx_sptprac_disable(gmu); + if (!gmu->legacy) { + ret = a6xx_hfi_send_prep_slumber(gmu); + goto out; + } + /* Tell the GMU to get ready to slumber */ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1); @@ -371,6 +413,7 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu) } } +out: /* Put fence into allow mode */ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0); return ret; @@ -392,7 +435,7 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu) return ret; } - ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val, + ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val, !val, 100, 10000); if (ret) { @@ -418,7 +461,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu) gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1); - ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, + ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, val, val & (1 << 16), 100, 10000); if (ret) DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n"); @@ -441,32 +484,48 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) struct platform_device *pdev = to_platform_device(gmu->dev); void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq"); + uint32_t pdc_address_offset; if (!pdcptr || !seqptr) goto err; + if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu)) + pdc_address_offset = 0x30090; + else if (adreno_is_a650(adreno_gpu)) + pdc_address_offset = 0x300a0; + else + pdc_address_offset = 0x30080; + /* Disable SDE clock gating */ - gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); + gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24)); /* Setup RSC PDC handshake for sleep and wakeup */ - gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1); - gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0); - gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0); - gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0); - gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0); - gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000); - gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0); - gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0); - gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520); - gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510); - gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514); + gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000); + gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520); + gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510); + gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 
0x4514); /* Load RSC sequencer uCode for sleep and wakeup */ - gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0); - gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7); - gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1); - gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2); - gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); + if (adreno_is_a650(adreno_gpu)) { + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad); + } else { + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2); + gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8); + } /* Load PDC sequencer uCode for power up and power down sequence */ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1); @@ -487,10 +546,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108); - if (adreno_is_a618(adreno_gpu)) - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090); - else - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7); @@ -502,17 +558,12 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000); - if (adreno_is_a618(adreno_gpu)) + if (adreno_is_a618(adreno_gpu) || adreno_is_a650(adreno_gpu)) pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2); else pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3); - - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108); - if (adreno_is_a618(adreno_gpu)) - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090); - else - pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080); + pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset); pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3); /* Setup GPU PDC */ @@ -542,6 +593,8 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) { /* Disable GMU WB/RB buffer */ gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1); + gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1); + gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1); gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400); @@ -571,14 +624,95 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu) A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE); } +struct block_header { + u32 addr; + u32 size; + u32 type; + u32 value; + u32 data[]; +}; + +/* this should be a general kernel helper */ +static int in_range(u32 addr, u32 start, u32 size) +{ + return addr >= start && addr < start + size; +} + +static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk) +{ + if (!in_range(blk->addr, bo->iova, bo->size)) + return false; + + memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size); + 
return true; +} + +static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) +{ + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU]; + const struct block_header *blk; + u32 reg_offset; + + u32 itcm_base = 0x00000000; + u32 dtcm_base = 0x00040000; + + if (adreno_is_a650(adreno_gpu)) + dtcm_base = 0x10004000; + + if (gmu->legacy) { + /* Sanity check the size of the firmware that was loaded */ + if (fw_image->size > 0x8000) { + DRM_DEV_ERROR(gmu->dev, + "GMU firmware is bigger than the available region\n"); + return -EINVAL; + } + + gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START, + (u32*) fw_image->data, fw_image->size); + return 0; + } + + + for (blk = (const struct block_header *) fw_image->data; + (const u8*) blk < fw_image->data + fw_image->size; + blk = (const struct block_header *) &blk->data[blk->size >> 2]) { + if (blk->size == 0) + continue; + + if (in_range(blk->addr, itcm_base, SZ_16K)) { + reg_offset = (blk->addr - itcm_base) >> 2; + gmu_write_bulk(gmu, + REG_A6XX_GMU_CM3_ITCM_START + reg_offset, + blk->data, blk->size); + } else if (in_range(blk->addr, dtcm_base, SZ_16K)) { + reg_offset = (blk->addr - dtcm_base) >> 2; + gmu_write_bulk(gmu, + REG_A6XX_GMU_CM3_DTCM_START + reg_offset, + blk->data, blk->size); + } else if (!fw_block_mem(&gmu->icache, blk) && + !fw_block_mem(&gmu->dcache, blk) && + !fw_block_mem(&gmu->dummy, blk)) { + DRM_DEV_ERROR(gmu->dev, + "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n", + blk->addr, blk->size, blk->data[0]); + } + } + + return 0; +} + static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) { static bool rpmh_init; struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; - int i, ret; + int ret; u32 chipid; - u32 *image; + + if (adreno_is_a650(adreno_gpu)) + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1); if (state == GMU_WARM_BOOT) { ret = a6xx_rpmh_start(gmu); @@ -589,13 +723,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) "GMU firmware is not loaded\n")) return -ENOENT; - /* Sanity check the size of the firmware that was loaded */ - if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) { - DRM_DEV_ERROR(gmu->dev, - "GMU firmware is bigger than the available region\n"); - return -EINVAL; - } - /* Turn on register retention */ gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1); @@ -609,18 +736,16 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) return ret; } - image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data; - - for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++) - gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i, - image[i]); + ret = a6xx_gmu_fw_load(gmu); + if (ret) + return ret; } gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0); gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02); /* Write the iova of the HFI table */ - gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova); + gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova); gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1); gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0, @@ -633,6 +758,9 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid); + gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG, + gmu->log.iova | (gmu->log.size / SZ_4K - 1)); + /* Set up the lowest idle level on the GMU */ a6xx_gmu_power_config(gmu); @@ -640,9 +768,11 
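
The new GMU firmware image parsed by a6xx_gmu_fw_load() above is a stream of self-describing blocks rather than a flat ITCM blob. Each record is a fixed header followed by blk->size bytes of payload, and the next record starts immediately after the payload — hence the `&blk->data[blk->size >> 2]` step, since size is in bytes while data[] is u32s. A standalone sketch of the walk (walk_fw_blocks() is illustrative only):

```c
/* Kernel-style sketch; u32 etc. come from <linux/types.h>. */
struct block_header {
	u32 addr;	/* load address in the GMU's view */
	u32 size;	/* payload size in bytes */
	u32 type;
	u32 value;
	u32 data[];	/* payload, size bytes long */
};

static void walk_fw_blocks(const u8 *fw, size_t fw_size)
{
	const struct block_header *blk;

	for (blk = (const struct block_header *)fw;
	     (const u8 *)blk < fw + fw_size;
	     blk = (const struct block_header *)&blk->data[blk->size >> 2]) {
		if (blk->size == 0)
			continue;	/* empty block: step over the header */
		/*
		 * Dispatch on blk->addr: ITCM/DTCM ranges are streamed
		 * through the register windows, anything else is matched
		 * against the preallocated buffers (fw_block_mem() above).
		 */
	}
}
```
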
@@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state) if (ret) return ret; - ret = a6xx_gmu_gfx_rail_on(gmu); - if (ret) - return ret; + if (gmu->legacy) { + ret = a6xx_gmu_gfx_rail_on(gmu); + if (ret) + return ret; + } /* Enable SPTP_PC if the CPU is responsible for it */ if (gmu->idle_level < GMU_IDLE_STATE_SPTP) { @@ -683,13 +813,13 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) u32 val; /* Make sure there are no outstanding RPMh votes */ - gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val, + gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val, (val & 1), 100, 10000); - gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val, + gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val, (val & 1), 100, 10000); - gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val, + gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val, (val & 1), 100, 10000); - gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val, + gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val, (val & 1), 100, 1000); } @@ -744,6 +874,13 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ? GMU_WARM_BOOT : GMU_COLD_BOOT; + /* + * Warm boot path does not work on newer GPUs + * Presumably this is because icache/dcache regions must be restored + */ + if (!gmu->legacy) + status = GMU_COLD_BOOT; + ret = a6xx_gmu_fw_start(gmu, status); if (ret) goto out; @@ -761,7 +898,10 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) enable_irq(gmu->hfi_irq); /* Set the GPU to the current freq */ - __a6xx_gmu_set_freq(gmu, gmu->current_perf_index); + if (gmu->legacy) + __a6xx_gmu_set_freq(gmu, gmu->current_perf_index); + else + a6xx_hfi_set_freq(gmu, gmu->current_perf_index); /* * "enable" the GX power domain which won't actually do anything but it @@ -919,34 +1059,75 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) return 0; } -static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo) +static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu) { - if (IS_ERR_OR_NULL(bo)) - return; - - dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova); - kfree(bo); + msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false); + msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false); + msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false); + msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false); + msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false); + msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false); + + gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu); + msm_gem_address_space_put(gmu->aspace); } -static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, - size_t size) +static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, + size_t size, u64 iova) { - struct a6xx_gmu_bo *bo; + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct drm_device *dev = a6xx_gpu->base.base.dev; + uint32_t flags = MSM_BO_WC; + u64 range_start, range_end; + int ret; - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (!bo) - return ERR_PTR(-ENOMEM); + size = PAGE_ALIGN(size); + if (!iova) { + /* no fixed address - use GMU's uncached range */ + range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */ + range_end = 0x80000000; + } else { + /* range for fixed address */ + range_start = iova; + range_end = iova + size; + /* use IOMMU_PRIV for icache/dcache */ + flags |= MSM_BO_MAP_PRIV; + } - bo->size = PAGE_ALIGN(size); + bo->obj = msm_gem_new(dev, size, flags); + if 
(IS_ERR(bo->obj)) + return PTR_ERR(bo->obj); - bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL); + ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova, + range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT); + if (ret) { + drm_gem_object_put(bo->obj); + return ret; + } + + bo->virt = msm_gem_get_vaddr(bo->obj); + bo->size = size; + + return 0; +} + +static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) +{ + struct iommu_domain *domain; + struct msm_mmu *mmu; - if (!bo->virt) { - kfree(bo); - return ERR_PTR(-ENOMEM); + domain = iommu_domain_alloc(&platform_bus_type); + if (!domain) + return -ENODEV; + + mmu = msm_iommu_new(gmu->dev, domain); + gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff); + if (IS_ERR(gmu->aspace)) { + iommu_domain_free(domain); + return PTR_ERR(gmu->aspace); } - return bo; + return 0; } /* Return the 'arc-level' for the given frequency */ @@ -1011,8 +1192,8 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, if (j == pri_count) { DRM_DEV_ERROR(dev, - "Level %u not found in in the RPMh list\n", - level); + "Level %u not found in the RPMh list\n", + level); DRM_DEV_ERROR(dev, "Available levels:\n"); for (j = 0; j < pri_count; j++) DRM_DEV_ERROR(dev, " %u\n", pri[j]); @@ -1190,6 +1371,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) { struct a6xx_gmu *gmu = &a6xx_gpu->gmu; + struct platform_device *pdev = to_platform_device(gmu->dev); if (!gmu->initialized) return; @@ -1202,9 +1384,12 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) } iounmap(gmu->mmio); + if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc")) + iounmap(gmu->rscc); gmu->mmio = NULL; + gmu->rscc = NULL; - a6xx_gmu_memory_free(gmu, gmu->hfi); + a6xx_gmu_memory_free(gmu); free_irq(gmu->gmu_irq, gmu); free_irq(gmu->hfi_irq, gmu); @@ -1217,6 +1402,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu) int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) { + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct a6xx_gmu *gmu = &a6xx_gpu->gmu; struct platform_device *pdev = of_find_device_by_node(node); int ret; @@ -1226,15 +1412,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) gmu->dev = &pdev->dev; - /* Pass force_dma false to require the DT to set the dma region */ - ret = of_dma_configure(gmu->dev, node, false); - if (ret) - return ret; - - /* Set the mask after the of_dma_configure() */ - ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31)); - if (ret) - return ret; + of_dma_configure(gmu->dev, node, true); /* Fow now, don't do anything fancy until we get our feet under us */ gmu->idle_level = GMU_IDLE_STATE_ACTIVE; @@ -1246,20 +1424,64 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) if (ret) goto err_put_device; + ret = a6xx_gmu_memory_probe(gmu); + if (ret) + goto err_put_device; + + /* Allocate memory for the GMU dummy page */ + ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000); + if (ret) + goto err_memory; + + if (adreno_is_a650(adreno_gpu)) { + ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, + SZ_16M - SZ_16K, 0x04000); + if (ret) + goto err_memory; + } else if (adreno_is_a640(adreno_gpu)) { + ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache, + SZ_256K - SZ_16K, 0x04000); + if (ret) + goto err_memory; + + ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache, + SZ_256K - SZ_16K, 0x44000); + if (ret) + goto err_memory; + } else { + /* HFI v1, has 
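
a6xx_gmu_memory_alloc() above distinguishes two placement modes in the GMU's new private address space: buffers with a fixed iova (the dummy page, icache, dcache) are pinned to their exact range with a privileged mapping, while everything else lands in the GMU's uncached window, skipping the dummy page at its base. A condensed restatement of the range selection, using the same constants as the code above (gmu_bo_pick_range() is a hypothetical name):

```c
static void gmu_bo_pick_range(u64 iova, size_t size, u64 *start, u64 *end,
			      uint32_t *flags)
{
	if (!iova) {
		/* no fixed address: GMU uncached window, past the dummy page */
		*start = 0x60000000 + PAGE_SIZE;
		*end   = 0x80000000;
	} else {
		/* fixed address (dummy page, icache, dcache) */
		*start = iova;
		*end   = iova + size;
		*flags |= MSM_BO_MAP_PRIV;	/* privileged IOMMU mapping */
	}
}
```
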
sptprac */ + gmu->legacy = true; + + /* Allocate memory for the GMU debug region */ + ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0); + if (ret) + goto err_memory; + } + /* Allocate memory for for the HFI queues */ - gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K); - if (IS_ERR(gmu->hfi)) + ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0); + if (ret) goto err_memory; - /* Allocate memory for the GMU debug region */ - gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K); - if (IS_ERR(gmu->debug)) + /* Allocate memory for the GMU log region */ + ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0); + if (ret) goto err_memory; /* Map the GMU registers */ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); - if (IS_ERR(gmu->mmio)) + if (IS_ERR(gmu->mmio)) { + ret = PTR_ERR(gmu->mmio); goto err_memory; + } + + if (adreno_is_a650(adreno_gpu)) { + gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc"); + if (IS_ERR(gmu->rscc)) + goto err_mmio; + } else { + gmu->rscc = gmu->mmio + 0x23000; + } /* Get the HFI and GMU interrupts */ gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); @@ -1286,13 +1508,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) err_mmio: iounmap(gmu->mmio); + if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc")) + iounmap(gmu->rscc); free_irq(gmu->gmu_irq, gmu); free_irq(gmu->hfi_irq, gmu); -err_memory: - a6xx_gmu_memory_free(gmu, gmu->hfi); ret = -ENODEV; +err_memory: + a6xx_gmu_memory_free(gmu); err_put_device: /* Drop reference taken in of_find_device_by_node */ put_device(gmu->dev); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 4af65a36d5ca..47df4745db50 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -10,9 +10,10 @@ #include "a6xx_hfi.h" struct a6xx_gmu_bo { + struct drm_gem_object *obj; void *virt; size_t size; - dma_addr_t iova; + u64 iova; }; /* @@ -43,7 +44,10 @@ struct a6xx_gmu_bo { struct a6xx_gmu { struct device *dev; + struct msm_gem_address_space *aspace; + void * __iomem mmio; + void * __iomem rscc; int hfi_irq; int gmu_irq; @@ -52,8 +56,12 @@ struct a6xx_gmu { int idle_level; - struct a6xx_gmu_bo *hfi; - struct a6xx_gmu_bo *debug; + struct a6xx_gmu_bo hfi; + struct a6xx_gmu_bo debug; + struct a6xx_gmu_bo icache; + struct a6xx_gmu_bo dcache; + struct a6xx_gmu_bo dummy; + struct a6xx_gmu_bo log; int nr_clocks; struct clk_bulk_data *clocks; @@ -76,6 +84,7 @@ struct a6xx_gmu { bool initialized; bool hung; + bool legacy; /* a618 or a630 */ }; static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset) @@ -88,6 +97,13 @@ static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value) return msm_writel(value, gmu->mmio + (offset << 2)); } +static inline void +gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size) +{ + memcpy_toio(gmu->mmio + (offset << 2), data, size); + wmb(); +} + static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or) { u32 val = gmu_read(gmu, reg); @@ -111,6 +127,15 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi) readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \ interval, timeout) +static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value) +{ + return msm_writel(value, gmu->rscc + (offset << 2)); +} + +#define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \ + readl_poll_timeout((gmu)->rscc + ((addr) << 2), val, cond, \ + interval, timeout) + /* * These are the available OOB (out of band 
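
gmu_write_bulk() introduced above is a thin wrapper: memcpy_toio() streams a buffer into the register window (the offset is in 32-bit registers, hence the `<< 2`), and wmb() orders the posted writes before anything that depends on them — which is what lets the firmware loader replace its old one-register-at-a-time loop. A minimal equivalent under those assumptions:

```c
static inline void mmio_write_bulk(void __iomem *base, u32 reg_offset,
				   const u32 *data, u32 size_bytes)
{
	memcpy_toio(base + (reg_offset << 2), data, size_bytes);
	wmb();	/* ensure the whole block lands before later writes */
}
```

The parallel gmu_write_rscc()/gmu_poll_timeout_rscc() helpers apply the same accessor pattern to a second register window, since on a650 the RSCC is a separate "rscc" MMIO resource while older parts alias it inside the GMU region.
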
requests) to the GMU where "out of * band" means that the CPU talks to the GMU directly and not through HFI. @@ -156,10 +181,16 @@ enum a6xx_gmu_oob_state { #define GMU_OOB_GPU_SET_ACK 24 #define GMU_OOB_GPU_SET_CLEAR 24 +#define GMU_OOB_GPU_SET_REQUEST_NEW 30 +#define GMU_OOB_GPU_SET_ACK_NEW 31 +#define GMU_OOB_GPU_SET_CLEAR_NEW 31 + void a6xx_hfi_init(struct a6xx_gmu *gmu); int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state); void a6xx_hfi_stop(struct a6xx_gmu *gmu); +int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu); +int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index); bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu); bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h index 1cc1c135236b..176ae94d9fe6 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h @@ -101,6 +101,10 @@ static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val) #define REG_A6XX_GMU_DCVS_RETURN 0x000023ff +#define REG_A6XX_GMU_ICACHE_CONFIG 0x00004c00 + +#define REG_A6XX_GMU_DCACHE_CONFIG 0x00004c01 + #define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f #define REG_A6XX_GMU_CM3_SYSRESET 0x00005000 @@ -199,6 +203,12 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val) #define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec +#define REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF 0x000050f0 + +#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG 0x00005100 + +#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP 0x00005101 + #define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0 #define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157 @@ -330,8 +340,6 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val) #define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316 -#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00008c04 - #define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307 #define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308 @@ -344,39 +352,41 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val) #define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42 -#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00008c08 +#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00000004 + +#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00000008 -#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00008c09 +#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00000009 -#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x00008c0a +#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x0000000a -#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x00008c0b +#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x0000000b -#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x00008c0d +#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x0000000d -#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x00008c0e +#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x0000000e -#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00008c82 +#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00000082 -#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00008c83 +#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00000083 -#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00008c89 +#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00000089 -#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x00008c8c +#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x0000008c -#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00008d00 +#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00000100 -#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00008d01 +#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00000101 -#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00008d80 +#define 
REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00000180 -#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00008f46 +#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00000346 -#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000090ae +#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000003ee -#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00009216 +#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00000496 -#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000937e +#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000053e #endif /* A6XX_GMU_XML */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 68af24150de5..a1589e040c57 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -414,7 +414,17 @@ static int a6xx_hw_init(struct msm_gpu *gpu) a6xx_set_hwcg(gpu, true); /* VBIF/GBIF start*/ - gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3); + if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620); + gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620); + gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620); + gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620); + gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620); + gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3); + } else { + gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3); + } + if (adreno_is_a630(adreno_gpu)) gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009); @@ -429,25 +439,35 @@ static int a6xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000); gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff); - /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ - gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO, - REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000); + if (!adreno_is_a650(adreno_gpu)) { + /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ + gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO, + REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000); - gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO, - REG_A6XX_UCHE_GMEM_RANGE_MAX_HI, - 0x00100000 + adreno_gpu->gmem - 1); + gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO, + REG_A6XX_UCHE_GMEM_RANGE_MAX_HI, + 0x00100000 + adreno_gpu->gmem - 1); + } gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804); gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4); - gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0); + if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) + gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140); + else + gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0); gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c); /* Setting the mem pool size */ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128); /* Setting the primFifo thresholds default values */ - gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11)); + if (adreno_is_a650(adreno_gpu)) + gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300000); + else if (adreno_is_a640(adreno_gpu)) + gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200000); + else + gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11)); /* Set the AHB default slave response to "ERROR" */ gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1); @@ -471,6 +491,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1); + /* Set weights for bicubic filtering */ + if (adreno_is_a650(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0); + gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1, + 0x3fe05ff4); + gpu_write(gpu, 
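
The RSCC #defines above switch from absolute GMU register offsets to offsets relative to the new rscc window, which on pre-a650 parts aliases `gmu->mmio + 0x23000`. The renumbering can be checked directly from the diff; the RSCC_LEGACY_* names below are illustrative, not real symbols:

```c
/*
 * 0x23000 bytes is 0x8c00 32-bit registers, and most old values are
 * exactly (new + 0x8c00):
 *
 *   PDC_SEQ_START_ADDR   0x0008 + 0x8c00 = 0x8c08
 *   SEQ_BUSY_DRV0        0x0101 + 0x8c00 = 0x8d01
 *   SEQ_MEM_0_DRV0       0x0180 + 0x8c00 = 0x8d80
 *   TCS0_DRV0_STATUS     0x0346 + 0x8c00 = 0x8f46
 *
 * The TCS1-3 status offsets are the exception: their stride shrinks
 * from 0x168 to 0xa8 registers, so those entries are corrected rather
 * than merely rebased.
 */
#define RSCC_LEGACY_OFFSET_BYTES	0x23000
#define RSCC_LEGACY_OFFSET_REGS		(RSCC_LEGACY_OFFSET_BYTES >> 2)	/* 0x8c00 */
```
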
REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2, + 0x3fa0ebee); + gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3, + 0x3f5193ed); + gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4, + 0x3f0243f0); + } + /* Protect registers from the CP */ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003); @@ -508,6 +541,11 @@ static int a6xx_hw_init(struct msm_gpu *gpu) A6XX_PROTECT_RDONLY(0x980, 0x4)); gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0)); + if (adreno_is_a650(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL, + (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1)); + } + /* Enable interrupts */ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK); @@ -566,8 +604,10 @@ out: */ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); - /* Take the GMU out of its special boot mode */ - a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER); + if (a6xx_gpu->gmu.legacy) { + /* Take the GMU out of its special boot mode */ + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER); + } return ret; } @@ -810,6 +850,11 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); u64 busy_cycles, busy_time; + + /* Only read the gpu busy if the hardware is already active */ + if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0) + return 0; + busy_cycles = gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H); @@ -819,6 +864,8 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) gpu->devfreq.busy_cycles = busy_cycles; + pm_runtime_put(a6xx_gpu->gmu.dev); + if (WARN_ON(busy_time > ~0LU)) return ~0LU; @@ -846,6 +893,7 @@ static const struct adreno_gpu_funcs funcs = { #if defined(CONFIG_DRM_MSM_GPU_STATE) .gpu_state_get = a6xx_gpu_state_get, .gpu_state_put = a6xx_gpu_state_put, + .create_address_space = adreno_iommu_create_address_space, #endif }, .get_timestamp = a6xx_get_timestamp, diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c index e450e0b97211..9921e632f1ca 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c @@ -17,10 +17,14 @@ static const char * const a6xx_hfi_msg_id[] = { HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE), HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE), HFI_MSG_ID(HFI_H2F_MSG_TEST), + HFI_MSG_ID(HFI_H2F_MSG_START), + HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START), + HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE), + HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER), }; -static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data, - u32 dwords) +static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu, + struct a6xx_hfi_queue *queue, u32 *data, u32 dwords) { struct a6xx_hfi_queue_header *header = queue->header; u32 i, hdr, index = header->read_index; @@ -48,6 +52,9 @@ static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data, index = (index + 1) % header->size; } + if (!gmu->legacy) + index = ALIGN(index, 4) % header->size; + header->read_index = index; return HFI_HEADER_SIZE(hdr); } @@ -73,6 +80,12 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu, index = (index + 1) % header->size; } + /* Cookify any non used data at the end of the write buffer */ + if (!gmu->legacy) { + for (; index % 4; index = (index + 1) % header->size) + queue->data[index] = 0xfafafafa; + } + header->write_index = index; spin_unlock(&queue->lock); @@ -106,7 +119,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum, struct a6xx_hfi_msg_response resp; /* Get the next packet */ - ret = 
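
The HFI queue changes above suggest that non-legacy GMU firmware consumes queue entries in groups of four dwords: the writer pads each message tail with a recognizable filler value, and the reader re-aligns its index the same way after every message. A sketch of both halves under that assumption (helper names are hypothetical):

```c
#define HFI_QUEUE_FILLER	0xfafafafa	/* value used above */

/* Writer side: pad the message tail out to a four-dword boundary. */
static u32 hfi_pad_write_index(u32 *data, u32 index, u32 queue_size)
{
	for (; index % 4; index = (index + 1) % queue_size)
		data[index] = HFI_QUEUE_FILLER;

	return index;
}

/* Reader side: skip the same padding after consuming a message. */
static u32 hfi_align_read_index(u32 index, u32 queue_size)
{
	return ALIGN(index, 4) % queue_size;
}
```
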
a6xx_hfi_queue_read(queue, (u32 *) &resp, + ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp, sizeof(resp) >> 2); /* If the queue is empty our response never made it */ @@ -176,8 +189,8 @@ static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state) { struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 }; - msg.dbg_buffer_addr = (u32) gmu->debug->iova; - msg.dbg_buffer_size = (u32) gmu->debug->size; + msg.dbg_buffer_addr = (u32) gmu->debug.iova; + msg.dbg_buffer_size = (u32) gmu->debug.size; msg.boot_state = boot_state; return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg), @@ -195,6 +208,28 @@ static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version) version, sizeof(*version)); } +static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_msg_perf_table_v1 msg = { 0 }; + int i; + + msg.num_gpu_levels = gmu->nr_gpu_freqs; + msg.num_gmu_levels = gmu->nr_gmu_freqs; + + for (i = 0; i < gmu->nr_gpu_freqs; i++) { + msg.gx_votes[i].vote = gmu->gx_arc_votes[i]; + msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; + } + + for (i = 0; i < gmu->nr_gmu_freqs; i++) { + msg.cx_votes[i].vote = gmu->cx_arc_votes[i]; + msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000; + } + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg), + NULL, 0); +} + static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu) { struct a6xx_hfi_msg_perf_table msg = { 0 }; @@ -205,6 +240,7 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu) for (i = 0; i < gmu->nr_gpu_freqs; i++) { msg.gx_votes[i].vote = gmu->gx_arc_votes[i]; + msg.gx_votes[i].acd = 0xffffffff; msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000; } @@ -306,7 +342,45 @@ static int a6xx_hfi_send_test(struct a6xx_gmu *gmu) NULL, 0); } -int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state) +static int a6xx_hfi_send_start(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_msg_start msg = { 0 }; + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg), + NULL, 0); +} + +static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_msg_core_fw_start msg = { 0 }; + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg, + sizeof(msg), NULL, 0); +} + +int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index) +{ + struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 }; + + msg.ack_type = 1; /* blocking */ + msg.freq = index; + msg.bw = 0; /* TODO: bus scaling */ + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg, + sizeof(msg), NULL, 0); +} + +int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu) +{ + struct a6xx_hfi_prep_slumber_cmd msg = { 0 }; + + /* TODO: should freq and bw fields be non-zero ? 
*/ + + return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg, + sizeof(msg), NULL, 0); +} + +static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state) { int ret; @@ -324,7 +398,7 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state) * the GMU firmware */ - ret = a6xx_hfi_send_perf_table(gmu); + ret = a6xx_hfi_send_perf_table_v1(gmu); if (ret) return ret; @@ -341,6 +415,37 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state) return 0; } +int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state) +{ + int ret; + + if (gmu->legacy) + return a6xx_hfi_start_v1(gmu, boot_state); + + + ret = a6xx_hfi_send_perf_table(gmu); + if (ret) + return ret; + + ret = a6xx_hfi_send_bw_table(gmu); + if (ret) + return ret; + + ret = a6xx_hfi_send_core_fw_start(gmu); + if (ret) + return ret; + + /* + * Downstream driver sends this in its "a6xx_hw_init" equivalent, + * but seems to be no harm in sending it here + */ + ret = a6xx_hfi_send_start(gmu); + if (ret) + return ret; + + return 0; +} + void a6xx_hfi_stop(struct a6xx_gmu *gmu) { int i; @@ -385,7 +490,7 @@ static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue, void a6xx_hfi_init(struct a6xx_gmu *gmu) { - struct a6xx_gmu_bo *hfi = gmu->hfi; + struct a6xx_gmu_bo *hfi = &gmu->hfi; struct a6xx_hfi_queue_table_header *table = hfi->virt; struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table); u64 offset; @@ -415,5 +520,5 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu) /* GMU response queue */ offset += SZ_4K; a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset, - hfi->iova + offset, 4); + hfi->iova + offset, gmu->legacy ? 4 : 1); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h index 60d1319fa44f..2bd670ca42d6 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h @@ -51,7 +51,8 @@ struct a6xx_hfi_queue { /* HFI message types */ #define HFI_MSG_CMD 0 -#define HFI_MSG_ACK 2 +#define HFI_MSG_ACK 1 +#define HFI_MSG_ACK_V1 2 #define HFI_F2H_MSG_ACK 126 @@ -94,7 +95,13 @@ struct perf_level { u32 freq; }; -struct a6xx_hfi_msg_perf_table { +struct perf_gx_level { + u32 vote; + u32 acd; + u32 freq; +}; + +struct a6xx_hfi_msg_perf_table_v1 { u32 header; u32 num_gpu_levels; u32 num_gmu_levels; @@ -103,6 +110,15 @@ struct a6xx_hfi_msg_perf_table { struct perf_level cx_votes[4]; }; +struct a6xx_hfi_msg_perf_table { + u32 header; + u32 num_gpu_levels; + u32 num_gmu_levels; + + struct perf_gx_level gx_votes[16]; + struct perf_level cx_votes[4]; +}; + #define HFI_H2F_MSG_BW_TABLE 3 struct a6xx_hfi_msg_bw_table { @@ -124,4 +140,34 @@ struct a6xx_hfi_msg_test { u32 header; }; +#define HFI_H2F_MSG_START 10 + +struct a6xx_hfi_msg_start { + u32 header; +}; + +#define HFI_H2F_MSG_CORE_FW_START 14 + +struct a6xx_hfi_msg_core_fw_start { + u32 header; + u32 handle; +}; + +#define HFI_H2F_MSG_GX_BW_PERF_VOTE 30 + +struct a6xx_hfi_gx_bw_perf_vote_cmd { + u32 header; + u32 ack_type; + u32 freq; + u32 bw; +}; + +#define HFI_H2F_MSG_PREPARE_SLUMBER 33 + +struct a6xx_hfi_prep_slumber_cmd { + u32 header; + u32 bw; + u32 freq; +}; + #endif diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index cb3a6e597d76..7732f03d9e3a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -93,6 +93,17 @@ static const struct adreno_info gpulist[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a3xx_gpu_init, }, { + .rev = ADRENO_REV(4, 0, 5, ANY_ID), + 
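
Both perf-table layouts above have to coexist because the per-entry stride is part of the wire format: legacy firmware reads two dwords per GX performance level, newer firmware three, and gmu->legacy decides which message is sent. The entry shapes, copied from the header changes above (the v1 table is assumed to keep the original two-dword entries for its GX votes as well):

```c
/* v1 (legacy) vote entry: two dwords per performance level */
struct perf_level {
	u32 vote;
	u32 freq;
};

/* v2 GX vote entry: an ACD dword in the middle (sent as 0xffffffff
 * above, i.e. effectively unused for now) */
struct perf_gx_level {
	u32 vote;
	u32 acd;
	u32 freq;
};
```
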
.revn = 405, + .name = "A405", + .fw = { + [ADRENO_FW_PM4] = "a420_pm4.fw", + [ADRENO_FW_PFP] = "a420_pfp.fw", + }, + .gmem = SZ_256K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a4xx_gpu_init, + }, { .rev = ADRENO_REV(4, 2, 0, ANY_ID), .revn = 420, .name = "A420", @@ -189,6 +200,30 @@ static const struct adreno_info gpulist[] = { .inactive_period = DRM_MSM_INACTIVE_PERIOD, .init = a6xx_gpu_init, .zapfw = "a630_zap.mdt", + }, { + .rev = ADRENO_REV(6, 4, 0, ANY_ID), + .revn = 640, + .name = "A640", + .fw = { + [ADRENO_FW_SQE] = "a630_sqe.fw", + [ADRENO_FW_GMU] = "a640_gmu.bin", + }, + .gmem = SZ_1M, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + .zapfw = "a640_zap.mdt", + }, { + .rev = ADRENO_REV(6, 5, 0, ANY_ID), + .revn = 650, + .name = "A650", + .fw = { + [ADRENO_FW_SQE] = "a650_sqe.fw", + [ADRENO_FW_GMU] = "a650_gmu.bin", + }, + .gmem = SZ_1M + SZ_128K, + .inactive_period = DRM_MSM_INACTIVE_PERIOD, + .init = a6xx_gpu_init, + .zapfw = "a650_zap.mdt", }, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 1d5c43c22269..89673c7ed473 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -185,6 +185,23 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); } +struct msm_gem_address_space * +adreno_iommu_create_address_space(struct msm_gpu *gpu, + struct platform_device *pdev) +{ + struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type); + struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu); + struct msm_gem_address_space *aspace; + + aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M, + 0xfffffff); + + if (IS_ERR(aspace) && !IS_ERR(mmu)) + mmu->funcs->destroy(mmu); + + return aspace; +} + int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -197,7 +214,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) *value = adreno_gpu->gmem; return 0; case MSM_PARAM_GMEM_BASE: - *value = 0x100000; + *value = !adreno_is_a650(adreno_gpu) ? 0x100000 : 0; return 0; case MSM_PARAM_CHIP_ID: *value = adreno_gpu->rev.patchid | @@ -459,7 +476,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, break; /* fall-thru */ case MSM_SUBMIT_CMD_BUF: - OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ? + OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ? 
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); OUT_RING(ring, submit->cmd[i].size); @@ -988,12 +1005,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, adreno_gpu_config.ioname = "kgsl_3d0_reg_memory"; - adreno_gpu_config.va_start = SZ_16M; - adreno_gpu_config.va_end = 0xffffffff; - /* maximum range of a2xx mmu */ - if (adreno_is_a2xx(adreno_gpu)) - adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K; - adreno_gpu_config.nr_rings = nr_rings; adreno_get_pwrlevels(&pdev->dev, gpu); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 9ff4e550e7bd..2f5d2c3acc3a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -202,6 +202,11 @@ static inline bool adreno_is_a4xx(struct adreno_gpu *gpu) return (gpu->revn >= 400) && (gpu->revn < 500); } +static inline int adreno_is_a405(struct adreno_gpu *gpu) +{ + return gpu->revn == 405; +} + static inline int adreno_is_a420(struct adreno_gpu *gpu) { return gpu->revn == 420; @@ -237,6 +242,16 @@ static inline int adreno_is_a630(struct adreno_gpu *gpu) return gpu->revn == 630; } +static inline int adreno_is_a640(struct adreno_gpu *gpu) +{ + return gpu->revn == 640; +} + +static inline int adreno_is_a650(struct adreno_gpu *gpu) +{ + return gpu->revn == 650; +} + int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname); @@ -273,6 +288,14 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state); int adreno_gpu_state_put(struct msm_gpu_state *state); /* + * Common helper function to initialize the default address space for arm-smmu + * attached targets + */ +struct msm_gem_address_space * +adreno_iommu_create_address_space(struct msm_gpu *gpu, + struct platform_device *pdev); + +/* * For a5xx and a6xx targets load the zap shader that is used to pull the GPU * out of secure mode */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c index 11f2bebe3869..7c230f719ad3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c @@ -36,22 +36,6 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) return to_dpu_kms(priv->kms); } -static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc) -{ - struct drm_crtc *tmp_crtc; - - drm_for_each_crtc(tmp_crtc, crtc->dev) { - if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) && - tmp_crtc->enabled) { - DPU_DEBUG("video interface connected crtc:%d\n", - tmp_crtc->base.id); - return true; - } - } - - return false; -} - static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms, struct drm_crtc *crtc, struct drm_crtc_state *state, @@ -94,7 +78,6 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, u32 bw, threshold; u64 bw_sum_of_intfs = 0; enum dpu_crtc_client_type curr_client_type; - bool is_video_mode; struct dpu_crtc_state *dpu_cstate; struct drm_crtc *tmp_crtc; struct dpu_kms *kms; @@ -144,11 +127,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc, bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000); DPU_DEBUG("calculated bandwidth=%uk\n", bw); - is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO; - threshold = (is_video_mode || - _dpu_core_video_mode_intf_connected(crtc)) ? 
- kms->catalog->perf.max_bw_low : - kms->catalog->perf.max_bw_high; + threshold = kms->catalog->perf.max_bw_high; DPU_DEBUG("final threshold bw limit = %d\n", threshold); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 17448505a9b5..e15b42a780e0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -9,6 +9,7 @@ #include <linux/sort.h> #include <linux/debugfs.h> #include <linux/ktime.h> +#include <linux/bits.h> #include <drm/drm_crtc.h> #include <drm/drm_flip_work.h> @@ -20,6 +21,7 @@ #include "dpu_kms.h" #include "dpu_hw_lm.h" #include "dpu_hw_ctl.h" +#include "dpu_hw_dspp.h" #include "dpu_crtc.h" #include "dpu_plane.h" #include "dpu_encoder.h" @@ -40,6 +42,9 @@ /* timeout in ms waiting for frame done */ #define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60 +#define CONVERT_S3_15(val) \ + (((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0)) + static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc) { struct msm_drm_private *priv = crtc->dev->dev_private; @@ -88,11 +93,9 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer, static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc) { - struct dpu_crtc *dpu_crtc; struct dpu_crtc_state *crtc_state; int lm_idx, lm_horiz_position; - dpu_crtc = to_dpu_crtc(crtc); crtc_state = to_dpu_crtc_state(crtc->state); lm_horiz_position = 0; @@ -422,6 +425,74 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc, drm_mode_debug_printmodeline(adj_mode); } +static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state, + struct dpu_hw_pcc_cfg *cfg) +{ + struct drm_color_ctm *ctm; + + memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg)); + + ctm = (struct drm_color_ctm *)state->ctm->data; + + if (!ctm) + return; + + cfg->r.r = CONVERT_S3_15(ctm->matrix[0]); + cfg->g.r = CONVERT_S3_15(ctm->matrix[1]); + cfg->b.r = CONVERT_S3_15(ctm->matrix[2]); + + cfg->r.g = CONVERT_S3_15(ctm->matrix[3]); + cfg->g.g = CONVERT_S3_15(ctm->matrix[4]); + cfg->b.g = CONVERT_S3_15(ctm->matrix[5]); + + cfg->r.b = CONVERT_S3_15(ctm->matrix[6]); + cfg->g.b = CONVERT_S3_15(ctm->matrix[7]); + cfg->b.b = CONVERT_S3_15(ctm->matrix[8]); +} + +static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc) +{ + struct drm_crtc_state *state = crtc->state; + struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state); + struct dpu_crtc_mixer *mixer = cstate->mixers; + struct dpu_hw_pcc_cfg cfg; + struct dpu_hw_ctl *ctl; + struct dpu_hw_mixer *lm; + struct dpu_hw_dspp *dspp; + int i; + + if (!state->color_mgmt_changed) + return; + + for (i = 0; i < cstate->num_mixers; i++) { + ctl = mixer[i].lm_ctl; + lm = mixer[i].hw_lm; + dspp = mixer[i].hw_dspp; + + if (!dspp || !dspp->ops.setup_pcc) + continue; + + if (!state->ctm) { + dspp->ops.setup_pcc(dspp, NULL); + } else { + _dpu_crtc_get_pcc_coeff(state, &cfg); + dspp->ops.setup_pcc(dspp, &cfg); + } + + mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl, + mixer[i].hw_dspp->idx); + + /* stage config flush mask */ + ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask); + + DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n", + mixer[i].hw_lm->idx - LM_0, + ctl->idx - CTL_0, + mixer[i].flush_mask); + } +} + static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -430,7 +501,6 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_encoder *encoder; struct drm_device *dev; unsigned long flags; - struct dpu_crtc_smmu_state_data *smmu_state; if (!crtc) {
DPU_ERROR("invalid crtc\n"); @@ -448,7 +518,6 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, dpu_crtc = to_dpu_crtc(crtc); cstate = to_dpu_crtc_state(crtc->state); dev = crtc->dev; - smmu_state = &dpu_crtc->smmu_state; _dpu_crtc_setup_lm_bounds(crtc, crtc->state); @@ -475,6 +544,8 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc, _dpu_crtc_blend_setup(crtc); + _dpu_crtc_setup_cp_blocks(crtc); + /* * PP_DONE irq is only used by command mode for now. * It is better to request pending before FLUSH and START trigger @@ -491,7 +562,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_device *dev; struct drm_plane *plane; struct msm_drm_private *priv; - struct msm_drm_thread *event_thread; unsigned long flags; struct dpu_crtc_state *cstate; @@ -513,8 +583,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, return; } - event_thread = &priv->event_thread[crtc->index]; - if (dpu_crtc->event) { DPU_DEBUG("already received dpu_crtc->event\n"); } else { @@ -567,7 +635,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc, static void dpu_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct dpu_crtc *dpu_crtc; struct dpu_crtc_state *cstate; if (!crtc || !state) { @@ -575,7 +642,6 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc, return; } - dpu_crtc = to_dpu_crtc(crtc); cstate = to_dpu_crtc_state(state); DPU_DEBUG("crtc%d\n", crtc->base.id); @@ -662,11 +728,9 @@ static void dpu_crtc_reset(struct drm_crtc *crtc) /** * dpu_crtc_duplicate_state - state duplicate hook * @crtc: Pointer to drm crtc structure - * @Returns: Pointer to new drm_crtc_state structure */ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc) { - struct dpu_crtc *dpu_crtc; struct dpu_crtc_state *cstate, *old_cstate; if (!crtc || !crtc->state) { @@ -674,7 +738,6 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc) return NULL; } - dpu_crtc = to_dpu_crtc(crtc); old_cstate = to_dpu_crtc_state(crtc->state); cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL); if (!cstate) { @@ -693,9 +756,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc, { struct dpu_crtc *dpu_crtc; struct dpu_crtc_state *cstate; - struct drm_display_mode *mode; struct drm_encoder *encoder; - struct msm_drm_private *priv; unsigned long flags; bool release_bandwidth = false; @@ -705,8 +766,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc, } dpu_crtc = to_dpu_crtc(crtc); cstate = to_dpu_crtc_state(crtc->state); - mode = &cstate->base.adjusted_mode; - priv = crtc->dev->dev_private; DRM_DEBUG_KMS("crtc%d\n", crtc->base.id); @@ -768,14 +827,12 @@ static void dpu_crtc_enable(struct drm_crtc *crtc, { struct dpu_crtc *dpu_crtc; struct drm_encoder *encoder; - struct msm_drm_private *priv; bool request_bandwidth; if (!crtc) { DPU_ERROR("invalid crtc\n"); return; } - priv = crtc->dev->dev_private; pm_runtime_get_sync(crtc->dev->dev); @@ -1319,6 +1376,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane, drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs); + drm_crtc_enable_color_mgmt(crtc, 0, true, 0); + /* save user friendly CRTC name for later */ snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index 5174e86124cc..cec3474340e8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -73,12 +73,14 @@ struct 
dpu_crtc_smmu_state_data { * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC * @hw_lm: LM HW Driver context * @lm_ctl: CTL Path HW driver context + * @hw_dspp: DSPP HW driver context * @mixer_op_mode: mixer blending operation mode * @flush_mask: mixer flush mask for ctl, mixer and pipe */ struct dpu_crtc_mixer { struct dpu_hw_mixer *hw_lm; struct dpu_hw_ctl *lm_ctl; + struct dpu_hw_dspp *hw_dspp; u32 mixer_op_mode; u32 flush_mask; }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index a1b79ee2bd9d..63976dcd2ac8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -20,6 +20,7 @@ #include "dpu_hw_catalog.h" #include "dpu_hw_intf.h" #include "dpu_hw_ctl.h" +#include "dpu_hw_dspp.h" #include "dpu_formats.h" #include "dpu_encoder_phys.h" #include "dpu_crtc.h" @@ -536,6 +537,7 @@ static struct msm_display_topology dpu_encoder_get_topology( * 1 LM, 1 INTF * 2 LM, 1 INTF (stream merge to support high resolution interfaces) * + * Adding color blocks only to primary interface */ if (intf_count == 2) topology.num_lm = 2; @@ -544,6 +546,9 @@ static struct msm_display_topology dpu_encoder_get_topology( else topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1; + if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) + topology.num_dspp = topology.num_lm; + topology.num_enc = 0; topology.num_intf = intf_count; @@ -959,7 +964,8 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC]; struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC]; - int num_lm, num_ctl, num_pp; + struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL }; + int num_lm, num_ctl, num_pp, num_dspp; int i, j; if (!drm_enc) { @@ -1008,6 +1014,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl)); num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm)); + num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state, + drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp, + ARRAY_SIZE(hw_dspp)); for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) dpu_enc->hw_pp[i] = i < num_pp ?
to_dpu_hw_pingpong(hw_pp[i]) @@ -1020,6 +1029,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]); cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); + cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]); } cstate->num_mixers = num_lm; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index c567917541e8..29d4fde3172b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -41,6 +41,8 @@ #define PINGPONG_SDM845_SPLIT_MASK \ (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2)) +#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC) + #define DEFAULT_PIXEL_RAM_SIZE (50 * 1024) #define DEFAULT_DPU_LINE_WIDTH 2048 #define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560 @@ -291,29 +293,30 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = { }, }; -#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair) \ +#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair, _dspp) \ { \ .name = _name, .id = _id, \ .base = _base, .len = 0x320, \ .features = _fmask, \ .sblk = _sblk, \ .pingpong = _pp, \ - .lm_pair_mask = (1 << _lmpair) \ + .lm_pair_mask = (1 << _lmpair), \ + .dspp = _dspp \ } static const struct dpu_lm_cfg sdm845_lm[] = { LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK, - &sdm845_lm_sblk, PINGPONG_0, LM_1), + &sdm845_lm_sblk, PINGPONG_0, LM_1, 0), LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK, - &sdm845_lm_sblk, PINGPONG_1, LM_0), + &sdm845_lm_sblk, PINGPONG_1, LM_0, 0), LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK, - &sdm845_lm_sblk, PINGPONG_2, LM_5), + &sdm845_lm_sblk, PINGPONG_2, LM_5, 0), LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK, - &sdm845_lm_sblk, PINGPONG_MAX, 0), + &sdm845_lm_sblk, PINGPONG_MAX, 0, 0), LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK, - &sdm845_lm_sblk, PINGPONG_MAX, 0), + &sdm845_lm_sblk, PINGPONG_MAX, 0, 0), LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK, - &sdm845_lm_sblk, PINGPONG_3, LM_2), + &sdm845_lm_sblk, PINGPONG_3, LM_2, 0), }; /* SC7180 */ @@ -328,11 +331,30 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = { static const struct dpu_lm_cfg sc7180_lm[] = { LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK, - &sc7180_lm_sblk, PINGPONG_0, LM_1), + &sc7180_lm_sblk, PINGPONG_0, LM_1, DSPP_0), LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK, - &sc7180_lm_sblk, PINGPONG_1, LM_0), + &sc7180_lm_sblk, PINGPONG_1, LM_0, 0), +}; + +/************************************************************* + * DSPP sub blocks config + *************************************************************/ +static const struct dpu_dspp_sub_blks sc7180_dspp_sblk = { + .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700, + .len = 0x90, .version = 0x10000}, }; +#define DSPP_BLK(_name, _id, _base) \ + {\ + .name = _name, .id = _id, \ + .base = _base, .len = 0x1800, \ + .features = DSPP_SC7180_MASK, \ + .sblk = &sc7180_dspp_sblk \ + } + +static const struct dpu_dspp_cfg sc7180_dspp[] = { + DSPP_BLK("dspp_0", DSPP_0, 0x54000), +}; /************************************************************* * PINGPONG sub blocks config *************************************************************/ @@ -515,8 +537,8 @@ static const struct dpu_perf_cfg sdm845_perf_data = { }; static const struct dpu_perf_cfg sc7180_perf_data = { - .max_bw_low = 3900000, - .max_bw_high = 5500000, + .max_bw_low = 6800000, + .max_bw_high = 6800000, .min_core_ib = 2400000, .min_llcc_ib = 800000, .min_dram_ib = 800000, @@ -587,6 +609,8 @@ static void 
sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg) .sspp = sc7180_sspp, .mixer_count = ARRAY_SIZE(sc7180_lm), .mixer = sc7180_lm, + .dspp_count = ARRAY_SIZE(sc7180_dspp), + .dspp = sc7180_dspp, .pingpong_count = ARRAY_SIZE(sc7180_pp), .pingpong = sc7180_pp, .intf_count = ARRAY_SIZE(sc7180_intf), diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 09df7d87dd43..f7de43838c69 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -146,6 +146,17 @@ enum { }; /** + * DSPP sub-blocks + * @DPU_DSPP_PCC Panel color correction block + * @DPU_DSPP_GC Gamma correction block + */ +enum { + DPU_DSPP_PCC = 0x1, + DPU_DSPP_GC, + DPU_DSPP_MAX +}; + +/** * PINGPONG sub-blocks * @DPU_PINGPONG_TE Tear check block * @DPU_PINGPONG_TE2 Additional tear check block for split pipes @@ -377,6 +388,16 @@ struct dpu_lm_sub_blks { struct dpu_pp_blk gc; }; +/** + * struct dpu_dspp_sub_blks: Information of DSPP block + * @gc : gamma correction block + * @pcc: pixel color correction block + */ +struct dpu_dspp_sub_blks { + struct dpu_pp_blk gc; + struct dpu_pp_blk pcc; +}; + struct dpu_pingpong_sub_blks { struct dpu_pp_blk te; struct dpu_pp_blk te2; @@ -471,10 +492,24 @@ struct dpu_lm_cfg { DPU_HW_BLK_INFO; const struct dpu_lm_sub_blks *sblk; u32 pingpong; + u32 dspp; unsigned long lm_pair_mask; }; /** + * struct dpu_dspp_cfg - information of DSPP blocks + * @id enum identifying this block + * @base register offset of this block + * @features bit mask identifying sub-blocks/features + * supported by this block + * @sblk sub-blocks information + */ +struct dpu_dspp_cfg { + DPU_HW_BLK_INFO; + const struct dpu_dspp_sub_blks *sblk; +}; + +/** * struct dpu_pingpong_cfg - information of PING-PONG blocks * @id enum identifying this block * @base register offset of this block @@ -688,6 +723,9 @@ struct dpu_mdss_cfg { u32 ad_count; + u32 dspp_count; + const struct dpu_dspp_cfg *dspp; + /* Add additional block data structures here */ struct dpu_perf_cfg perf; @@ -716,6 +754,7 @@ struct dpu_mdss_hw_cfg_handler { #define BLK_PINGPONG(s) ((s)->pingpong) #define BLK_INTF(s) ((s)->intf) #define BLK_AD(s) ((s)->ad) +#define BLK_DSPP(s) ((s)->dspp) /** * dpu_hw_catalog_init - dpu hardware catalog init API retrieves diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index 831e5f7a9b7f..613ae8f0cfcd 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -272,6 +272,31 @@ static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx, return 0; } +static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx, + enum dpu_dspp dspp) +{ + uint32_t flushbits = 0; + + switch (dspp) { + case DSPP_0: + flushbits = BIT(13); + break; + case DSPP_1: + flushbits = BIT(14); + break; + case DSPP_2: + flushbits = BIT(15); + break; + case DSPP_3: + flushbits = BIT(21); + break; + default: + return 0; + } + + return flushbits; +} + static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us) { struct dpu_hw_blk_reg_map *c = &ctx->hw; @@ -548,6 +573,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, ops->setup_blendstage = dpu_hw_ctl_setup_blendstage; ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp; ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer; + ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp; }; static struct dpu_hw_blk_ops dpu_hw_ops; diff --git 
a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h index 09e1263c72e2..ec579b470a80 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h @@ -139,6 +139,9 @@ struct dpu_hw_ctl_ops { uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx, enum dpu_lm blk); + uint32_t (*get_bitmask_dspp)(struct dpu_hw_ctl *ctx, + enum dpu_dspp blk); + /** * Query the value of the intf flush mask * No effect on hardware */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c new file mode 100644 index 000000000000..a7a24539921f --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#include "dpu_hwio.h" +#include "dpu_hw_catalog.h" +#include "dpu_hw_lm.h" +#include "dpu_hw_dspp.h" +#include "dpu_kms.h" + +/* DSPP_PCC */ +#define PCC_EN BIT(0) +#define PCC_DIS 0 +#define PCC_RED_R_OFF 0x10 +#define PCC_RED_G_OFF 0x1C +#define PCC_RED_B_OFF 0x28 +#define PCC_GREEN_R_OFF 0x14 +#define PCC_GREEN_G_OFF 0x20 +#define PCC_GREEN_B_OFF 0x2C +#define PCC_BLUE_R_OFF 0x18 +#define PCC_BLUE_G_OFF 0x24 +#define PCC_BLUE_B_OFF 0x30 + +static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx, + struct dpu_hw_pcc_cfg *cfg) +{ + u32 base; + + if (!ctx) { + DRM_ERROR("invalid ctx %pK\n", ctx); + return; + } + + base = ctx->cap->sblk->pcc.base; + if (!base) { + DRM_ERROR("invalid pcc base 0x%x\n", base); + return; + } + + if (!cfg) { + DRM_DEBUG_DRIVER("disable pcc feature\n"); + DPU_REG_WRITE(&ctx->hw, base, PCC_DIS); + return; + } + + DPU_REG_WRITE(&ctx->hw, base + PCC_RED_R_OFF, cfg->r.r); + DPU_REG_WRITE(&ctx->hw, base + PCC_RED_G_OFF, cfg->r.g); + DPU_REG_WRITE(&ctx->hw, base + PCC_RED_B_OFF, cfg->r.b); + + DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_R_OFF, cfg->g.r); + DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_G_OFF, cfg->g.g); + DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_B_OFF, cfg->g.b); + + DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_R_OFF, cfg->b.r); + DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_G_OFF, cfg->b.g); + DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_B_OFF, cfg->b.b); + + DPU_REG_WRITE(&ctx->hw, base, PCC_EN); +} + +static void _setup_dspp_ops(struct dpu_hw_dspp *c, + unsigned long features) +{ + if (test_bit(DPU_DSPP_PCC, &features) && + IS_SC7180_TARGET(c->hw.hwversion)) + c->ops.setup_pcc = dpu_setup_dspp_pcc; +} + +static const struct dpu_dspp_cfg *_dspp_offset(enum dpu_dspp dspp, + const struct dpu_mdss_cfg *m, + void __iomem *addr, + struct dpu_hw_blk_reg_map *b) +{ + int i; + + if (!m || !addr || !b) + return ERR_PTR(-EINVAL); + + for (i = 0; i < m->dspp_count; i++) { + if (dspp == m->dspp[i].id) { + b->base_off = addr; + b->blk_off = m->dspp[i].base; + b->length = m->dspp[i].len; + b->hwversion = m->hwversion; + b->log_mask = DPU_DBG_MASK_DSPP; + return &m->dspp[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +static struct dpu_hw_blk_ops dpu_hw_ops; + +struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx, + void __iomem *addr, + const struct dpu_mdss_cfg *m) +{ + struct dpu_hw_dspp *c; + const struct dpu_dspp_cfg *cfg; + + if (!addr || !m) + return ERR_PTR(-EINVAL); + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return ERR_PTR(-ENOMEM); + + cfg = _dspp_offset(idx, m, addr, &c->hw); + if (IS_ERR_OR_NULL(cfg)) { + kfree(c); + return ERR_PTR(-EINVAL); + } + + /* Assign ops */ + c->idx = idx; + c->cap = cfg; + _setup_dspp_ops(c, c->cap->features); + + dpu_hw_blk_init(&c->base,
DPU_HW_BLK_DSPP, idx, &dpu_hw_ops); + + return c; +} + +void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp) +{ + if (dspp) + dpu_hw_blk_destroy(&dspp->base); + + kfree(dspp); +} + + diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h new file mode 100644 index 000000000000..7fa189cfcb06 --- /dev/null +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. + */ + +#ifndef _DPU_HW_DSPP_H +#define _DPU_HW_DSPP_H + +#include "dpu_hw_blk.h" + +struct dpu_hw_dspp; + +/** + * struct dpu_hw_pcc_coeff - PCC coefficient structure for each color + * component. + * @r: red coefficient. + * @g: green coefficient. + * @b: blue coefficient. + */ + +struct dpu_hw_pcc_coeff { + __u32 r; + __u32 g; + __u32 b; +}; + +/** + * struct dpu_hw_pcc_cfg - pcc feature structure + * @r: red coefficients. + * @g: green coefficients. + * @b: blue coefficients. + */ +struct dpu_hw_pcc_cfg { + struct dpu_hw_pcc_coeff r; + struct dpu_hw_pcc_coeff g; + struct dpu_hw_pcc_coeff b; +}; + +/** + * struct dpu_hw_dspp_ops - interface to the dspp hardware driver functions + * Caller must call the init function to get the dspp context for each dspp + * Assumption is these functions will be called after clocks are enabled + */ +struct dpu_hw_dspp_ops { + /** + * setup_pcc - setup dspp pcc + * @ctx: Pointer to dspp context + * @cfg: Pointer to configuration + */ + void (*setup_pcc)(struct dpu_hw_dspp *ctx, struct dpu_hw_pcc_cfg *cfg); + +}; + +/** + * struct dpu_hw_dspp - dspp description + * @base: Hardware block base structure + * @hw: Block hardware details + * @idx: DSPP index + * @cap: Pointer to dspp_cfg + * @ops: Pointer to operations possible for this DSPP + */ +struct dpu_hw_dspp { + struct dpu_hw_blk base; + struct dpu_hw_blk_reg_map hw; + + /* dspp */ + int idx; + const struct dpu_dspp_cfg *cap; + + /* Ops */ + struct dpu_hw_dspp_ops ops; +}; + +/** + * to_dpu_hw_dspp - convert base object dpu_hw_blk to container + * @hw: Pointer to base hardware block + * return: Pointer to hardware block container + */ +static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw) +{ + return container_of(hw, struct dpu_hw_dspp, base); +} + +/** + * dpu_hw_dspp_init - initializes the dspp hw driver object. + * should be called once before accessing every dspp.
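+ * The ops of the returned context (e.g. setup_pcc) are expected to be called + * from the dpu_crtc atomic path, with clocks enabled.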
+ * @idx: DSPP index for which driver object is required + * @addr: Mapped register io address of MDP + * @Return: pointer to structure or ERR_PTR + */ +struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx, + void __iomem *addr, const struct dpu_mdss_cfg *m); + +/** + * dpu_hw_dspp_destroy(): Destroys DSPP driver context + * @dspp: Pointer to DSPP driver context + */ +void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp); + +#endif /*_DPU_HW_DSPP_H */ + diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index 686882132bf6..402dc5832361 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -95,6 +95,7 @@ enum dpu_hw_blk_type { DPU_HW_BLK_PINGPONG, DPU_HW_BLK_INTF, DPU_HW_BLK_WB, + DPU_HW_BLK_DSPP, DPU_HW_BLK_MAX, }; @@ -425,5 +426,6 @@ struct dpu_mdss_color { #define DPU_DBG_MASK_TOP (1 << 7) #define DPU_DBG_MASK_VBIF (1 << 8) #define DPU_DBG_MASK_ROT (1 << 9) +#define DPU_DBG_MASK_DSPP (1 << 10) #endif /* _DPU_HW_MDSS_H */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index ce19f1d39367..b8615d4fe8a3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -772,29 +772,21 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) { struct iommu_domain *domain; struct msm_gem_address_space *aspace; - int ret; + struct msm_mmu *mmu; domain = iommu_domain_alloc(&platform_bus_type); if (!domain) return 0; - domain->geometry.aperture_start = 0x1000; - domain->geometry.aperture_end = 0xffffffff; + mmu = msm_iommu_new(dpu_kms->dev->dev, domain); + aspace = msm_gem_address_space_create(mmu, "dpu1", + 0x1000, 0xfffffff); - aspace = msm_gem_address_space_create(dpu_kms->dev->dev, - domain, "dpu1"); if (IS_ERR(aspace)) { - iommu_domain_free(domain); + if (!IS_ERR(mmu)) + mmu->funcs->destroy(mmu); return PTR_ERR(aspace); } - ret = aspace->mmu->funcs->attach(aspace->mmu); - if (ret) { - DPU_ERROR("failed to attach iommu %d\n", ret); - msm_gem_address_space_put(aspace); - return ret; - } - dpu_kms->base.aspace = aspace; return 0; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 211f5de99a44..a3b122bfb676 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -158,6 +158,7 @@ struct dpu_global_state { uint32_t mixer_to_enc_id[LM_MAX - LM_0]; uint32_t ctl_to_enc_id[CTL_MAX - CTL_0]; uint32_t intf_to_enc_id[INTF_MAX - INTF_0]; + uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0]; }; struct dpu_global_state @@ -170,7 +171,7 @@ struct dpu_global_state * * Main debugfs documentation is located at, * - * Documentation/filesystems/debugfs.txt + * Documentation/filesystems/debugfs.rst * * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32 * @dpu_debugfs_create_regset32: Create 32-bit register dump file diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 9b62451b01ee..9b2b5044e8e0 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -9,6 +9,7 @@ #include "dpu_hw_ctl.h" #include "dpu_hw_pingpong.h" #include "dpu_hw_intf.h" +#include "dpu_hw_dspp.h" #include "dpu_encoder.h" #include "dpu_trace.h" @@ -174,6 +175,23 @@ int dpu_rm_init(struct dpu_rm *rm, rm->ctl_blks[ctl->id - CTL_0] = &hw->base; } + for (i = 0; i < cat->dspp_count; i++) { + struct dpu_hw_dspp *hw; + const struct dpu_dspp_cfg *dspp = &cat->dspp[i]; + + if (dspp->id <
DSPP_0 || dspp->id >= DSPP_MAX) { + DPU_ERROR("skip dspp %d with invalid id\n", dspp->id); + continue; + } + hw = dpu_hw_dspp_init(dspp->id, mmio, cat); + if (IS_ERR_OR_NULL(hw)) { + rc = PTR_ERR(hw); + DPU_ERROR("failed dspp object creation: err %d\n", rc); + goto fail; + } + rm->dspp_blks[dspp->id - DSPP_0] = &hw->base; + } + return 0; fail: @@ -222,12 +240,17 @@ static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx, * if lm, and all other hardwired blocks connected to the lm (pp) is * available and appropriate * @pp_idx: output parameter, index of pingpong block attached to the layer - * mixer in rm->pongpong_blks[]. + * mixer in rm->pingpong_blks[]. + * @dspp_idx: output parameter, index of dspp block attached to the layer + * mixer in rm->dspp_blks[]. + * @reqs: input parameter, rm requirements for HW blocks needed in the + * datapath. * @Return: true if lm matches all requirements, false otherwise */ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, struct dpu_global_state *global_state, - uint32_t enc_id, int lm_idx, int *pp_idx) + uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx, + struct dpu_rm_requirements *reqs) { const struct dpu_lm_cfg *lm_cfg; int idx; @@ -251,6 +274,23 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm, return false; } *pp_idx = idx; + + if (!reqs->topology.num_dspp) + return true; + + idx = lm_cfg->dspp - DSPP_0; + if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) { + DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp); + return false; + } + + if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) { + DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id, + lm_cfg->dspp); + return false; + } + *dspp_idx = idx; + return true; } @@ -262,6 +302,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, { int lm_idx[MAX_BLOCKS]; int pp_idx[MAX_BLOCKS]; + int dspp_idx[MAX_BLOCKS] = {0}; int i, j, lm_count = 0; if (!reqs->topology.num_lm) { @@ -279,7 +320,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, lm_idx[lm_count] = i; if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state, - enc_id, i, &pp_idx[lm_count])) { + enc_id, i, &pp_idx[lm_count], + &dspp_idx[lm_count], reqs)) { continue; } @@ -299,7 +341,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state, enc_id, j, - &pp_idx[lm_count])) { + &pp_idx[lm_count], &dspp_idx[lm_count], + reqs)) { continue; } @@ -316,6 +359,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm, for (i = 0; i < lm_count; i++) { global_state->mixer_to_enc_id[lm_idx[i]] = enc_id; global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id; + global_state->dspp_to_enc_id[dspp_idx[i]] = + reqs->topology.num_dspp ? 
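/* 0 leaves the DSPP slot unreserved when the topology did not ask for one */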
enc_id : 0; trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id, pp_idx[i] + PINGPONG_0); @@ -560,6 +605,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm, hw_to_enc_id = global_state->intf_to_enc_id; max_blks = ARRAY_SIZE(rm->intf_blks); break; + case DPU_HW_BLK_DSPP: + hw_blks = rm->dspp_blks; + hw_to_enc_id = global_state->dspp_to_enc_id; + max_blks = ARRAY_SIZE(rm->dspp_blks); + break; default: DPU_ERROR("blk type %d not managed by rm\n", type); return 0; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h index 6d2b04f306f0..08726bb1063a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h @@ -19,6 +19,7 @@ struct dpu_global_state; * @mixer_blks: array of layer mixer hardware resources * @ctl_blks: array of ctl hardware resources * @intf_blks: array of intf hardware resources + * @dspp_blks: array of dspp hardware resources * @lm_max_width: cached layer mixer maximum width * @rm_lock: resource manager mutex */ @@ -27,6 +28,7 @@ struct dpu_rm { struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0]; struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0]; struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0]; + struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0]; uint32_t lm_max_width; }; diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index dda05436f716..08897184b1d9 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -510,18 +510,20 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) mdelay(16); if (config->iommu) { - aspace = msm_gem_address_space_create(&pdev->dev, - config->iommu, "mdp4"); + struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, + config->iommu); + + aspace = msm_gem_address_space_create(mmu, + "mdp4", 0x1000, 0xffffffff); + if (IS_ERR(aspace)) { + if (!IS_ERR(mmu)) + mmu->funcs->destroy(mmu); ret = PTR_ERR(aspace); goto fail; } kms->aspace = aspace; - - ret = aspace->mmu->funcs->attach(aspace->mmu); - if (ret) - goto fail; } else { DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys " "contig buffers for scanout\n"); @@ -569,10 +571,6 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev) /* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */ config.max_clk = 266667000; config.iommu = iommu_domain_alloc(&platform_bus_type); - if (config.iommu) { - config.iommu->geometry.aperture_start = 0x1000; - config.iommu->geometry.aperture_end = 0xffffffff; - } return &config; } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c index e3c4c250238b..25a13a2a57a9 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c @@ -342,6 +342,81 @@ static const struct mdp5_cfg_hw msm8x16_config = { .max_clk = 320000000, }; +static const struct mdp5_cfg_hw msm8x36_config = { + .name = "msm8x36", + .mdp = { + .count = 1, + .base = { 0x0 }, + .caps = MDP_CAP_SMP | + 0, + }, + .smp = { + .mmb_count = 8, + .mmb_size = 10240, + .clients = { + [SSPP_VIG0] = 1, [SSPP_DMA0] = 4, + [SSPP_RGB0] = 7, [SSPP_RGB1] = 8, + }, + }, + .ctl = { + .count = 3, + .base = { 0x01000, 0x01200, 0x01400 }, + .flush_hw_mask = 0x4003ffff, + }, + .pipe_vig = { + .count = 1, + .base = { 0x04000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION, + }, + .pipe_rgb = { + .count = 2, + .base = { 0x14000, 0x16000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | + 
MDP_PIPE_CAP_DECIMATION, + }, + .pipe_dma = { + .count = 1, + .base = { 0x24000 }, + .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, + }, + .lm = { + .count = 2, + .base = { 0x44000, 0x47000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 1, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + }, + .nb_stages = 8, + .max_width = 2560, + .max_height = 0xFFFF, + }, + .pp = { + .count = 1, + .base = { 0x70000 }, + }, + .ad = { + .count = 1, + .base = { 0x78000 }, + }, + .dspp = { + .count = 1, + .base = { 0x54000 }, + }, + .intf = { + .base = { 0x00000, 0x6a800, 0x6b000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + }, + }, + .max_clk = 366670000, +}; + static const struct mdp5_cfg_hw msm8x94_config = { .name = "msm8x94", .mdp = { @@ -840,6 +915,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = { { .revision = 2, .config = { .hw = &msm8x74v2_config } }, { .revision = 3, .config = { .hw = &apq8084_config } }, { .revision = 6, .config = { .hw = &msm8x16_config } }, + { .revision = 8, .config = { .hw = &msm8x36_config } }, { .revision = 9, .config = { .hw = &msm8x94_config } }, { .revision = 7, .config = { .hw = &msm8x96_config } }, { .revision = 11, .config = { .hw = &msm8x76_config } }, @@ -941,10 +1017,6 @@ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev) static struct mdp5_cfg_platform config = {}; config.iommu = iommu_domain_alloc(&platform_bus_type); - if (config.iommu) { - config.iommu->geometry.aperture_start = 0x1000; - config.iommu->geometry.aperture_end = 0xffffffff; - } return &config; } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index 998bef1190a3..b5fed67c4651 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -959,7 +959,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, if (!ctl) return -EINVAL; - /* don't support LM cursors when we we have source split enabled */ + /* don't support LM cursors when we have source split enabled */ if (mdp5_cstate->pipeline.r_mixer) return -EINVAL; @@ -1030,7 +1030,7 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) return -EINVAL; } - /* don't support LM cursors when we we have source split enabled */ + /* don't support LM cursors when we have source split enabled */ if (mdp5_cstate->pipeline.r_mixer) return -EINVAL; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index c902c6503675..19ec48695ffb 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -624,25 +624,25 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) mdelay(16); if (config->platform.iommu) { + struct msm_mmu *mmu; + iommu_dev = &pdev->dev; if (!dev_iommu_fwspec_get(iommu_dev)) iommu_dev = iommu_dev->parent; - aspace = msm_gem_address_space_create(iommu_dev, - config->platform.iommu, "mdp5"); + mmu = msm_iommu_new(iommu_dev, config->platform.iommu); + + aspace = msm_gem_address_space_create(mmu, "mdp5", + 0x1000, 0xffffffff); + if (IS_ERR(aspace)) { + if (!IS_ERR(mmu)) + mmu->funcs->destroy(mmu); ret = PTR_ERR(aspace); goto fail; } kms->aspace = aspace; - - ret = aspace->mmu->funcs->attach(aspace->mmu); - if (ret) { - DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n", - ret); - goto fail; - } } else { DRM_DEV_INFO(&pdev->dev, "no iommu, fallback to phys contig buffers for scanout\n"); @@ -935,7 +935,8 @@ static int 
mdp5_init(struct platform_device *pdev, struct drm_device *dev) return 0; fail: - mdp5_destroy(pdev); + if (mdp5_kms) + mdp5_destroy(pdev); return ret; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 29295dee2a2e..f6ce40bf3699 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -37,9 +37,10 @@ * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get * GEM object's debug name * - 1.5.0 - Add SUBMITQUERY_QUERY ioctl + * - 1.6.0 - Syncobj support */ #define MSM_VERSION_MAJOR 1 -#define MSM_VERSION_MINOR 5 +#define MSM_VERSION_MINOR 6 #define MSM_VERSION_PATCHLEVEL 0 static const struct drm_mode_config_funcs mode_config_funcs = { @@ -1002,7 +1003,8 @@ static struct drm_driver msm_driver = { .driver_features = DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC | - DRIVER_MODESET, + DRIVER_MODESET | + DRIVER_SYNCOBJ, .open = msm_open, .postclose = msm_postclose, .lastclose = drm_fb_helper_lastclose, diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 194d900a460e..e2d6a6056418 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -105,6 +105,7 @@ struct msm_display_topology { u32 num_lm; u32 num_enc; u32 num_intf; + u32 num_dspp; }; /** @@ -236,7 +237,8 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc); void msm_crtc_disable_vblank(struct drm_crtc *crtc); int msm_gem_init_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, int npages); + struct msm_gem_vma *vma, int npages, + u64 range_start, u64 range_end); void msm_gem_purge_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma); void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, @@ -250,12 +252,8 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace, void msm_gem_address_space_put(struct msm_gem_address_space *aspace); struct msm_gem_address_space * -msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, - const char *name); - -struct msm_gem_address_space * -msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu, - const char *name, uint64_t va_start, uint64_t va_end); +msm_gem_address_space_create(struct msm_mmu *mmu, const char *name, + u64 va_start, u64 size); int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu); @@ -276,6 +274,9 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf); uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); int msm_gem_get_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova); +int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint64_t *iova, + u64 range_start, u64 range_end); int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova); uint64_t msm_gem_iova(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 5a6a79fbc9d6..6277fde13df9 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -389,7 +389,8 @@ put_iova(struct drm_gem_object *obj) } static int msm_gem_get_iova_locked(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace, uint64_t *iova) + struct msm_gem_address_space *aspace, uint64_t *iova, + u64 range_start, u64 range_end) { struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma; @@ -404,7 +405,8 @@ static int msm_gem_get_iova_locked(struct 
drm_gem_object *obj, if (IS_ERR(vma)) return PTR_ERR(vma); - ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT); + ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT, + range_start, range_end); if (ret) { del_vma(vma); return ret; @@ -426,6 +428,9 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, if (!(msm_obj->flags & MSM_BO_GPU_READONLY)) prot |= IOMMU_WRITE; + if (msm_obj->flags & MSM_BO_MAP_PRIV) + prot |= IOMMU_PRIV; + WARN_ON(!mutex_is_locked(&msm_obj->lock)); if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) @@ -443,9 +448,13 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, msm_obj->sgt, obj->size >> PAGE_SHIFT); } -/* get iova and pin it. Should have a matching put */ -int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace, uint64_t *iova) +/* + * get iova and pin it. Should have a matching put + * limits iova to specified range (in pages) + */ +int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint64_t *iova, + u64 range_start, u64 range_end) { struct msm_gem_object *msm_obj = to_msm_bo(obj); u64 local; @@ -453,7 +462,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, mutex_lock(&msm_obj->lock); - ret = msm_gem_get_iova_locked(obj, aspace, &local); + ret = msm_gem_get_iova_locked(obj, aspace, &local, + range_start, range_end); if (!ret) ret = msm_gem_pin_iova(obj, aspace); @@ -465,6 +475,13 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, return ret; } +/* get iova and pin it. Should have a matching put */ +int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint64_t *iova) +{ + return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX); +} + /* * Get an iova but don't pin it. 
Doesn't need a put because iovas are currently * valid for the life of the object @@ -476,7 +493,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int ret; mutex_lock(&msm_obj->lock); - ret = msm_gem_get_iova_locked(obj, aspace, iova); + ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX); mutex_unlock(&msm_obj->lock); return ret; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 30584eaf8cc8..972490b14ba5 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -13,6 +13,7 @@ /* Additional internal-use only BO flags: */ #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */ +#define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */ struct msm_gem_address_space { const char *name; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 385d4965a8d0..6630aa817505 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -8,7 +8,9 @@ #include <linux/sync_file.h> #include <linux/uaccess.h> +#include <drm/drm_drv.h> #include <drm/drm_file.h> +#include <drm/drm_syncobj.h> #include "msm_drv.h" #include "msm_gpu.h" @@ -391,6 +393,186 @@ static void submit_cleanup(struct msm_gem_submit *submit) } } + +struct msm_submit_post_dep { + struct drm_syncobj *syncobj; + uint64_t point; + struct dma_fence_chain *chain; +}; + +static struct drm_syncobj **msm_wait_deps(struct drm_device *dev, + struct drm_file *file, + uint64_t in_syncobjs_addr, + uint32_t nr_in_syncobjs, + size_t syncobj_stride, + struct msm_ringbuffer *ring) +{ + struct drm_syncobj **syncobjs = NULL; + struct drm_msm_gem_submit_syncobj syncobj_desc = {0}; + int ret = 0; + uint32_t i, j; + + syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs), + GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + if (!syncobjs) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < nr_in_syncobjs; ++i) { + uint64_t address = in_syncobjs_addr + i * syncobj_stride; + struct dma_fence *fence; + + if (copy_from_user(&syncobj_desc, + u64_to_user_ptr(address), + min(syncobj_stride, sizeof(syncobj_desc)))) { + ret = -EFAULT; + break; + } + + if (syncobj_desc.point && + !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) { + ret = -EOPNOTSUPP; + break; + } + + if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) { + ret = -EINVAL; + break; + } + + ret = drm_syncobj_find_fence(file, syncobj_desc.handle, + syncobj_desc.point, 0, &fence); + if (ret) + break; + + if (!dma_fence_match_context(fence, ring->fctx->context)) + ret = dma_fence_wait(fence, true); + + dma_fence_put(fence); + if (ret) + break; + + if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) { + syncobjs[i] = + drm_syncobj_find(file, syncobj_desc.handle); + if (!syncobjs[i]) { + ret = -EINVAL; + break; + } + } + } + + if (ret) { + for (j = 0; j <= i; ++j) { + if (syncobjs[j]) + drm_syncobj_put(syncobjs[j]); + } + kfree(syncobjs); + return ERR_PTR(ret); + } + return syncobjs; +} + +static void msm_reset_syncobjs(struct drm_syncobj **syncobjs, + uint32_t nr_syncobjs) +{ + uint32_t i; + + for (i = 0; syncobjs && i < nr_syncobjs; ++i) { + if (syncobjs[i]) + drm_syncobj_replace_fence(syncobjs[i], NULL); + } +} + +static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, + struct drm_file *file, + uint64_t syncobjs_addr, + uint32_t nr_syncobjs, + size_t syncobj_stride) +{ + struct msm_submit_post_dep *post_deps; + struct drm_msm_gem_submit_syncobj syncobj_desc = {0}; + int ret = 0; + uint32_t i, j; + + post_deps = 
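/* zero-initialized so the unwind path below can safely kfree() the .chain of entries that were never filled in */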
kcalloc(nr_syncobjs, sizeof(*post_deps), + GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + if (!post_deps) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < nr_syncobjs; ++i) { + uint64_t address = syncobjs_addr + i * syncobj_stride; + + if (copy_from_user(&syncobj_desc, + u64_to_user_ptr(address), + min(syncobj_stride, sizeof(syncobj_desc)))) { + ret = -EFAULT; + break; + } + + post_deps[i].point = syncobj_desc.point; + post_deps[i].chain = NULL; + + if (syncobj_desc.flags) { + ret = -EINVAL; + break; + } + + if (syncobj_desc.point) { + if (!drm_core_check_feature(dev, + DRIVER_SYNCOBJ_TIMELINE)) { + ret = -EOPNOTSUPP; + break; + } + + post_deps[i].chain = + kmalloc(sizeof(*post_deps[i].chain), + GFP_KERNEL); + if (!post_deps[i].chain) { + ret = -ENOMEM; + break; + } + } + + post_deps[i].syncobj = + drm_syncobj_find(file, syncobj_desc.handle); + if (!post_deps[i].syncobj) { + ret = -EINVAL; + break; + } + } + + if (ret) { + for (j = 0; j <= i; ++j) { + kfree(post_deps[j].chain); + if (post_deps[j].syncobj) + drm_syncobj_put(post_deps[j].syncobj); + } + + kfree(post_deps); + return ERR_PTR(ret); + } + + return post_deps; +} + +static void msm_process_post_deps(struct msm_submit_post_dep *post_deps, + uint32_t count, struct dma_fence *fence) +{ + uint32_t i; + + for (i = 0; post_deps && i < count; ++i) { + if (post_deps[i].chain) { + drm_syncobj_add_point(post_deps[i].syncobj, + post_deps[i].chain, + fence, post_deps[i].point); + post_deps[i].chain = NULL; + } else { + drm_syncobj_replace_fence(post_deps[i].syncobj, + fence); + } + } +} + int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file) { @@ -403,6 +585,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct sync_file *sync_file = NULL; struct msm_gpu_submitqueue *queue; struct msm_ringbuffer *ring; + struct msm_submit_post_dep *post_deps = NULL; + struct drm_syncobj **syncobjs_to_reset = NULL; int out_fence_fd = -1; struct pid *pid = get_pid(task_pid(current)); bool has_ww_ticket = false; @@ -411,6 +595,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (!gpu) return -ENXIO; + if (args->pad) + return -EINVAL; + /* for now, we just have 3d pipe..
eventually this would need to * be more clever to dispatch to appropriate gpu module: */ @@ -458,9 +645,29 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, return ret; } + if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) { + syncobjs_to_reset = msm_wait_deps(dev, file, + args->in_syncobjs, + args->nr_in_syncobjs, + args->syncobj_stride, ring); + if (IS_ERR(syncobjs_to_reset)) + return PTR_ERR(syncobjs_to_reset); + } + + if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) { + post_deps = msm_parse_post_deps(dev, file, + args->out_syncobjs, + args->nr_out_syncobjs, + args->syncobj_stride); + if (IS_ERR(post_deps)) { + ret = PTR_ERR(post_deps); + goto out_post_unlock; + } + } + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) - return ret; + goto out_post_unlock; if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) { out_fence_fd = get_unused_fd_flags(O_CLOEXEC); @@ -587,6 +794,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, args->fence_fd = out_fence_fd; } + msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs); + msm_process_post_deps(post_deps, args->nr_out_syncobjs, + submit->fence); + + out: submit_cleanup(submit); if (has_ww_ticket) @@ -597,5 +809,23 @@ out_unlock: if (ret && (out_fence_fd >= 0)) put_unused_fd(out_fence_fd); mutex_unlock(&dev->struct_mutex); + +out_post_unlock: + if (!IS_ERR_OR_NULL(post_deps)) { + for (i = 0; i < args->nr_out_syncobjs; ++i) { + kfree(post_deps[i].chain); + drm_syncobj_put(post_deps[i].syncobj); + } + kfree(post_deps); + } + + if (!IS_ERR_OR_NULL(syncobjs_to_reset)) { + for (i = 0; i < args->nr_in_syncobjs; ++i) { + if (syncobjs_to_reset[i]) + drm_syncobj_put(syncobjs_to_reset[i]); + } + kfree(syncobjs_to_reset); + } + return ret; } diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 1af5354bcd46..5f6a11211b64 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -103,7 +103,8 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace, /* Initialize a new vma and allocate an iova for it */ int msm_gem_init_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, int npages) + struct msm_gem_vma *vma, int npages, + u64 range_start, u64 range_end) { int ret; @@ -111,7 +112,8 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace, return -EBUSY; spin_lock(&aspace->lock); - ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages); + ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0, + 0, range_start, range_end, 0); spin_unlock(&aspace->lock); if (ret) @@ -125,37 +127,14 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace, return 0; } - struct msm_gem_address_space * -msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, - const char *name) +msm_gem_address_space_create(struct msm_mmu *mmu, const char *name, + u64 va_start, u64 size) { struct msm_gem_address_space *aspace; - u64 size = domain->geometry.aperture_end - - domain->geometry.aperture_start; - - aspace = kzalloc(sizeof(*aspace), GFP_KERNEL); - if (!aspace) - return ERR_PTR(-ENOMEM); - - spin_lock_init(&aspace->lock); - aspace->name = name; - aspace->mmu = msm_iommu_new(dev, domain); - - drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT), - size >> PAGE_SHIFT); - kref_init(&aspace->kref); - - return aspace; -} - -struct msm_gem_address_space * -msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu, - const char *name, uint64_t va_start, uint64_t va_end) -{ - struct msm_gem_address_space 
*aspace; - u64 size = va_end - va_start; + if (IS_ERR(mmu)) + return ERR_CAST(mmu); aspace = kzalloc(sizeof(*aspace), GFP_KERNEL); if (!aspace) @@ -163,10 +142,9 @@ msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu, spin_lock_init(&aspace->lock); aspace->name = name; - aspace->mmu = msm_gpummu_new(dev, gpu); + aspace->mmu = mmu; - drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT), - size >> PAGE_SHIFT); + drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT); kref_init(&aspace->kref); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 615c5cda5389..a22d30622306 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -821,51 +821,6 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) return 0; } -static struct msm_gem_address_space * -msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev, - uint64_t va_start, uint64_t va_end) -{ - struct msm_gem_address_space *aspace; - int ret; - - /* - * Setup IOMMU.. eventually we will (I think) do this once per context - * and have separate page tables per context. For now, to keep things - * simple and to get something working, just use a single address space: - */ - if (!adreno_is_a2xx(to_adreno_gpu(gpu))) { - struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type); - if (!iommu) - return NULL; - - iommu->geometry.aperture_start = va_start; - iommu->geometry.aperture_end = va_end; - - DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name); - - aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu"); - if (IS_ERR(aspace)) - iommu_domain_free(iommu); - } else { - aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu", - va_start, va_end); - } - - if (IS_ERR(aspace)) { - DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n", - PTR_ERR(aspace)); - return ERR_CAST(aspace); - } - - ret = aspace->mmu->funcs->attach(aspace->mmu); - if (ret) { - msm_gem_address_space_put(aspace); - return ERR_PTR(ret); - } - - return aspace; -} - int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, const char *name, struct msm_gpu_config *config) @@ -938,8 +893,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, msm_devfreq_init(gpu); - gpu->aspace = msm_gpu_create_address_space(gpu, pdev, - config->va_start, config->va_end); + + gpu->aspace = gpu->funcs->create_address_space(gpu, pdev); if (gpu->aspace == NULL) DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 6ccae4ba905c..429cb40f7931 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -21,8 +21,6 @@ struct msm_gpu_state; struct msm_gpu_config { const char *ioname; - uint64_t va_start; - uint64_t va_end; unsigned int nr_rings; }; @@ -64,6 +62,8 @@ struct msm_gpu_funcs { int (*gpu_state_put)(struct msm_gpu_state *state); unsigned long (*gpu_get_freq)(struct msm_gpu *gpu); void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq); + struct msm_gem_address_space *(*create_address_space) + (struct msm_gpu *gpu, struct platform_device *pdev); }; struct msm_gpu { diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c index 34980d8eb7ad..310a31b05faa 100644 --- a/drivers/gpu/drm/msm/msm_gpummu.c +++ b/drivers/gpu/drm/msm/msm_gpummu.c @@ -21,17 +21,12 @@ struct msm_gpummu { #define 
GPUMMU_PAGE_SIZE SZ_4K #define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE) -static int msm_gpummu_attach(struct msm_mmu *mmu) -{ - return 0; -} - static void msm_gpummu_detach(struct msm_mmu *mmu) { } static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova, - struct sg_table *sgt, unsigned len, int prot) + struct sg_table *sgt, size_t len, int prot) { struct msm_gpummu *gpummu = to_msm_gpummu(mmu); unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE; @@ -59,7 +54,7 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova, return 0; } -static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len) +static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len) { struct msm_gpummu *gpummu = to_msm_gpummu(mmu); unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE; @@ -85,7 +80,6 @@ static void msm_gpummu_destroy(struct msm_mmu *mmu) } static const struct msm_mmu_funcs funcs = { - .attach = msm_gpummu_attach, .detach = msm_gpummu_detach, .map = msm_gpummu_map, .unmap = msm_gpummu_unmap, diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index ad58cfe5998e..3a381a9674c9 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -23,13 +23,6 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev, return 0; } -static int msm_iommu_attach(struct msm_mmu *mmu) -{ - struct msm_iommu *iommu = to_msm_iommu(mmu); - - return iommu_attach_device(iommu->domain, mmu->dev); -} - static void msm_iommu_detach(struct msm_mmu *mmu) { struct msm_iommu *iommu = to_msm_iommu(mmu); @@ -38,7 +31,7 @@ static void msm_iommu_detach(struct msm_mmu *mmu) } static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, - struct sg_table *sgt, unsigned len, int prot) + struct sg_table *sgt, size_t len, int prot) { struct msm_iommu *iommu = to_msm_iommu(mmu); size_t ret; @@ -49,7 +42,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, return (ret == len) ? 
0 : -EINVAL; } -static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len) +static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len) { struct msm_iommu *iommu = to_msm_iommu(mmu); @@ -66,7 +59,6 @@ static void msm_iommu_destroy(struct msm_mmu *mmu) } static const struct msm_mmu_funcs funcs = { - .attach = msm_iommu_attach, .detach = msm_iommu_detach, .map = msm_iommu_map, .unmap = msm_iommu_unmap, @@ -76,6 +68,10 @@ static const struct msm_mmu_funcs funcs = { struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain) { struct msm_iommu *iommu; + int ret; + + if (!domain) + return ERR_PTR(-ENODEV); iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); if (!iommu) @@ -85,5 +81,11 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain) msm_mmu_init(&iommu->base, dev, &funcs); iommu_set_fault_handler(domain, msm_fault_handler, iommu); + ret = iommu_attach_device(iommu->domain, dev); + if (ret) { + kfree(iommu); + return ERR_PTR(ret); + } + return &iommu->base; } diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 67a623f14319..3a534ee59bf6 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -10,11 +10,10 @@ #include <linux/iommu.h> struct msm_mmu_funcs { - int (*attach)(struct msm_mmu *mmu); void (*detach)(struct msm_mmu *mmu); int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, - unsigned len, int prot); - int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len); + size_t len, int prot); + int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len); void (*destroy)(struct msm_mmu *mmu); }; diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 732f65df5c4f..fea30e7aa9e8 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -29,8 +29,6 @@ * or shader programs (if not emitted inline in cmdstream). 
*/ -#ifdef CONFIG_DEBUG_FS - #include <linux/circ_buf.h> #include <linux/debugfs.h> #include <linux/kfifo.h> @@ -47,6 +45,8 @@ bool rd_full = false; MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents"); module_param_named(rd_full, rd_full, bool, 0600); +#ifdef CONFIG_DEBUG_FS + enum rd_sect_type { RD_NONE, RD_TEST, /* ascii text */ diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 339a0c387eae..e5c230d9ae24 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -94,7 +94,7 @@ static struct nouveau_drm *page_to_drm(struct page *page) return chunk->drm; } -static unsigned long nouveau_dmem_page_addr(struct page *page) +unsigned long nouveau_dmem_page_addr(struct page *page) { struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page); unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) - @@ -662,28 +662,3 @@ out_free_src: out: return ret; } - -void -nouveau_dmem_convert_pfn(struct nouveau_drm *drm, - struct hmm_range *range) -{ - unsigned long i, npages; - - npages = (range->end - range->start) >> PAGE_SHIFT; - for (i = 0; i < npages; ++i) { - struct page *page; - uint64_t addr; - - page = hmm_device_entry_to_page(range, range->pfns[i]); - if (page == NULL) - continue; - - if (!is_device_private_page(page)) - continue; - - addr = nouveau_dmem_page_addr(page); - range->pfns[i] &= ((1UL << range->pfn_shift) - 1); - range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift; - range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM; - } -} diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.h b/drivers/gpu/drm/nouveau/nouveau_dmem.h index 3e03d9629a38..64da5d3635c8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.h +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.h @@ -39,9 +39,8 @@ int nouveau_dmem_migrate_vma(struct nouveau_drm *drm, struct vm_area_struct *vma, unsigned long start, unsigned long end); +unsigned long nouveau_dmem_page_addr(struct page *page); -void nouveau_dmem_convert_pfn(struct nouveau_drm *drm, - struct hmm_range *range); #else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */ static inline void nouveau_dmem_init(struct nouveau_drm *drm) {} static inline void nouveau_dmem_fini(struct nouveau_drm *drm) {} diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index fe89abf237a8..ba9f9359c30e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -175,10 +175,10 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, */ mm = get_task_mm(current); - down_read(&mm->mmap_sem); + mmap_read_lock(mm); if (!cli->svm.svmm) { - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); return -EINVAL; } @@ -205,7 +205,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, */ args->result = 0; - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); mmput(mm); return 0; @@ -355,7 +355,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data, if (ret) goto out_free; - down_write(¤t->mm->mmap_sem); + mmap_write_lock(current->mm); svmm->notifier.ops = &nouveau_mn_ops; ret = __mmu_notifier_register(&svmm->notifier, current->mm); if (ret) @@ -364,31 +364,18 @@ nouveau_svmm_init(struct drm_device *dev, void *data, cli->svm.svmm = svmm; cli->svm.cli = cli; - up_write(¤t->mm->mmap_sem); + mmap_write_unlock(current->mm); mutex_unlock(&cli->mutex); return 0; out_mm_unlock: - up_write(¤t->mm->mmap_sem); + mmap_write_unlock(current->mm); out_free: mutex_unlock(&cli->mutex); kfree(svmm); return ret; } -static const u64 
-nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = { - [HMM_PFN_VALID ] = NVIF_VMM_PFNMAP_V0_V, - [HMM_PFN_WRITE ] = NVIF_VMM_PFNMAP_V0_W, -}; - -static const u64 -nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = { - [HMM_PFN_ERROR ] = ~NVIF_VMM_PFNMAP_V0_V, - [HMM_PFN_NONE ] = NVIF_VMM_PFNMAP_V0_NONE, - [HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V, -}; - /* Issue fault replay for GPU to retry accesses that faulted previously. */ static void nouveau_svm_fault_replay(struct nouveau_svm *svm) @@ -526,9 +513,45 @@ static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = { .invalidate = nouveau_svm_range_invalidate, }; +static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm, + struct hmm_range *range, u64 *ioctl_addr) +{ + unsigned long i, npages; + + /* + * The ioctl_addr prepared here is passed through nvif_object_ioctl() + * to an eventual DMA map in something like gp100_vmm_pgt_pfn() + * + * This is all just encoding the internal hmm representation into a + * different nouveau internal representation. + */ + npages = (range->end - range->start) >> PAGE_SHIFT; + for (i = 0; i < npages; ++i) { + struct page *page; + + if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) { + ioctl_addr[i] = 0; + continue; + } + + page = hmm_pfn_to_page(range->hmm_pfns[i]); + if (is_device_private_page(page)) + ioctl_addr[i] = nouveau_dmem_page_addr(page) | + NVIF_VMM_PFNMAP_V0_V | + NVIF_VMM_PFNMAP_V0_VRAM; + else + ioctl_addr[i] = page_to_phys(page) | + NVIF_VMM_PFNMAP_V0_V | + NVIF_VMM_PFNMAP_V0_HOST; + if (range->hmm_pfns[i] & HMM_PFN_WRITE) + ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W; + } +} + static int nouveau_range_fault(struct nouveau_svmm *svmm, struct nouveau_drm *drm, void *data, u32 size, - u64 *pfns, struct svm_notifier *notifier) + unsigned long hmm_pfns[], u64 *ioctl_addr, + struct svm_notifier *notifier) { unsigned long timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); @@ -537,26 +560,27 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm, .notifier = ¬ifier->notifier, .start = notifier->notifier.interval_tree.start, .end = notifier->notifier.interval_tree.last + 1, - .pfns = pfns, - .flags = nouveau_svm_pfn_flags, - .values = nouveau_svm_pfn_values, - .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT, + .pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE, + .hmm_pfns = hmm_pfns, }; struct mm_struct *mm = notifier->notifier.mm; - long ret; + int ret; while (true) { if (time_after(jiffies, timeout)) return -EBUSY; range.notifier_seq = mmu_interval_read_begin(range.notifier); - range.default_flags = 0; - range.pfn_flags_mask = -1UL; - down_read(&mm->mmap_sem); + mmap_read_lock(mm); ret = hmm_range_fault(&range); - up_read(&mm->mmap_sem); - if (ret <= 0) { - if (ret == 0 || ret == -EBUSY) + mmap_read_unlock(mm); + if (ret) { + /* + * FIXME: the input PFN_REQ flags are destroyed on + * -EBUSY, we need to regenerate them, also for the + * other continue below + */ + if (ret == -EBUSY) continue; return ret; } @@ -570,7 +594,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm, break; } - nouveau_dmem_convert_pfn(drm, &range); + nouveau_hmm_convert_pfn(drm, &range, ioctl_addr); svmm->vmm->vmm.object.client->super = true; ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL); @@ -597,6 +621,7 @@ nouveau_svm_fault(struct nvif_notify *notify) } i; u64 phys[16]; } args; + unsigned long hmm_pfns[ARRAY_SIZE(args.phys)]; struct vm_area_struct *vma; u64 inst, start, limit; int fi, fn, pi, fill; @@ -680,18 +705,18 @@ nouveau_svm_fault(struct nvif_notify *notify) /* Intersect fault 
window with the CPU VMA, cancelling * the fault if the address is invalid. */ - down_read(&mm->mmap_sem); + mmap_read_lock(mm); vma = find_vma_intersection(mm, start, limit); if (!vma) { SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit); - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); mmput(mm); nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); continue; } start = max_t(u64, start, vma->vm_start); limit = min_t(u64, limit, vma->vm_end); - up_read(&mm->mmap_sem); + mmap_read_unlock(mm); SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); if (buffer->fault[fi]->addr != start) { @@ -712,12 +737,17 @@ nouveau_svm_fault(struct nvif_notify *notify) * access flags. *XXX: atomic? */ - if (buffer->fault[fn]->access != 0 /* READ. */ && - buffer->fault[fn]->access != 3 /* PREFETCH. */) { - args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V | - NVIF_VMM_PFNMAP_V0_W; - } else { - args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V; + switch (buffer->fault[fn]->access) { + case 0: /* READ. */ + hmm_pfns[pi++] = HMM_PFN_REQ_FAULT; + break; + case 3: /* PREFETCH. */ + hmm_pfns[pi++] = 0; + break; + default: + hmm_pfns[pi++] = HMM_PFN_REQ_FAULT | + HMM_PFN_REQ_WRITE; + break; } args.i.p.size = pi << PAGE_SHIFT; @@ -745,7 +775,7 @@ nouveau_svm_fault(struct nvif_notify *notify) fill = (buffer->fault[fn ]->addr - buffer->fault[fn - 1]->addr) >> PAGE_SHIFT; while (--fill) - args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE; + hmm_pfns[pi++] = 0; } SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)", @@ -761,7 +791,7 @@ nouveau_svm_fault(struct nvif_notify *notify) ret = nouveau_range_fault( svmm, svm->drm, &args, sizeof(args.i) + pi * sizeof(args.phys[0]), - args.phys, ¬ifier); + hmm_pfns, args.phys, ¬ifier); mmu_interval_notifier_remove(¬ifier.notifier); } mmput(mm); diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index d1086b2a6892..05863b253d68 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, return ret; ret = qxl_release_reserve_list(release, true); - if (ret) + if (ret) { + qxl_release_free(qdev, release); return ret; - + } cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_SURFACE_CMD_CREATE; cmd->flags = QXL_SURF_FLAG_KEEP_DATA; @@ -499,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, /* no need to add a release to the fence for this surface bo, since it is only released when we ask to destroy the surface and it would never signal otherwise */ - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); surf->hw_surf_alloc = true; spin_lock(&qdev->surf_id_idr_lock); @@ -542,9 +543,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, cmd->surface_id = id; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); - qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 1082cd5d2fd4..9d45d5a4278f 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -510,8 +510,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane) cmd->u.set.visible = 1; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, 
false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); return ret; @@ -652,8 +652,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane, cmd->u.position.y = plane->state->crtc_y + fb->hot_y; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); if (old_cursor_bo != NULL) qxl_bo_unpin(old_cursor_bo); @@ -700,8 +700,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane, cmd->type = QXL_CURSOR_HIDE; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); } static void qxl_update_dumb_head(struct qxl_device *qdev, diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 5bebf1ea1c5d..3599db096973 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, goto out_release_backoff; rects = drawable_set_clipping(qdev, num_clips, clips_bo); - if (!rects) + if (!rects) { + ret = -EINVAL; goto out_release_backoff; - + } drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); drawable->clip.type = SPICE_CLIP_TYPE_RECTS; @@ -242,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, } qxl_bo_kunmap(clips_bo); - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); out_release_backoff: if (ret) diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c index 43688ecdd8a0..60ab7151b84d 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c @@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev, break; default: DRM_ERROR("unsupported image bit depth\n"); - return -EINVAL; /* TODO: cleanup */ + qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); + return -EINVAL; } image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN; image->u.bitmap.x = width; diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index d9a583966949..13bd1d11c703 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -261,11 +261,8 @@ static int qxl_process_single_command(struct qxl_device *qdev, apply_surf_reloc(qdev, &reloc_info[i]); } + qxl_release_fence_buffer_objects(release); ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); - if (ret) - qxl_release_backoff_reserve_list(release); - else - qxl_release_fence_buffer_objects(release); out_free_bos: out_free_release: diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 0d0ab8e0ff3b..cc31d187042e 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -196,12 +196,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm, &p->validated); if (need_mmap_lock) - down_read(¤t->mm->mmap_sem); + mmap_read_lock(current->mm); r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); if (need_mmap_lock) - up_read(¤t->mm->mmap_sem); + mmap_read_unlock(current->mm); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c 
b/drivers/gpu/drm/radeon/radeon_gem.c index 068c3e5da173..3c8f570a20ee 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -342,17 +342,17 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, } if (args->flags & RADEON_GEM_USERPTR_VALIDATE) { - down_read(¤t->mm->mmap_sem); + mmap_read_lock(current->mm); r = radeon_bo_reserve(bo, true); if (r) { - up_read(¤t->mm->mmap_sem); + mmap_read_unlock(current->mm); goto release_object; } radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT); r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); radeon_bo_unreserve(bo); - up_read(¤t->mm->mmap_sem); + mmap_read_unlock(current->mm); if (r) goto release_object; } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 95006cbf42c3..c5d1dc9618a4 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -158,7 +158,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) } if (radeon_is_px(dev)) { - dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 8e731ed0d9d9..2f319102ae9f 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -676,7 +676,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) */ if ((sched->timeout != MAX_SCHEDULE_TIMEOUT && !cancel_delayed_work(&sched->work_tdr)) || - __kthread_should_park(sched->thread)) + kthread_should_park()) return NULL; spin_lock(&sched->job_list_lock); diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index e324d7db7b7d..ce07ddc3e058 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -279,7 +279,7 @@ static const struct drm_connector_funcs sun4i_hdmi_connector_funcs = { }; #ifdef CONFIG_DRM_SUN4I_HDMI_CEC -static bool sun4i_hdmi_cec_pin_read(struct cec_adapter *adap) +static int sun4i_hdmi_cec_pin_read(struct cec_adapter *adap) { struct sun4i_hdmi *hdmi = cec_get_drvdata(adap); diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index f6c67dd87a05..aa67cb037e9d 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -718,7 +718,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder) struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder); struct mipi_dsi_device *device = dsi->device; - union phy_configure_opts opts = { 0 }; + union phy_configure_opts opts = { }; struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy; u16 delay; int err; diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index d4f51b5c7ee5..211906347f3f 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1039,6 +1039,7 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt, static bool host1x_drm_wants_iommu(struct host1x_device *dev) { + struct host1x *host1x = dev_get_drvdata(dev->dev.parent); struct iommu_domain *domain; /* @@ -1076,7 +1077,7 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev) * sufficient and whether or not the host1x is attached to an IOMMU * 
doesn't matter. */ - if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32)) + if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32)) return true; return domain != NULL; diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c index 2396262c09e4..89a226912de8 100644 --- a/drivers/gpu/drm/tidss/tidss_crtc.c +++ b/drivers/gpu/drm/tidss/tidss_crtc.c @@ -379,9 +379,17 @@ static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc) return &state->base; } +static void tidss_crtc_destroy(struct drm_crtc *crtc) +{ + struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(tcrtc); +} + static const struct drm_crtc_funcs tidss_crtc_funcs = { .reset = tidss_crtc_reset, - .destroy = drm_crtc_cleanup, + .destroy = tidss_crtc_destroy, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .atomic_duplicate_state = tidss_crtc_duplicate_state, @@ -400,7 +408,7 @@ struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss, bool has_ctm = tidss->feat->vp_feat.color.has_ctm; int ret; - tcrtc = devm_kzalloc(tidss->dev, sizeof(*tcrtc), GFP_KERNEL); + tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL); if (!tcrtc) return ERR_PTR(-ENOMEM); @@ -411,8 +419,10 @@ struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss, ret = drm_crtc_init_with_planes(&tidss->ddev, crtc, primary, NULL, &tidss_crtc_funcs, NULL); - if (ret < 0) + if (ret < 0) { + kfree(tcrtc); return ERR_PTR(ret); + } drm_crtc_helper_add(crtc, &tidss_crtc_helper_funcs); diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c index 4c0558286f5e..30bf2a65949c 100644 --- a/drivers/gpu/drm/tidss/tidss_encoder.c +++ b/drivers/gpu/drm/tidss/tidss_encoder.c @@ -8,9 +8,8 @@ #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> -#include <drm/drm_of.h> #include <drm/drm_panel.h> -#include <drm/drm_simple_kms_helper.h> +#include <drm/drm_of.h> #include "tidss_crtc.h" #include "tidss_drv.h" @@ -56,25 +55,38 @@ static int tidss_encoder_atomic_check(struct drm_encoder *encoder, return 0; } +static void tidss_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + static const struct drm_encoder_helper_funcs encoder_helper_funcs = { .atomic_check = tidss_encoder_atomic_check, }; +static const struct drm_encoder_funcs encoder_funcs = { + .destroy = tidss_encoder_destroy, +}; + struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss, u32 encoder_type, u32 possible_crtcs) { struct drm_encoder *enc; int ret; - enc = devm_kzalloc(tidss->dev, sizeof(*enc), GFP_KERNEL); + enc = kzalloc(sizeof(*enc), GFP_KERNEL); if (!enc) return ERR_PTR(-ENOMEM); enc->possible_crtcs = possible_crtcs; - ret = drm_simple_encoder_init(&tidss->ddev, enc, encoder_type); - if (ret < 0) + ret = drm_encoder_init(&tidss->ddev, enc, &encoder_funcs, + encoder_type, NULL); + if (ret < 0) { + kfree(enc); return ERR_PTR(ret); + } drm_encoder_helper_add(enc, &encoder_helper_funcs); diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c index 23bb3e59504b..0a563eabcbb9 100644 --- a/drivers/gpu/drm/tidss/tidss_plane.c +++ b/drivers/gpu/drm/tidss/tidss_plane.c @@ -141,6 +141,14 @@ static void tidss_plane_atomic_disable(struct drm_plane *plane, dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false); } +static void drm_plane_destroy(struct drm_plane *plane) +{ + struct tidss_plane *tplane = to_tidss_plane(plane); + + drm_plane_cleanup(plane); + 
kfree(tplane); +} + static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = { .atomic_check = tidss_plane_atomic_check, .atomic_update = tidss_plane_atomic_update, @@ -151,7 +159,7 @@ static const struct drm_plane_funcs tidss_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .reset = drm_atomic_helper_plane_reset, - .destroy = drm_plane_cleanup, + .destroy = drm_plane_destroy, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, }; @@ -175,7 +183,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss, BIT(DRM_MODE_BLEND_COVERAGE)); int ret; - tplane = devm_kzalloc(tidss->dev, sizeof(*tplane), GFP_KERNEL); + tplane = kzalloc(sizeof(*tplane), GFP_KERNEL); if (!tplane) return ERR_PTR(-ENOMEM); @@ -190,7 +198,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss, formats, num_formats, NULL, type, NULL); if (ret < 0) - return ERR_PTR(ret); + goto err; drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs); @@ -203,15 +211,19 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss, default_encoding, default_range); if (ret) - return ERR_PTR(ret); + goto err; ret = drm_plane_create_alpha_property(&tplane->plane); if (ret) - return ERR_PTR(ret); + goto err; ret = drm_plane_create_blend_mode_property(&tplane->plane, blend_modes); if (ret) - return ERR_PTR(ret); + goto err; return tplane; + +err: + kfree(tplane); + return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 52d2b71f1588..f09b096ba4fd 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -257,54 +257,6 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page) return 0; } -#ifdef CONFIG_X86 -#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot) -#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr) -#else -#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot) -#define __ttm_kunmap_atomic(__addr) vunmap(__addr) -#endif - - -/** - * ttm_kmap_atomic_prot - Efficient kernel map of a single page with - * specified page protection. - * - * @page: The page to map. - * @prot: The page protection. - * - * This function maps a TTM page using the kmap_atomic api if available, - * otherwise falls back to vmap. The user must make sure that the - * specified page does not have an aliased mapping with a different caching - * policy unless the architecture explicitly allows it. Also mapping and - * unmapping using this api must be correctly nested. Unmapping should - * occur in the reverse order of mapping. - */ -void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot) -{ - if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) - return kmap_atomic(page); - else - return __ttm_kmap_atomic_prot(page, prot); -} -EXPORT_SYMBOL(ttm_kmap_atomic_prot); - -/** - * ttm_kunmap_atomic_prot - Unmap a page that was mapped using - * ttm_kmap_atomic_prot. - * - * @addr: The virtual address from the map. - * @prot: The page protection. 
- */ -void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot) -{ - if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) - kunmap_atomic(addr); - else - __ttm_kunmap_atomic(addr); -} -EXPORT_SYMBOL(ttm_kunmap_atomic_prot); - static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, unsigned long page, pgprot_t prot) @@ -316,13 +268,13 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, return -ENOMEM; src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); - dst = ttm_kmap_atomic_prot(d, prot); + dst = kmap_atomic_prot(d, prot); if (!dst) return -ENOMEM; memcpy_fromio(dst, src, PAGE_SIZE); - ttm_kunmap_atomic_prot(dst, prot); + kunmap_atomic(dst); return 0; } @@ -338,13 +290,13 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, return -ENOMEM; dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); - src = ttm_kmap_atomic_prot(s, prot); + src = kmap_atomic_prot(s, prot); if (!src) return -ENOMEM; memcpy_toio(dst, src, PAGE_SIZE); - ttm_kunmap_atomic_prot(src, prot); + kunmap_atomic(src); return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 0ad30b112982..a43aa7275f12 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -58,7 +58,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, goto out_clear; /* - * If possible, avoid waiting for GPU with mmap_sem + * If possible, avoid waiting for GPU with mmap_lock * held. We only do this if the fault allows retry and this * is the first attempt. */ @@ -68,7 +68,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, goto out_unlock; ttm_bo_get(bo); - up_read(&vmf->vma->vm_mm->mmap_sem); + mmap_read_unlock(vmf->vma->vm_mm); (void) dma_fence_wait(bo->moving, true); dma_resv_unlock(bo->base.resv); ttm_bo_put(bo); @@ -131,20 +131,20 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, { /* * Work around locking order reversal in fault / nopfn - * between mmap_sem and bo_reserve: Perform a trylock operation + * between mmap_lock and bo_reserve: Perform a trylock operation * for reserve, and if it fails, retry the fault after waiting * for the buffer to become unreserved. 
*/ if (unlikely(!dma_resv_trylock(bo->base.resv))) { /* * If the fault allows retry and this is the first - * fault attempt, we try to release the mmap_sem + * fault attempt, we try to release the mmap_lock * before waiting */ if (fault_flag_allow_retry_first(vmf->flags)) { if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { ttm_bo_get(bo); - up_read(&vmf->vma->vm_mm->mmap_sem); + mmap_read_unlock(vmf->vma->vm_mm); if (!dma_resv_lock_interruptible(bo->base.resv, NULL)) dma_resv_unlock(bo->base.resv); diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 49bebdee6d91..9ff9f4ac0522 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -221,6 +221,7 @@ struct virtio_gpu_fpriv { /* virtgpu_ioctl.c */ #define DRM_VIRTIO_NUM_IOCTLS 10 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; +void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file); /* virtgpu_kms.c */ int virtio_gpu_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index 1025658be4df..d6cb350ae52a 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -39,6 +39,9 @@ static int virtio_gpu_gem_create(struct drm_file *file, int ret; u32 handle; + if (vgdev->has_virgl_3d) + virtio_gpu_create_context(dev, file); + ret = virtio_gpu_object_create(vgdev, params, &obj, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 867c5e239d55..5df722072ba0 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -27,14 +27,14 @@ #include <linux/file.h> #include <linux/sync_file.h> +#include <linux/uaccess.h> #include <drm/drm_file.h> #include <drm/virtgpu_drm.h> #include "virtgpu_drv.h" -static void virtio_gpu_create_context(struct drm_device *dev, - struct drm_file *file) +void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file) { struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 023a030ca7b9..0a5c8cf409fb 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -25,6 +25,7 @@ #include <linux/virtio.h> #include <linux/virtio_config.h> +#include <linux/virtio_ring.h> #include <drm/drm_file.h> @@ -52,14 +53,6 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work) events_clear, &events_clear); } -static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev, - uint32_t ctx_id) -{ - virtio_gpu_cmd_context_destroy(vgdev, ctx_id); - virtio_gpu_notify(vgdev); - ida_free(&vgdev->ctx_id_ida, ctx_id - 1); -} - static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq, void (*work_func)(struct work_struct *work)) { @@ -274,14 +267,17 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file) void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file) { struct virtio_gpu_device *vgdev = dev->dev_private; - struct virtio_gpu_fpriv *vfpriv; + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; if (!vgdev->has_virgl_3d) return; - vfpriv = file->driver_priv; + if (vfpriv->context_created) { + virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id); + virtio_gpu_notify(vgdev); + } - virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id); + 
ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1); mutex_destroy(&vfpriv->context_lock); kfree(vfpriv); file->driver_priv = NULL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index bb46ca0c458f..1629427d5734 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -27,6 +27,7 @@ **************************************************************************/ #include "vmwgfx_drv.h" +#include <linux/highmem.h> /* * Template that implements find_first_diff() for a generic @@ -374,12 +375,12 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d, copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset); if (unmap_src) { - ttm_kunmap_atomic_prot(d->src_addr, d->src_prot); + kunmap_atomic(d->src_addr); d->src_addr = NULL; } if (unmap_dst) { - ttm_kunmap_atomic_prot(d->dst_addr, d->dst_prot); + kunmap_atomic(d->dst_addr); d->dst_addr = NULL; } @@ -388,8 +389,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d, return -EINVAL; d->dst_addr = - ttm_kmap_atomic_prot(d->dst_pages[dst_page], - d->dst_prot); + kmap_atomic_prot(d->dst_pages[dst_page], + d->dst_prot); if (!d->dst_addr) return -ENOMEM; @@ -401,8 +402,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d, return -EINVAL; d->src_addr = - ttm_kmap_atomic_prot(d->src_pages[src_page], - d->src_prot); + kmap_atomic_prot(d->src_pages[src_page], + d->src_prot); if (!d->src_addr) return -ENOMEM; @@ -499,9 +500,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, } out: if (d.src_addr) - ttm_kunmap_atomic_prot(d.src_addr, d.src_prot); + kunmap_atomic(d.src_addr); if (d.dst_addr) - ttm_kunmap_atomic_prot(d.dst_addr, d.dst_prot); + kunmap_atomic(d.dst_addr); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 8cdcd6e5f9e1..3596f3923ea3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -850,7 +850,7 @@ extern void vmw_bo_bo_free(struct ttm_buffer_object *bo); extern int vmw_bo_init(struct vmw_private *dev_priv, struct vmw_buffer_object *vmw_bo, size_t size, struct ttm_placement *placement, - bool interuptable, + bool interruptible, void (*bo_free)(struct ttm_buffer_object *bo)); extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo, struct ttm_object_file *tfile); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 178a6cd1a06f..0f8d29397157 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -515,7 +515,7 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) struct vmw_fence_manager *fman = fman_from_fence(fence); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) - return 1; + return true; vmw_fences_update(fman); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 7ef51fa84b01..126f93c0b0b8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -1651,7 +1651,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, struct vmw_surface_metadata *metadata; struct ttm_base_object *base; uint32_t backup_handle; - int ret = -EINVAL; + int ret; ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, req->handle_type, &base); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 388bcc2889aa..d24344e91922 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -192,17 
+192,55 @@ static void host1x_setup_sid_table(struct host1x *host) } } +static bool host1x_wants_iommu(struct host1x *host1x) +{ + /* + * If we support addressing a maximum of 32 bits of physical memory + * and if the host1x firewall is enabled, there's no need to enable + * IOMMU support. This can happen for example on Tegra20, Tegra30 + * and Tegra114. + * + * Tegra124 and later can address up to 34 bits of physical memory and + * many platforms come equipped with more than 2 GiB of system memory, + * which requires crossing the 4 GiB boundary. But there's a catch: on + * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can + * only address up to 32 bits of memory in GATHER opcodes, which means + * that command buffers need to either be in the first 2 GiB of system + * memory (which could quickly lead to memory exhaustion), or command + * buffers need to be treated differently from other buffers (which is + * not possible with the current ABI). + * + * A third option is to use the IOMMU in these cases to make sure all + * buffers will be mapped into a 32-bit IOVA space that host1x can + * address. This allows all of the system memory to be used and works + * within the limitations of the host1x on these SoCs. + * + * In summary, default to enable IOMMU on Tegra124 and later. For any + * of the earlier SoCs, only use the IOMMU for additional safety when + * the host1x firewall is disabled. + */ + if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) { + if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) + return false; + } + + return true; +} + static struct iommu_domain *host1x_iommu_attach(struct host1x *host) { struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev); int err; /* - * If the host1x firewall is enabled, there's no need to enable IOMMU - * support. Similarly, if host1x is already attached to an IOMMU (via - * the DMA API), don't try to attach again. + * We may not always want to enable IOMMU support (for example if the + * host1x firewall is already enabled and we don't support addressing + * more than 32 bits of physical memory), so check for that first. + * + * Similarly, if host1x is already attached to an IOMMU (via the DMA + * API), don't try to attach again. */ - if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain) + if (!host1x_wants_iommu(host) || domain) return domain; host->group = iommu_group_get(host->dev); @@ -502,6 +540,19 @@ static void __exit tegra_host1x_exit(void) } module_exit(tegra_host1x_exit); +/** + * host1x_get_dma_mask() - query the supported DMA mask for host1x + * @host1x: host1x instance + * + * Note that this returns the supported DMA mask for host1x, which can be + * different from the applicable DMA mask under certain circumstances. + */ +u64 host1x_get_dma_mask(struct host1x *host1x) +{ + return host1x->info->dma_mask; +} +EXPORT_SYMBOL(host1x_get_dma_mask); + MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>"); MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>"); MODULE_DESCRIPTION("Host1x driver for Tegra products");
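
Note on the hmm_range_fault() conversion in the amdgpu and nouveau hunks above: the interface now passes request flags and receives results through the same hmm_pfns[] array (HMM_PFN_REQ_FAULT/HMM_PFN_REQ_WRITE in, HMM_PFN_VALID/HMM_PFN_WRITE out), and the function returns 0 or a negative errno instead of a pfn count. Below is a condensed sketch of the retry loop both drivers now use; the helper name is hypothetical, real callers re-check mmu_interval_read_retry() under their own driver lock before consuming the result (nouveau holds svmm->mutex), and, per the FIXME in the nouveau hunk, the request flags in hmm_pfns[] must be regenerated after -EBUSY.

#include <linux/hmm.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

/*
 * Sketch only: caller has filled range->notifier, range->hmm_pfns,
 * range->start/end and range->default_flags.
 */
static int example_range_fault(struct mmu_interval_notifier *notifier,
			       struct hmm_range *range)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	struct mm_struct *mm = notifier->mm;
	int ret;

	while (true) {
		if (time_after(jiffies, timeout))
			return -EBUSY;

		/* Snapshot the invalidation sequence before faulting. */
		range->notifier_seq = mmu_interval_read_begin(notifier);

		mmap_read_lock(mm);
		ret = hmm_range_fault(range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue; /* raced with an invalidation */
			return ret;
		}

		/* Retry if the range was invalidated while faulting. */
		if (!mmu_interval_read_retry(notifier,
					     range->notifier_seq))
			break;
	}

	return 0;
}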
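
The many mmap_sem changes throughout this diff (amdgpu, nouveau, radeon, ttm) are a mechanical conversion to the mmap_lock wrapper API. A minimal sketch of the mapping, assuming <linux/mmap_lock.h> and a hypothetical function name:

#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

static void example_mmap_lock_usage(struct mm_struct *mm)
{
	/* was: down_read(&mm->mmap_sem) / up_read(&mm->mmap_sem) */
	mmap_read_lock(mm);
	mmap_read_unlock(mm);

	/* was: down_write(&mm->mmap_sem) / up_write(&mm->mmap_sem) */
	mmap_write_lock(mm);
	mmap_write_unlock(mm);
}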
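
On the msm_gem_init_vma() change: drm_mm_insert_node_in_range() takes (mm, node, size, alignment, color, range_start, range_end, mode), so the extra arguments let the caller constrain the iova window. Passing zero alignment, color and mode over the full range should reproduce the old drm_mm_insert_node() behaviour; a sketch with a hypothetical helper name:

#include <drm/drm_mm.h>
#include <linux/limits.h>

/* Equivalent of the old drm_mm_insert_node(mm, node, npages): insert
 * anywhere in the address space, no alignment, color or special mode. */
static int example_insert_full_range(struct drm_mm *mm,
				     struct drm_mm_node *node, u64 npages)
{
	return drm_mm_insert_node_in_range(mm, node, npages, 0, 0,
					   0, U64_MAX, 0);
}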