Diffstat (limited to 'drivers/gpu/drm')
114 files changed, 1858 insertions, 577 deletions
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index acf3a36c9ebc..32982da82694 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev, { struct armada_private *priv = dev->dev_private; - /* - * Yes, we really must jump through these hoops just to store a - * _pointer_ to something into the kfifo. This is utterly insane - * and idiotic, because it kfifo requires the _data_ pointed to by - * the pointer const, not the pointer itself. Not only that, but - * you have to pass a pointer _to_ the pointer you want stored. - */ - const struct drm_framebuffer *silly_api_alert = fb; - WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert)); + WARN_ON(!kfifo_put(&priv->fb_unref, fb)); schedule_work(&priv->fb_unref_work); } diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 3f65dd6676b2..a28640f47c27 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev, * then the BO is being moved and we should * store up the damage until later. */ - if (!drm_can_sleep()) + if (drm_can_sleep()) ret = ast_bo_reserve(bo, true); if (ret) { if (ret != -EBUSY) diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig index c8fcf12019f0..5f8b0c2b9a44 100644 --- a/drivers/gpu/drm/bochs/Kconfig +++ b/drivers/gpu/drm/bochs/Kconfig @@ -2,6 +2,7 @@ config DRM_BOCHS tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" depends on DRM && PCI select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER select FB_SYS_FILLRECT select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index 2fd4a92162cb..32bbba0a787b 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev, * then the BO is being moved and we should * store up the damage until later. 
*/ - if (!drm_can_sleep()) + if (drm_can_sleep()) ret = cirrus_bo_reserve(bo, true); if (ret) { if (ret != -EBUSY) diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index dffc836144cc..f4dc9b7a3831 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -296,6 +296,18 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) case DRM_CAP_ASYNC_PAGE_FLIP: req->value = dev->mode_config.async_page_flip; break; + case DRM_CAP_CURSOR_WIDTH: + if (dev->mode_config.cursor_width) + req->value = dev->mode_config.cursor_width; + else + req->value = 64; + break; + case DRM_CAP_CURSOR_HEIGHT: + if (dev->mode_config.cursor_height) + req->value = dev->mode_config.cursor_height; + else + req->value = 64; + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 5736aaa7e86c..f7af69bcf3f4 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -468,8 +468,8 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) } else { list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, legacy_dev_list) { - drm_put_dev(dev); list_del(&dev->legacy_dev_list); + drm_put_dev(dev); } } DRM_INFO("Module unloaded\n"); diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index f227f544aa36..6e1a1a20cf6b 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D config DRM_EXYNOS_IPP bool "Exynos DRM IPP" - depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM + depends on DRM_EXYNOS help Choose this option if you want to use IPP feature for DRM. @@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR config DRM_EXYNOS_GSC bool "Exynos DRM GSC" - depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 + depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM help Choose this option if you want to use Exynos GSC for DRM. 
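[The DRM_CAP_CURSOR_WIDTH / DRM_CAP_CURSOR_HEIGHT capabilities added to drm_getcap() in drm_ioctl.c above let userspace discover the hardware cursor limits instead of hard-coding 64x64. A minimal sketch of querying them through libdrm's drmGetCap(); the /dev/dri/card0 node and the 64x64 fallback for kernels that predate these caps are assumptions for illustration, not part of the patch:]

    /* Query maximum cursor size via the new DRM caps (link with -ldrm). */
    #include <fcntl.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <xf86drm.h>        /* drmGetCap(); needs libdrm headers that
                                   already define DRM_CAP_CURSOR_* */

    int main(void)
    {
            uint64_t w = 0, h = 0;
            int fd = open("/dev/dri/card0", O_RDWR); /* assumed device node */

            if (fd < 0)
                    return 1;

            /* Older kernels return -EINVAL for unknown caps; fall back to
             * the 64x64 default that drm_getcap() also reports when a driver
             * leaves mode_config.cursor_width/height unset. */
            if (drmGetCap(fd, DRM_CAP_CURSOR_WIDTH, &w) ||
                drmGetCap(fd, DRM_CAP_CURSOR_HEIGHT, &h)) {
                    w = 64;
                    h = 64;
            }

            printf("max cursor: %" PRIu64 "x%" PRIu64 "\n", w, h);
            close(fd);
            return 0;
    }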
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 9d096a0c5f8d..c204b4e3356e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -171,22 +171,28 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) file->driver_priv = file_priv; ret = exynos_drm_subdrv_open(dev, file); - if (ret) { - kfree(file_priv); - file->driver_priv = NULL; - } + if (ret) + goto err_file_priv_free; anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, NULL, 0); if (IS_ERR(anon_filp)) { - kfree(file_priv); - return PTR_ERR(anon_filp); + ret = PTR_ERR(anon_filp); + goto err_subdrv_close; } anon_filp->f_mode = FMODE_READ | FMODE_WRITE; file_priv->anon_filp = anon_filp; return ret; + +err_subdrv_close: + exynos_drm_subdrv_close(dev, file); + +err_file_priv_free: + kfree(file_priv); + file->driver_priv = NULL; + return ret; } static void exynos_drm_preclose(struct drm_device *dev, diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 380aec28840b..6c1885eedfdf 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset) reg_type = REG_TYPE_NONE; DRM_ERROR("Unknown register offset![%d]\n", reg_offset); break; - }; + } return reg_type; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index d519a4e5fe40..09312b877470 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -16,7 +16,6 @@ #include <linux/types.h> #include <linux/clk.h> #include <linux/pm_runtime.h> -#include <plat/map-base.h> #include <drm/drmP.h> #include <drm/exynos_drm.h> @@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node, DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); /* - * quf == NULL condition means all event deletion. + * qbuf == NULL condition means all event deletion. * stop operations want to delete all event list. * another case delete only same buf id. 
*/ diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index a0e10aeb0e67..c021ddc1ffb4 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -34,6 +34,7 @@ #include <linux/io.h> #include <linux/of.h> #include <linux/of_gpio.h> +#include <linux/hdmi.h> #include <drm/exynos_drm.h> @@ -59,19 +60,6 @@ #define HDMI_AUI_VERSION 0x01 #define HDMI_AUI_LENGTH 0x0A -/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */ -enum HDMI_PACKET_TYPE { - /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */ - /* InfoFrame packet type */ - HDMI_PACKET_TYPE_INFOFRAME = 0x80, - /* Vendor-Specific InfoFrame */ - HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1, - /* Auxiliary Video information InfoFrame */ - HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2, - /* Audio information InfoFrame */ - HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4 -}; - enum hdmi_type { HDMI_TYPE13, HDMI_TYPE14, @@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = { }, }; -struct hdmi_infoframe { - enum HDMI_PACKET_TYPE type; - u8 ver; - u8 len; -}; - static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) { return readl(hdata->regs + reg_id); @@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata, } static void hdmi_reg_infoframe(struct hdmi_context *hdata, - struct hdmi_infoframe *infoframe) + union hdmi_infoframe *infoframe) { u32 hdr_sum; u8 chksum; @@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata, return; } - switch (infoframe->type) { - case HDMI_PACKET_TYPE_AVI: + switch (infoframe->any.type) { + case HDMI_INFOFRAME_TYPE_AVI: hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC); - hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type); - hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver); - hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len); - hdr_sum = infoframe->type + infoframe->ver + infoframe->len; + hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type); + hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, + infoframe->any.version); + hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length); + hdr_sum = infoframe->any.type + infoframe->any.version + + infoframe->any.length; /* Output format zero hardcoded ,RGB YBCR selection */ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 | @@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata, hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1), - infoframe->len, hdr_sum); + infoframe->any.length, hdr_sum); DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum); hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum); break; - case HDMI_PACKET_TYPE_AUI: + case HDMI_INFOFRAME_TYPE_AUDIO: hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02); - hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type); - hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver); - hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len); - hdr_sum = infoframe->type + infoframe->ver + infoframe->len; + hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type); + hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, + infoframe->any.version); + hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length); + hdr_sum = infoframe->any.type + infoframe->any.version + + infoframe->any.length; chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1), - infoframe->len, hdr_sum); + infoframe->any.length, hdr_sum); DRM_DEBUG_KMS("AUI checksum = 0x%x\n", 
chksum); hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum); break; @@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata) static void hdmi_conf_init(struct hdmi_context *hdata) { - struct hdmi_infoframe infoframe; + union hdmi_infoframe infoframe; /* disable HPD interrupts from HDMI IP block, use GPIO instead */ hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | @@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata) hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); } else { - infoframe.type = HDMI_PACKET_TYPE_AVI; - infoframe.ver = HDMI_AVI_VERSION; - infoframe.len = HDMI_AVI_LENGTH; + infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI; + infoframe.any.version = HDMI_AVI_VERSION; + infoframe.any.length = HDMI_AVI_LENGTH; hdmi_reg_infoframe(hdata, &infoframe); - infoframe.type = HDMI_PACKET_TYPE_AUI; - infoframe.ver = HDMI_AUI_VERSION; - infoframe.len = HDMI_AUI_LENGTH; + infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO; + infoframe.any.version = HDMI_AUI_VERSION; + infoframe.any.length = HDMI_AUI_LENGTH; hdmi_reg_infoframe(hdata, &infoframe); /* enable AVI packet every vsync, fixes purple line problem */ diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 400b0c4a10fb..faa77f543a07 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -208,7 +208,7 @@ struct tda998x_priv { # define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1) # define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6) #define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */ -# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0) +# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0) # define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4) #define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */ # define PLL_SERIAL_3_SRL_CCIR (1 << 0) @@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p) { uint8_t buf[PB(5) + 1]; + memset(buf, 0, sizeof(buf)); buf[HB(0)] = 0x84; buf[HB(1)] = 0x01; buf[HB(2)] = 10; - buf[PB(0)] = 0; buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */ buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */ buf[PB(4)] = p->audio_frame[4]; @@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, } div = 148500 / mode->clock; + if (div != 0) { + div--; + if (div > 3) + div = 3; + } /* mute the audio FIFO: */ reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO); @@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder, if (priv->rev == TDA19988) { /* let incoming pixels fill the active space (if any) */ - reg_write(encoder, REG_ENABLE_SPACE, 0x01); + reg_write(encoder, REG_ENABLE_SPACE, 0x00); } /* must be last register set: */ @@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder) { struct tda998x_priv *priv = to_tda998x_priv(encoder); drm_i2c_encoder_destroy(encoder); + if (priv->cec) + i2c_unregister_device(priv->cec); kfree(priv); } @@ -1142,8 +1149,12 @@ tda998x_encoder_init(struct i2c_client *client, priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5); - priv->current_page = 0; + priv->current_page = 0xff; priv->cec = i2c_new_dummy(client->adapter, 0x34); + if (!priv->cec) { + kfree(priv); + return -ENODEV; + } priv->dpms = DRM_MODE_DPMS_OFF; encoder_slave->slave_priv = priv; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 04f1f02c4019..ec7bb0fc71bc 100644 --- 
a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -403,7 +403,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist); void intel_detect_pch(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct pci_dev *pch; + struct pci_dev *pch = NULL; /* In all current cases, num_pipes is equivalent to the PCH_NOP setting * (which really amounts to a PCH but no South Display). @@ -424,12 +424,9 @@ void intel_detect_pch(struct drm_device *dev) * all the ISA bridge devices and check for the first match, instead * of only checking the first one. */ - pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); - while (pch) { - struct pci_dev *curr = pch; + while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { if (pch->vendor == PCI_VENDOR_ID_INTEL) { - unsigned short id; - id = pch->device & INTEL_PCH_DEVICE_ID_MASK; + unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK; dev_priv->pch_id = id; if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { @@ -461,18 +458,16 @@ void intel_detect_pch(struct drm_device *dev) DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); WARN_ON(!IS_HASWELL(dev)); WARN_ON(!IS_ULT(dev)); - } else { - goto check_next; - } - pci_dev_put(pch); + } else + continue; + break; } -check_next: - pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr); - pci_dev_put(curr); } if (!pch) - DRM_DEBUG_KMS("No PCH found?\n"); + DRM_DEBUG_KMS("No PCH found.\n"); + + pci_dev_put(pch); } bool i915_semaphore_is_enabled(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4a2bf8e3f739..df77e20e3c3d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1831,6 +1831,14 @@ struct drm_i915_file_private { /* Early gen2 have a totally busted CS tlb and require pinned batches. */ #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) +/* + * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts + * even when in MSI mode. This results in spurious interrupt warnings if the + * legacy irq no. is shared with another device. The kernel then disables that + * interrupt source and so prevents the other device from working properly. + */ +#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) +#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 1a24e84f2315..28d24caa49f3 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -82,9 +82,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size, "Graphics Stolen Memory"); if (r == NULL) { - DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", - base, base + (uint32_t)dev_priv->gtt.stolen_size); - base = 0; + /* + * One more attempt but this time requesting region from + * base + 1, as we have seen that this resolves the region + * conflict with the PCI Bus. + * This is a BIOS w/a: Some BIOS wrap stolen in the root + * PCI bus, but have an off-by-one error. Hence retry the + * reservation starting from 1 instead of 0. 
+ */ + r = devm_request_mem_region(dev->dev, base + 1, + dev_priv->gtt.stolen_size - 1, + "Graphics Stolen Memory"); + if (r == NULL) { + DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", + base, base + (uint32_t)dev_priv->gtt.stolen_size); + base = 0; + } } return base; @@ -201,6 +214,13 @@ int i915_gem_init_stolen(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int bios_reserved = 0; +#ifdef CONFIG_INTEL_IOMMU + if (intel_iommu_gfx_mapped) { + DRM_INFO("DMAR active, disabling use of stolen memory\n"); + return 0; + } +#endif + if (dev_priv->gtt.stolen_size == 0) return 0; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index d7fd2fd2f0a5..990cf8f43efd 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e, va_list tmp; va_copy(tmp, args); - if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp))) + len = vsnprintf(NULL, 0, f, tmp); + va_end(tmp); + + if (!__i915_error_seek(e, len)) return; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 17d8fcb1b6f7..d554169ac592 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; } else { - enum transcoder cpu_transcoder = - intel_pipe_to_cpu_transcoder(dev_priv, pipe); + enum transcoder cpu_transcoder = (enum transcoder) pipe; u32 htotal; htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; @@ -619,33 +618,25 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) /* raw reads, only for fast reads of display block, no need for forcewake etc. */ #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) -#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__)) static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t status; - - if (INTEL_INFO(dev)->gen < 7) { - status = pipe == PIPE_A ? - DE_PIPEA_VBLANK : - DE_PIPEB_VBLANK; + int reg; + + if (INTEL_INFO(dev)->gen >= 8) { + status = GEN8_PIPE_VBLANK; + reg = GEN8_DE_PIPE_ISR(pipe); + } else if (INTEL_INFO(dev)->gen >= 7) { + status = DE_PIPE_VBLANK_IVB(pipe); + reg = DEISR; } else { - switch (pipe) { - default: - case PIPE_A: - status = DE_PIPEA_VBLANK_IVB; - break; - case PIPE_B: - status = DE_PIPEB_VBLANK_IVB; - break; - case PIPE_C: - status = DE_PIPEC_VBLANK_IVB; - break; - } + status = DE_PIPE_VBLANK(pipe); + reg = DEISR; } - return __raw_i915_read32(dev_priv, DEISR) & status; + return __raw_i915_read32(dev_priv, reg) & status; } static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, @@ -703,7 +694,28 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, else position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; - if (HAS_PCH_SPLIT(dev)) { + if (HAS_DDI(dev)) { + /* + * On HSW HDMI outputs there seems to be a 2 line + * difference, whereas eDP has the normal 1 line + * difference that earlier platforms have. External + * DP is unknown. For now just check for the 2 line + * difference case on all output types on HSW+. 
+ * + * This might misinterpret the scanline counter being + * one line too far along on eDP, but that's less + * dangerous than the alternative since that would lead + * the vblank timestamp code astray when it sees a + * scanline count before vblank_start during a vblank + * interrupt. + */ + in_vbl = ilk_pipe_in_vblank_locked(dev, pipe); + if ((in_vbl && (position == vbl_start - 2 || + position == vbl_start - 1)) || + (!in_vbl && (position == vbl_end - 2 || + position == vbl_end - 1))) + position = (position + 2) % vtotal; + } else if (HAS_PCH_SPLIT(dev)) { /* * The scanline counter increments at the leading edge * of hsync, ie. it completely misses the active portion @@ -2770,10 +2782,9 @@ static void ibx_irq_postinstall(struct drm_device *dev) return; if (HAS_PCH_IBX(dev)) { - mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | - SDE_TRANSA_FIFO_UNDER | SDE_POISON; + mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; } else { - mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; + mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; I915_WRITE(SERR_INT, I915_READ(SERR_INT)); } @@ -2833,20 +2844,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev) display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | DE_PLANEB_FLIP_DONE_IVB | - DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | - DE_ERR_INT_IVB); + DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | - DE_PIPEA_VBLANK_IVB); + DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); } else { display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | DE_AUX_CHANNEL_A | - DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | DE_POISON); - extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; + extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | + DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; } dev_priv->irq_mask = ~display_mask; @@ -2962,9 +2972,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) struct drm_device *dev = dev_priv->dev; uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | GEN8_PIPE_CDCLK_CRC_DONE | - GEN8_PIPE_FIFO_UNDERRUN | GEN8_DE_PIPE_IRQ_FAULT_ERRORS; - uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK; + uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | + GEN8_PIPE_FIFO_UNDERRUN; int pipe; dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index e06b9e017d6b..234ac5f7bc5a 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1244,6 +1244,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_panel_off(intel_dp); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9fa24347963a..9b8a7c7ea7fc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1092,12 +1092,12 @@ static void assert_cursor(struct drm_i915_private *dev_priv, struct drm_device *dev = dev_priv->dev; bool cur_state; - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - cur_state = 
I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; - else if (IS_845G(dev) || IS_I865G(dev)) + if (IS_845G(dev) || IS_I865G(dev)) cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; - else + else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; + else + cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; WARN(cur_state != state, "cursor on pipe %c assertion failure (expected %s, current %s)\n", @@ -8586,6 +8586,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev, if (ring->id == RCS) len += 6; + /* + * BSpec MI_DISPLAY_FLIP for IVB: + * "The full packet must be contained within the same cache line." + * + * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same + * cacheline, if we ever start emitting more commands before + * the MI_DISPLAY_FLIP we may need to first emit everything else, + * then do the cacheline alignment, and finally emit the + * MI_DISPLAY_FLIP. + */ + ret = intel_ring_cacheline_align(ring); + if (ret) + goto err_unpin; + ret = intel_ring_begin(ring, len); if (ret) goto err_unpin; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5ede4e8e290d..2688f6d64bb9 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, int i, ret, recv_bytes; uint32_t status; int try, precharge, clock = 0; - bool has_aux_irq = true; + bool has_aux_irq = HAS_AUX_IRQ(dev); uint32_t timeout; /* dp aux is extremely sensitive to irq latency, hence request the @@ -537,6 +537,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp, uint8_t msg[20]; int msg_bytes; uint8_t ack; + int retry; if (WARN_ON(send_bytes > 16)) return -E2BIG; @@ -548,19 +549,21 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp, msg[3] = send_bytes - 1; memcpy(&msg[4], send, send_bytes); msg_bytes = send_bytes + 4; - for (;;) { + for (retry = 0; retry < 7; retry++) { ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1); if (ret < 0) return ret; ack >>= 4; if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) - break; + return send_bytes; else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) - udelay(100); + usleep_range(400, 500); else return -EIO; } - return send_bytes; + + DRM_ERROR("too many retries, giving up\n"); + return -EIO; } /* Write a single byte to the aux channel in native mode */ @@ -582,6 +585,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, int reply_bytes; uint8_t ack; int ret; + int retry; if (WARN_ON(recv_bytes > 19)) return -E2BIG; @@ -595,7 +599,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, msg_bytes = 4; reply_bytes = recv_bytes + 1; - for (;;) { + for (retry = 0; retry < 7; retry++) { ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, reply, reply_bytes); if (ret == 0) @@ -608,10 +612,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, return ret - 1; } else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER) - udelay(100); + usleep_range(400, 500); else return -EIO; } + + DRM_ERROR("too many retries, giving up\n"); + return -EIO; } static int @@ -1242,17 +1249,24 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp) DRM_DEBUG_KMS("Turn eDP power off\n"); + WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); + pp = ironlake_get_pp_control(intel_dp); /* We need to switch off panel power _and_ force vdd, for otherwise some * panels get very unhappy and cease to work. 
*/ - pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE); + pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); + intel_dp->want_panel_vdd = false; + ironlake_wait_panel_off(intel_dp); + + /* We got a reference when we enabled the VDD. */ + intel_runtime_pm_put(dev_priv); } void ironlake_edp_backlight_on(struct intel_dp *intel_dp) @@ -1632,7 +1646,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) val |= EDP_PSR_LINK_DISABLE; I915_WRITE(EDP_PSR_CTL(dev), val | - IS_BROADWELL(dev) ? 0 : link_entry_time | + (IS_BROADWELL(dev) ? 0 : link_entry_time) | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | EDP_PSR_ENABLE); @@ -1777,6 +1791,7 @@ static void intel_disable_dp(struct intel_encoder *encoder) /* Make sure the panel is off before trying to change the mode. But also * ensure that we have vdd while we switch off the panel. */ + ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_backlight_off(intel_dp); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); ironlake_edp_panel_off(intel_dp); @@ -1869,10 +1884,12 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder) mutex_unlock(&dev_priv->dpio_lock); - /* init power sequencer on this pipe and port */ - intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, - &power_seq); + if (is_edp(intel_dp)) { + /* init power sequencer on this pipe and port */ + intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, + &power_seq); + } intel_enable_dp(encoder); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 6db0d9d17f47..ee3181ebcc92 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -845,7 +845,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi) { struct drm_device *dev = intel_hdmi_to_dev(hdmi); - if (IS_G4X(dev)) + if (!hdmi->has_hdmi_sink || IS_G4X(dev)) return 165000; else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) return 300000; @@ -899,8 +899,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, * outputs. We also need to check that the higher clock still fits * within limits. */ - if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit - && HAS_PCH_SPLIT(dev)) { + if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && + clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) { DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); desired_bpp = 12*3; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index b1dc33f47899..d33b61d0dd33 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -258,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) algo->data = bus; } -/* - * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI - * mode. This results in spurious interrupt warnings if the legacy irq no. is - * shared with another device. The kernel then disables that interrupt source - * and so prevents the other device from working properly. 
- */ -#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) static int gmbus_wait_hw_status(struct drm_i915_private *dev_priv, u32 gmbus2_status, diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 4e960ec7419f..acde2945eb8a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -226,6 +226,8 @@ struct opregion_asle { #define ACPI_DIGITAL_OUTPUT (3<<8) #define ACPI_LVDS_OUTPUT (4<<8) +#define MAX_DSLP 1500 + #ifdef CONFIG_ACPI static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) { @@ -260,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) /* The spec says 2ms should be the default, but it's too small * for some machines. */ dslp = 50; - } else if (dslp > 500) { + } else if (dslp > MAX_DSLP) { /* Hey bios, trust must be earned. */ - WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp); - dslp = 500; + DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, " + "using %u ms instead\n", dslp, MAX_DSLP); + dslp = MAX_DSLP; } /* The spec tells us to do this, but we are the only user... */ diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 350de359123a..079ea38f14d9 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -698,7 +698,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector) freq /= 0xff; ctl = freq << 17; - if (IS_GEN2(dev) && panel->backlight.combination_mode) + if (panel->backlight.combination_mode) ctl |= BLM_LEGACY_MODE; if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm) ctl |= BLM_POLARITY_PNV; @@ -979,7 +979,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector) ctl = I915_READ(BLC_PWM_CTL); - if (IS_GEN2(dev)) + if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev)) panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; if (IS_PINEVIEW(dev)) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d77cc81900f9..e1fc35a72656 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3493,6 +3493,8 @@ static void valleyview_setup_pctx(struct drm_device *dev) u32 pcbr; int pctx_size = 24*1024; + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + pcbr = I915_READ(VLV_PCBR); if (pcbr) { /* BIOS set it up already, grab the pre-alloc'd space */ @@ -3542,8 +3544,6 @@ static void valleyview_enable_rps(struct drm_device *dev) I915_WRITE(GTFIFODBG, gtfifodbg); } - valleyview_setup_pctx(dev); - /* If VLV, Forcewake all wells, else re-direct to regular path */ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); @@ -4395,6 +4395,8 @@ void intel_enable_gt_powersave(struct drm_device *dev) ironlake_enable_rc6(dev); intel_init_emon(dev); } else if (IS_GEN6(dev) || IS_GEN7(dev)) { + if (IS_VALLEYVIEW(dev)) + valleyview_setup_pctx(dev); /* * PCU communication is slow and this doesn't need to be * done at any specific time, so do this out of our fast path diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b7f1742caf87..31b36c5ac894 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1653,6 +1653,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring, return 0; } +/* Align the ring tail to a cacheline boundary */ +int intel_ring_cacheline_align(struct intel_ring_buffer *ring) +{ + int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t); + int 
ret; + + if (num_dwords == 0) + return 0; + + ret = intel_ring_begin(ring, num_dwords); + if (ret) + return ret; + + while (num_dwords--) + intel_ring_emit(ring, MI_NOOP); + + intel_ring_advance(ring); + + return 0; +} + void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) { struct drm_i915_private *dev_priv = ring->dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 71a73f4fe252..0b243ce33714 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -233,6 +233,7 @@ intel_write_status_page(struct intel_ring_buffer *ring, void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); +int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring); static inline void intel_ring_emit(struct intel_ring_buffer *ring, u32 data) { diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index f9adc27ef32a..13b7dd83faa9 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev, * then the BO is being moved and we should * store up the damage until later. */ - if (!drm_can_sleep()) + if (drm_can_sleep()) ret = mgag200_bo_reserve(bo, true); if (ret) { if (ret != -EBUSY) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index b8583f275e80..968374776db9 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector, (mga_vga_calculate_mode_bandwidth(mode, bpp) > (32700 * 1024))) { return MODE_BANDWIDTH; - } else if (mode->type == G200_EH && + } else if (mdev->type == G200_EH && (mga_vga_calculate_mode_bandwidth(mode, bpp) > (37500 * 1024))) { return MODE_BANDWIDTH; - } else if (mode->type == G200_ER && + } else if (mdev->type == G200_ER && (mga_vga_calculate_mode_bandwidth(mode, bpp) > (55000 * 1024))) { return MODE_BANDWIDTH; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 1964f4f0d452..84c5b13b33c9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -39,6 +39,7 @@ struct mdp4_crtc { spinlock_t lock; bool stale; uint32_t width, height; + uint32_t x, y; /* next cursor to scan-out: */ uint32_t next_iova; @@ -57,9 +58,16 @@ struct mdp4_crtc { #define PENDING_FLIP 0x2 atomic_t pending; - /* the fb that we currently hold a scanout ref to: */ + /* the fb that we logically (from PoV of KMS API) hold a ref + * to. 
Which we may not yet be scanning out (we may still + * be scanning out previous in case of page_flip while waiting + * for gpu rendering to complete: + */ struct drm_framebuffer *fb; + /* the fb that we currently hold a scanout ref to: */ + struct drm_framebuffer *scanout_fb; + /* for unref'ing framebuffers after scanout completes: */ struct drm_flip_work unref_fb_work; @@ -77,24 +85,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc) return to_mdp4_kms(to_mdp_kms(priv->kms)); } -static void update_fb(struct drm_crtc *crtc, bool async, - struct drm_framebuffer *new_fb) +static void request_pending(struct drm_crtc *crtc, uint32_t pending) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct drm_framebuffer *old_fb = mdp4_crtc->fb; - if (old_fb) - drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb); + atomic_or(pending, &mdp4_crtc->pending); + mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); +} + +static void crtc_flush(struct drm_crtc *crtc) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); + uint32_t i, flush = 0; + + for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { + struct drm_plane *plane = mdp4_crtc->planes[i]; + if (plane) { + enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); + flush |= pipe2flush(pipe_id); + } + } + flush |= ovlp2flush(mdp4_crtc->ovlp); + + DBG("%s: flush=%08x", mdp4_crtc->name, flush); + + mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); +} + +static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct drm_framebuffer *old_fb = mdp4_crtc->fb; /* grab reference to incoming scanout fb: */ drm_framebuffer_reference(new_fb); mdp4_crtc->base.fb = new_fb; mdp4_crtc->fb = new_fb; - if (!async) { - /* enable vblank to pick up the old_fb */ - mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); - } + if (old_fb) + drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb); +} + +/* unlike update_fb(), take a ref to the new scanout fb *before* updating + * plane, then call this. Needed to ensure we don't unref the buffer that + * is actually still being scanned out. + * + * Note that this whole thing goes away with atomic.. since we can defer + * calling into driver until rendering is done. + */ +static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) +{ + struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + + /* flush updates, to make sure hw is updated to new scanout fb, + * so that we can safely queue unref to current fb (ie. next + * vblank we know hw is done w/ previous scanout_fb). 
+ */ + crtc_flush(crtc); + + if (mdp4_crtc->scanout_fb) + drm_flip_work_queue(&mdp4_crtc->unref_fb_work, + mdp4_crtc->scanout_fb); + + mdp4_crtc->scanout_fb = fb; + + /* enable vblank to complete flip: */ + request_pending(crtc, PENDING_FLIP); } /* if file!=NULL, this is preclose potential cancel-flip path */ @@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) spin_unlock_irqrestore(&dev->event_lock, flags); } -static void crtc_flush(struct drm_crtc *crtc) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - uint32_t i, flush = 0; - - for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { - struct drm_plane *plane = mdp4_crtc->planes[i]; - if (plane) { - enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); - flush |= pipe2flush(pipe_id); - } - } - flush |= ovlp2flush(mdp4_crtc->ovlp); - - DBG("%s: flush=%08x", mdp4_crtc->name, flush); - - mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); -} - -static void request_pending(struct drm_crtc *crtc, uint32_t pending) -{ - struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - - atomic_or(pending, &mdp4_crtc->pending); - mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank); -} - static void pageflip_cb(struct msm_fence_cb *cb) { struct mdp4_crtc *mdp4_crtc = @@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb) if (!fb) return; + drm_framebuffer_reference(fb); mdp4_plane_set_scanout(mdp4_crtc->plane, fb); - crtc_flush(crtc); - - /* enable vblank to complete flip: */ - request_pending(crtc, PENDING_FLIP); + update_scanout(crtc, fb); } static void unref_fb_worker(struct drm_flip_work *work, void *val) @@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc, mode->vsync_end, mode->vtotal, mode->type, mode->flags); + /* grab extra ref for update_scanout() */ + drm_framebuffer_reference(crtc->fb); + + ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb, + 0, 0, mode->hdisplay, mode->vdisplay, + x << 16, y << 16, + mode->hdisplay << 16, mode->vdisplay << 16); + if (ret) { + drm_framebuffer_unreference(crtc->fb); + dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", + mdp4_crtc->name, ret); + return ret; + } + mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); @@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc, mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); - update_fb(crtc, false, crtc->fb); - - ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb, - 0, 0, mode->hdisplay, mode->vdisplay, - x << 16, y << 16, - mode->hdisplay << 16, mode->vdisplay << 16); - if (ret) { - dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", - mdp4_crtc->name, ret); - return ret; - } - if (dma == DMA_E) { mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000); mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); } + update_fb(crtc, crtc->fb); + update_scanout(crtc, crtc->fb); + return 0; } @@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct drm_plane *plane = mdp4_crtc->plane; struct drm_display_mode *mode = &crtc->mode; + int ret; - update_fb(crtc, false, crtc->fb); + /* grab extra ref for update_scanout() */ + drm_framebuffer_reference(crtc->fb); - return mdp4_plane_mode_set(plane, crtc, crtc->fb, + ret = mdp4_plane_mode_set(plane, crtc, 
crtc->fb, 0, 0, mode->hdisplay, mode->vdisplay, x << 16, y << 16, mode->hdisplay << 16, mode->vdisplay << 16); + if (ret) { + drm_framebuffer_unreference(crtc->fb); + return ret; + } + + update_fb(crtc, crtc->fb); + update_scanout(crtc, crtc->fb); + + return 0; } static void mdp4_crtc_load_lut(struct drm_crtc *crtc) @@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc, mdp4_crtc->event = event; spin_unlock_irqrestore(&dev->event_lock, flags); - update_fb(crtc, true, new_fb); + update_fb(crtc, new_fb); return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb); } @@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc, static void update_cursor(struct drm_crtc *crtc) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); + struct mdp4_kms *mdp4_kms = get_kms(crtc); enum mdp4_dma dma = mdp4_crtc->dma; unsigned long flags; spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); if (mdp4_crtc->cursor.stale) { - struct mdp4_kms *mdp4_kms = get_kms(crtc); struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo; struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo; uint32_t iova = mdp4_crtc->cursor.next_iova; @@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc) mdp4_crtc->cursor.scanout_bo = next_bo; mdp4_crtc->cursor.stale = false; } + + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), + MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) | + MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y)); + spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); } @@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, drm_gem_object_unreference_unlocked(old_bo); } + crtc_flush(crtc); request_pending(crtc, PENDING_CURSOR); return 0; @@ -542,12 +591,15 @@ fail: static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); - struct mdp4_kms *mdp4_kms = get_kms(crtc); - enum mdp4_dma dma = mdp4_crtc->dma; + unsigned long flags; - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma), - MDP4_DMA_CURSOR_POS_X(x) | - MDP4_DMA_CURSOR_POS_Y(y)); + spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags); + mdp4_crtc->cursor.x = x; + mdp4_crtc->cursor.y = y; + spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags); + + crtc_flush(crtc); + request_pending(crtc, PENDING_CURSOR); return 0; } @@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, crtc = &mdp4_crtc->base; mdp4_crtc->plane = plane; + mdp4_crtc->id = id; mdp4_crtc->ovlp = ovlp_id; mdp4_crtc->dma = dma_id; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 2406027200ec..1e893dd13859 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -170,8 +170,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane, MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h)); mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe), - MDP4_PIPE_SRC_XY_X(crtc_x) | - MDP4_PIPE_SRC_XY_Y(crtc_y)); + MDP4_PIPE_DST_XY_X(crtc_x) | + MDP4_PIPE_DST_XY_Y(crtc_y)); mdp4_plane_set_scanout(plane, fb); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 71a3b2345eb3..f2794021f086 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -296,6 +296,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc, x << 16, y << 16, mode->hdisplay << 16, mode->vdisplay << 16); if (ret) { + drm_framebuffer_unreference(crtc->fb); dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n", 
mdp5_crtc->name, ret); return ret; @@ -343,11 +344,15 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 0, 0, mode->hdisplay, mode->vdisplay, x << 16, y << 16, mode->hdisplay << 16, mode->vdisplay << 16); + if (ret) { + drm_framebuffer_unreference(crtc->fb); + return ret; + } update_fb(crtc, crtc->fb); update_scanout(crtc, crtc->fb); - return ret; + return 0; } static void mdp5_crtc_load_lut(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index d8d60c969ac7..3da8264d3039 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -644,7 +644,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, fail: if (obj) - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_unreference(obj); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 5281d4bc37f7..5423e914e491 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -163,7 +163,7 @@ retry: /* if locking succeeded, pin bo: */ - ret = msm_gem_get_iova(&msm_obj->base, + ret = msm_gem_get_iova_locked(&msm_obj->base, submit->gpu->id, &iova); /* this would break the logic in the fail path.. there is no @@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob /* For now, just map the entire thing. Eventually we probably * to do it page-by-page, w/ kmap() if not vmap()d.. */ - ptr = msm_gem_vaddr(&obj->base); + ptr = msm_gem_vaddr_locked(&obj->base); if (IS_ERR(ptr)) { ret = PTR_ERR(ptr); @@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail) { unsigned i; - mutex_lock(&submit->dev->struct_mutex); for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; submit_unlock_unpin_bo(submit, i); list_del_init(&msm_obj->submit_entry); drm_gem_object_unreference(&msm_obj->base); } - mutex_unlock(&submit->dev->struct_mutex); ww_acquire_fini(&submit->ticket); kfree(submit); @@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (args->nr_cmds > MAX_CMDS) return -EINVAL; + mutex_lock(&dev->struct_mutex); + submit = submit_create(dev, gpu, args->nr_bos); if (!submit) { ret = -ENOMEM; @@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, out: if (submit) submit_cleanup(submit, !!ret); + mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 4ebce8be489d..0cfe3f426ee4 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -298,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_drm_private *priv = dev->dev_private; int i, ret; - mutex_lock(&dev->struct_mutex); - submit->fence = ++priv->next_fence; gpu->submitted_fence = submit->fence; @@ -331,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); } hangcheck_timer_reset(gpu); - mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index e88145ba1bf5..d310c195bdfe 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -141,6 +141,7 @@ nouveau-y += core/subdev/mc/base.o nouveau-y += core/subdev/mc/nv04.o nouveau-y += core/subdev/mc/nv40.o nouveau-y += core/subdev/mc/nv44.o +nouveau-y += 
core/subdev/mc/nv4c.o nouveau-y += core/subdev/mc/nv50.o nouveau-y += core/subdev/mc/nv94.o nouveau-y += core/subdev/mc/nv98.o diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c index 1b653dd74a70..08b88591ed60 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c @@ -311,7 +311,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; @@ -334,7 +334,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass; @@ -357,7 +357,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; @@ -380,7 +380,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; @@ -403,7 +403,7 @@ nv40_identify(struct nouveau_device *device) device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass; device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass; device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass; - device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass; device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass; device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass; diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c index 940eaa5d8b9a..9ad722e4e087 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c @@ -1142,7 +1142,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head) if (conf != ~0) { if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) { u32 soff = (ffs(outp.or) - 1) * 0x08; - u32 ctrl = nv_rd32(priv, 0x610798 + soff); + u32 ctrl = nv_rd32(priv, 0x610794 + soff); u32 datarate; switch ((ctrl & 0x000f0000) >> 16) { diff --git 
a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index 9a850fe19515..54c1b5b471cd 100644 --- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c @@ -112,7 +112,7 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine) nv_wr32(priv, 0x002270, cur->addr >> 12); nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); - if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) + if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000)) nv_error(priv, "runlist %d update timeout\n", engine); mutex_unlock(&nv_subdev(priv)->mutex); } diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c index 30ed19c52e05..7a367c402978 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c @@ -539,7 +539,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old, ustatus &= ~0x04030000; } if (ustatus && display) { - nv_error("%s - TP%d:", name, i); + nv_error(priv, "%s - TP%d:", name, i); nouveau_bitfield_print(nv50_mpc_traps, ustatus); pr_cont("\n"); ustatus = 0; diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h index adc88b73d911..3c6738edd127 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h @@ -47,6 +47,7 @@ struct nouveau_mc_oclass { extern struct nouveau_oclass *nv04_mc_oclass; extern struct nouveau_oclass *nv40_mc_oclass; extern struct nouveau_oclass *nv44_mc_oclass; +extern struct nouveau_oclass *nv4c_mc_oclass; extern struct nouveau_oclass *nv50_mc_oclass; extern struct nouveau_oclass *nv94_mc_oclass; extern struct nouveau_oclass *nv98_mc_oclass; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index aa0fbbec7f08..ef0c9c4a8cc3 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c @@ -130,6 +130,10 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios) u16 pcir; int i; + /* there is no prom on nv4x IGP's */ + if (device->card_type == NV_40 && device->chipset >= 0x4c) + return; + /* enable access to rom */ if (device->card_type >= NV_50) pcireg = 0x088050; diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c index 9159a5ccee93..265d1253624a 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c @@ -36,7 +36,7 @@ nv1a_fb_oclass = &(struct nv04_fb_impl) { .fini = _nouveau_fb_fini, }, .base.memtype = nv04_fb_memtype_valid, - .base.ram = &nv10_ram_oclass, + .base.ram = &nv1a_ram_oclass, .tile.regions = 8, .tile.init = nv10_fb_tile_init, .tile.fini = nv10_fb_tile_fini, diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h index b0d5c31606c1..81a408e7d034 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h @@ -14,6 +14,7 @@ int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *, extern const struct nouveau_mc_intr nv04_mc_intr[]; int nv04_mc_init(struct nouveau_object *); void nv40_mc_msi_rearm(struct nouveau_mc *); +int nv44_mc_init(struct nouveau_object *object); int nv50_mc_init(struct nouveau_object *); extern const struct nouveau_mc_intr nv50_mc_intr[]; extern 
const struct nouveau_mc_intr nvc0_mc_intr[]; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c index 3bfee5c6c4f2..cc4d0d2d886e 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c @@ -24,7 +24,7 @@ #include "nv04.h" -static int +int nv44_mc_init(struct nouveau_object *object) { struct nv04_mc_priv *priv = (void *)object; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c new file mode 100644 index 000000000000..a75c35ccf25c --- /dev/null +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c @@ -0,0 +1,45 @@ +/* + * Copyright 2014 Ilia Mirkin + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ilia Mirkin + */ + +#include "nv04.h" + +static void +nv4c_mc_msi_rearm(struct nouveau_mc *pmc) +{ + struct nv04_mc_priv *priv = (void *)pmc; + nv_wr08(priv, 0x088050, 0xff); +} + +struct nouveau_oclass * +nv4c_mc_oclass = &(struct nouveau_mc_oclass) { + .base.handle = NV_SUBDEV(MC, 0x4c), + .base.ofuncs = &(struct nouveau_ofuncs) { + .ctor = nv04_mc_ctor, + .dtor = _nouveau_mc_dtor, + .init = nv44_mc_init, + .fini = _nouveau_mc_fini, + }, + .intr = nv04_mc_intr, + .msi_rearm = nv4c_mc_msi_rearm, +}.base; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 4ef83df2b246..83face3f608f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -106,6 +106,29 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t * return 0; } +/* + * On some platforms, _DSM(nouveau_op_dsm_muid, func0) has special + * requirements on the fourth parameter, so a private implementation + * instead of using acpi_check_dsm(). + */ +static int nouveau_check_optimus_dsm(acpi_handle handle) +{ + int result; + + /* + * Function 0 returns a Buffer containing available functions. + * The args parameter is ignored for function 0, so just put 0 in it + */ + if (nouveau_optimus_dsm(handle, 0, 0, &result)) + return 0; + + /* + * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. 
+ * If the n-th bit is enabled, function n is supported + */ + return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS); +} + static int nouveau_dsm(acpi_handle handle, int func, int arg) { int ret = 0; @@ -207,8 +230,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) 1 << NOUVEAU_DSM_POWER)) retval |= NOUVEAU_DSM_HAS_MUX; - if (acpi_check_dsm(dhandle, nouveau_op_dsm_muid, 0x00000100, - 1 << NOUVEAU_DSM_OPTIMUS_CAPS)) + if (nouveau_check_optimus_dsm(dhandle)) retval |= NOUVEAU_DSM_HAS_OPT; if (retval & NOUVEAU_DSM_HAS_OPT) { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 488686d490c0..4aed1714b9ab 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1249,7 +1249,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) mem->bus.is_iomem = !dev->agp->cant_use_aperture; } #endif - if (!node->memtype) + if (nv_device(drm->device)->card_type < NV_50 || !node->memtype) /* untiled */ break; /* fallthrough, tiled memory */ diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 78c8e7146d56..89c484d8ac26 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -376,6 +376,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) if (ret) goto fail_device; + dev->irq_enabled = true; + /* workaround an odd issue on nvc1 by disabling the device's * nosnoop capability. hopefully won't cause issues until a * better fix is found - assuming there is one... @@ -475,6 +477,7 @@ nouveau_drm_remove(struct pci_dev *pdev) struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_object *device; + dev->irq_enabled = false; device = drm->client.base.device; drm_put_dev(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index 81638d7f2eff..471347edc27e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -14,7 +14,9 @@ nouveau_vga_set_decode(void *priv, bool state) { struct nouveau_device *device = nouveau_dev(priv); - if (device->chipset >= 0x40) + if (device->card_type == NV_40 && device->chipset >= 0x4c) + nv_wr32(device, 0x088060, state); + else if (device->chipset >= 0x40) nv_wr32(device, 0x088054, state); else nv_wr32(device, 0x001854, state); diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a9338c85630f..daa4dd375ab1 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -559,7 +559,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, u32 adjusted_clock = mode->clock; int encoder_mode = atombios_get_encoder_mode(encoder); u32 dp_clock = mode->clock; - int bpc = radeon_get_monitor_bpc(connector); + int bpc = radeon_crtc->bpc; bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock); /* reset the pll flags */ @@ -1176,7 +1176,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); /* Set NUM_BANKS. */ - if (rdev->family >= CHIP_BONAIRE) { + if (rdev->family >= CHIP_TAHITI) { unsigned tileb, index, num_banks, tile_split_bytes; /* Calculate the macrotile mode index. 
*/ @@ -1194,13 +1194,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, return -EINVAL; } - num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; + if (rdev->family >= CHIP_BONAIRE) + num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; + else + num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); } else { - /* SI and older. */ - if (rdev->family >= CHIP_TAHITI) - tmp = rdev->config.si.tile_config; - else if (rdev->family >= CHIP_CAYMAN) + /* NI and older. */ + if (rdev->family >= CHIP_CAYMAN) tmp = rdev->config.cayman.tile_config; else tmp = rdev->config.evergreen.tile_config; @@ -1773,6 +1774,20 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) return ATOM_PPLL1; DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; + } else if (ASIC_IS_DCE41(rdev)) { + /* Don't share PLLs on DCE4.1 chips */ + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { + if (rdev->clock.dp_extclk) + /* skip PPLL programming if using ext clock */ + return ATOM_PPLL_INVALID; + } + pll_in_use = radeon_get_pll_use_mask(crtc); + if (!(pll_in_use & (1 << ATOM_PPLL1))) + return ATOM_PPLL1; + if (!(pll_in_use & (1 << ATOM_PPLL2))) + return ATOM_PPLL2; + DRM_ERROR("unable to allocate a PPLL\n"); + return ATOM_PPLL_INVALID; } else if (ASIC_IS_DCE4(rdev)) { /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, * depending on the asic: @@ -1800,7 +1815,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) if (pll != ATOM_PPLL_INVALID) return pll; } - } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */ + } else { /* use the same PPLL for all monitors with the same clock */ pll = radeon_get_shared_nondp_ppll(crtc); if (pll != ATOM_PPLL_INVALID) diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index a42d61571f49..607dc14d195e 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -464,11 +464,12 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) static u8 radeon_atom_get_bpc(struct drm_encoder *encoder) { - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); int bpc = 8; - if (connector) - bpc = radeon_get_monitor_bpc(connector); + if (encoder->crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + bpc = radeon_crtc->bpc; + } switch (bpc) { case 0: @@ -1313,7 +1314,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t } if (is_dp) args.v5.ucLaneNum = dp_lane_count; - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v5.ucLaneNum = 8; else args.v5.ucLaneNum = 4; diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0fbd36f3d4e9..ea103ccdf4bd 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -29,6 +29,7 @@ #include "cypress_dpm.h" #include "btc_dpm.h" #include "atom.h" +#include <linux/seq_file.h> #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b @@ -2756,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev) r600_free_extended_power_table(rdev); } +void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, + struct seq_file *m) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; + struct 
rv7xx_ps *ps = rv770_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> + CURRENT_PROFILE_INDEX_SHIFT; + + if (current_index > 2) { + seq_printf(m, "invalid dpm profile %d\n", current_index); + } else { + if (current_index == 0) + pl = &ps->low; + else if (current_index == 1) + pl = &ps->medium; + else /* current_index == 2 */ + pl = &ps->high; + seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + if (rdev->family >= CHIP_CEDAR) { + seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", + current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci); + } else { + seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n", + current_index, pl->sclk, pl->mclk, pl->vddc); + } + } +} + u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low) { struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h index 29e32de7e025..9c65be2d55a9 100644 --- a/drivers/gpu/drm/radeon/btcd.h +++ b/drivers/gpu/drm/radeon/btcd.h @@ -44,6 +44,10 @@ # define DYN_SPREAD_SPECTRUM_EN (1 << 23) # define AC_DC_SW (1 << 24) +#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c +# define CURRENT_PROFILE_INDEX_MASK (0xf << 4) +# define CURRENT_PROFILE_INDEX_SHIFT 4 + #define CG_BIF_REQ_AND_RSP 0x7f4 #define CG_CLIENT_REQ(x) ((x) << 0) #define CG_CLIENT_REQ_MASK (0xff << 0) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e6419ca7cd37..bbb17841a9e5 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3046,7 +3046,7 @@ static u32 cik_create_bitmask(u32 bit_width) } /** - * cik_select_se_sh - select which SE, SH to address + * cik_get_rb_disabled - computes the mask of disabled RBs * * @rdev: radeon_device pointer * @max_rb_num: max RBs (render backends) for the asic @@ -4134,8 +4134,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable) { if (enable) WREG32(CP_MEC_CNTL, 0); - else + else { WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); + rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; + rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; + } udelay(50); } @@ -7902,7 +7905,8 @@ int cik_resume(struct radeon_device *rdev) /* init golden registers */ cik_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = cik_startup(rdev); diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 1ecb3f1070e3..94626ea90fa5 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -264,6 +264,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev) WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0); } + rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; + rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; } /** @@ -291,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable) u32 me_cntl, reg_offset; int i; + if (enable == false) { + cik_sdma_gfx_stop(rdev); + cik_sdma_rlc_stop(rdev); + } + for (i = 0; i < 2; i++) { if (i == 0) reg_offset = SDMA0_REGISTER_OFFSET; @@ -420,10 +427,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev) if (!rdev->sdma_fw) return -EINVAL; - /* stop the gfx rings and rlc compute queues */ - cik_sdma_gfx_stop(rdev); - cik_sdma_rlc_stop(rdev); - /* halt the MEs */ cik_sdma_enable(rdev, false); @@ 
-492,9 +495,6 @@ int cik_sdma_resume(struct radeon_device *rdev) */ void cik_sdma_fini(struct radeon_device *rdev) { - /* stop the gfx rings and rlc compute queues */ - cik_sdma_gfx_stop(rdev); - cik_sdma_rlc_stop(rdev); /* halt the MEs */ cik_sdma_enable(rdev, false); radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 713a5d359901..94e858751994 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -278,13 +278,15 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev) return !ASIC_IS_NODCE(rdev); } -static void dce6_audio_enable(struct radeon_device *rdev, - struct r600_audio_pin *pin, - bool enable) +void dce6_audio_enable(struct radeon_device *rdev, + struct r600_audio_pin *pin, + bool enable) { + if (!pin) + return; + WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL, - AUDIO_ENABLED); - DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id); + enable ? AUDIO_ENABLED : 0); } static const u32 pin_offsets[7] = @@ -323,7 +325,8 @@ int dce6_audio_init(struct radeon_device *rdev) rdev->audio.pin[i].connected = false; rdev->audio.pin[i].offset = pin_offsets[i]; rdev->audio.pin[i].id = i; - dce6_audio_enable(rdev, &rdev->audio.pin[i], true); + /* disable audio. it will be set up later */ + dce6_audio_enable(rdev, &rdev->audio.pin[i], false); } return 0; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f2b9e21ce4da..27b0ff16082e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1680,7 +1680,7 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) case RADEON_HPD_6: if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) connected = true; - break; + break; default: break; } @@ -5299,7 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev) /* init golden registers */ evergreen_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = evergreen_startup(rdev); @@ -5475,9 +5476,9 @@ void evergreen_fini(struct radeon_device *rdev) radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - evergreen_pcie_gart_fini(rdev); uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); + evergreen_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 0c6d5cef4cf1..05b0c95813fd 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -306,6 +306,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode return; offset = dig->afmt->offset; + /* disable audio prior to setting up hw */ + if (ASIC_IS_DCE6(rdev)) { + dig->afmt->pin = dce6_audio_get_pin(rdev); + dce6_audio_enable(rdev, dig->afmt->pin, false); + } else { + dig->afmt->pin = r600_audio_get_pin(rdev); + r600_audio_enable(rdev, dig->afmt->pin, false); + } + evergreen_audio_set_dto(encoder, mode->clock); WREG32(HDMI_VBI_PACKET_CONTROL + offset, @@ -409,12 +418,16 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF); WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001); WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001); + + /* enable audio after to setting up hw 
*/ + if (ASIC_IS_DCE6(rdev)) + dce6_audio_enable(rdev, dig->afmt->pin, true); + else + r600_audio_enable(rdev, dig->afmt->pin, true); } void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) { - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; @@ -427,15 +440,6 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) if (!enable && !dig->afmt->enabled) return; - if (enable) { - if (ASIC_IS_DCE6(rdev)) - dig->afmt->pin = dce6_audio_get_pin(rdev); - else - dig->afmt->pin = r600_audio_get_pin(rdev); - } else { - dig->afmt->pin = NULL; - } - dig->afmt->enabled = enable; DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n", diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h index 76ada8cfe902..3a03ba37d043 100644 --- a/drivers/gpu/drm/radeon/evergreen_smc.h +++ b/drivers/gpu/drm/radeon/evergreen_smc.h @@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters; #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100 -#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0 +#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8 #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20 diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index b6e01d5d2cce..351db361239d 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -1223,7 +1223,7 @@ int kv_dpm_enable(struct radeon_device *rdev) int kv_dpm_late_enable(struct radeon_device *rdev) { - int ret; + int ret = 0; if (rdev->irq.installed && r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index ea932ac66fc6..bf6300cfd62d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2105,7 +2105,8 @@ int cayman_resume(struct radeon_device *rdev) /* init golden registers */ ni_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = cayman_startup(rdev); diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index c351226ecb31..ca814276b075 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -2588,7 +2588,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev, if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) enable_sq_ramping = false; - if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) + if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) enable_sq_ramping = false; for (i = 0; i < state->performance_level_count; i++) { @@ -3945,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev, struct rv7xx_power_info *pi = rv770_get_pi(rdev); struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct ni_ps *ps = ni_get_ps(rps); - u16 vddc; struct rv7xx_pl *pl = &ps->performance_levels[index]; ps->performance_level_count = index + 1; @@ -3961,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev, /* patch up vddc if necessary */ if (pl->vddc == 0xff01) { - if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) - pl->vddc = vddc; + if (pi->max_vddc) + pl->vddc = pi->max_vddc; } if 
(rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { @@ -4322,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev, void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m) { - struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; struct ni_ps *ps = ni_get_ps(rps); struct rv7xx_pl *pl; u32 current_index = diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index ef024ce3f7cc..3cc78bb66042 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3942,8 +3942,6 @@ int r100_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r100_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 7c63ef840e86..0b658b34b33a 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -1430,8 +1430,6 @@ int r300_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r300_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 3768aab2710b..802b19220a21 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -325,8 +325,6 @@ int r420_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r420_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index e209eb75024f..98d6053c36c6 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -240,8 +240,6 @@ int r520_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r520_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 56140b4e5bb2..647ef4079217 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2968,7 +2968,8 @@ int r600_resume(struct radeon_device *rdev) /* post card */ atom_asic_init(rdev->mode_info.atom_context); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = r600_startup(rdev); @@ -3991,6 +3992,10 @@ restart_ih: break; } break; + case 124: /* UVD */ + DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); + radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); + break; case 176: /* CP_INT in ring buffer */ case 177: /* CP_INT in IB1 */ case 178: /* CP_INT in IB2 */ diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index 47fc2b886979..bffac10c4296 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -142,12 +142,15 @@ void r600_audio_update_hdmi(struct work_struct *work) } /* enable the audio stream */ -static void r600_audio_enable(struct radeon_device *rdev, - struct r600_audio_pin *pin, - bool enable) +void r600_audio_enable(struct radeon_device *rdev, + struct r600_audio_pin *pin, + bool enable) { u32 value = 0; + if (!pin) + return; + if (ASIC_IS_DCE4(rdev)) { if (enable) { value |= 0x81000000; /* Required to enable audio */ @@ -158,7 +161,6 @@ static void r600_audio_enable(struct radeon_device 
*rdev, WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000); } - DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id); } /* @@ -178,8 +180,8 @@ int r600_audio_init(struct radeon_device *rdev) rdev->audio.pin[0].status_bits = 0; rdev->audio.pin[0].category_code = 0; rdev->audio.pin[0].id = 0; - - r600_audio_enable(rdev, &rdev->audio.pin[0], true); + /* disable audio. it will be set up later */ + r600_audio_enable(rdev, &rdev->audio.pin[0], false); return 0; } diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 7b399dc5fd54..2812c7d1ae6f 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case R_008C64_SQ_VSTMP_RING_SIZE: case R_0288C8_SQ_GS_VERT_ITEMSIZE: /* get value to populate the IB don't remove */ - tmp =radeon_get_ib_value(p, idx); - ib[idx] = 0; + /*tmp =radeon_get_ib_value(p, idx); + ib[idx] = 0;*/ + break; + case SQ_ESGS_RING_BASE: + case SQ_GSVS_RING_BASE: + case SQ_ESTMP_RING_BASE: + case SQ_GSTMP_RING_BASE: + case SQ_PSTMP_RING_BASE: + case SQ_VSTMP_RING_BASE: + r = radeon_cs_packet_next_reloc(p, &reloc, 0); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); break; case SQ_CONFIG: track->sq_config = radeon_get_ib_value(p, idx); diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 3016fc14f502..85a2bb28aed2 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -329,9 +329,6 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder) u8 *sadb; int sad_count; - /* XXX: setting this register causes hangs on some asics */ - return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { if (connector->encoder == encoder) { radeon_connector = to_radeon_connector(connector); @@ -460,6 +457,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod return; offset = dig->afmt->offset; + /* disable audio prior to setting up hw */ + dig->afmt->pin = r600_audio_get_pin(rdev); + r600_audio_enable(rdev, dig->afmt->pin, false); + r600_audio_set_dto(encoder, mode->clock); WREG32(HDMI0_VBI_PACKET_CONTROL + offset, @@ -531,6 +532,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001); r600_hdmi_audio_workaround(encoder); + + /* enable audio after to setting up hw */ + r600_audio_enable(rdev, dig->afmt->pin, true); } /* @@ -651,11 +655,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable) if (!enable && !dig->afmt->enabled) return; - if (enable) - dig->afmt->pin = r600_audio_get_pin(rdev); - else - dig->afmt->pin = NULL; - /* Older chipsets require setting HDMI and routing manually */ if (!ASIC_IS_DCE3(rdev)) { if (enable) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 4a8ac1cd6b4c..e887d027b6d0 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -135,6 +135,9 @@ extern int radeon_hard_reset; /* R600+ */ #define R600_RING_TYPE_UVD_INDEX 5 +/* number of hw syncs before falling back on blocking */ +#define RADEON_NUM_SYNCS 4 + /* hardcode those limit for now */ #define RADEON_VA_IB_OFFSET (1 << 20) #define RADEON_VA_RESERVED_SIZE (8 << 20) @@ -554,7 +557,6 @@ int 
radeon_mode_dumb_mmap(struct drm_file *filp, /* * Semaphores. */ -/* everything here is constant */ struct radeon_semaphore { struct radeon_sa_bo *sa_bo; signed waiters; @@ -2745,6 +2747,12 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, void r600_audio_update_hdmi(struct work_struct *work); struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev); struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev); +void r600_audio_enable(struct radeon_device *rdev, + struct r600_audio_pin *pin, + bool enable); +void dce6_audio_enable(struct radeon_device *rdev, + struct r600_audio_pin *pin, + bool enable); /* * R600 vram scratch functions diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index f74db43346fd..dda02bfc10a4 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -1555,7 +1555,7 @@ static struct radeon_asic btc_asic = { .get_sclk = &btc_dpm_get_sclk, .get_mclk = &btc_dpm_get_mclk, .print_power_state = &rv770_dpm_print_power_state, - .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level, + .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level, .force_performance_level = &rv770_dpm_force_performance_level, .vblank_too_short = &btc_dpm_vblank_too_short, }, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index b3bc433eed4c..ae637cfda783 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -551,6 +551,8 @@ void btc_dpm_fini(struct radeon_device *rdev); u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low); u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low); bool btc_dpm_vblank_too_short(struct radeon_device *rdev); +void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, + struct seq_file *m); int sumo_dpm_init(struct radeon_device *rdev); int sumo_dpm_enable(struct radeon_device *rdev); int sumo_dpm_late_enable(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 485848f889f5..fa9a9c02751e 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -219,7 +219,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx) memcpy(&output, info->buffer.pointer, size); /* TODO: check version? 
*/ - printk("ATPX version %u\n", output.version); + printk("ATPX version %u, functions 0x%08x\n", + output.version, output.function_bits); radeon_atpx_parse_functions(&atpx->functions, output.function_bits); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b012cbbc3ed5..044bc98fb459 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1521,13 +1521,16 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (r) DRM_ERROR("ib ring test failed (%d).\n", r); - if (rdev->pm.dpm_enabled) { + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { /* do dpm late init */ r = radeon_pm_late_init(rdev); if (r) { rdev->pm.dpm_enabled = false; DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); } + } else { + /* resume old pm late */ + radeon_pm_resume(rdev); } radeon_restore_bios_scratch_regs(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index d680608f6f5b..fbd8b930f2be 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -571,6 +571,8 @@ static void radeon_crtc_init(struct drm_device *dev, int index) radeon_crtc->max_cursor_width = CURSOR_WIDTH; radeon_crtc->max_cursor_height = CURSOR_HEIGHT; } + dev->mode_config.cursor_width = radeon_crtc->max_cursor_width; + dev->mode_config.cursor_height = radeon_crtc->max_cursor_height; #if 0 radeon_crtc->mode_set.crtc = &radeon_crtc->base; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index ec8c388eec17..84a1bbb75f91 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -78,9 +78,10 @@ * 2.34.0 - Add CIK tiling mode array query * 2.35.0 - Add CIK macrotile mode array query * 2.36.0 - Fix CIK DCE tiling setup + * 2.37.0 - allow GS ring setup on r6xx/r7xx */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 36 +#define KMS_DRIVER_MINOR 37 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 114d1672d616..66ed3ea71440 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -33,6 +33,13 @@ #include <linux/vga_switcheroo.h> #include <linux/slab.h> #include <linux/pm_runtime.h> + +#if defined(CONFIG_VGA_SWITCHEROO) +bool radeon_is_px(void); +#else +static inline bool radeon_is_px(void) { return false; } +#endif + /** * radeon_driver_unload_kms - Main unload function for KMS. 
* @@ -130,7 +137,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) "Error during ACPI methods call\n"); } - if (radeon_runtime_pm != 0) { + if ((radeon_runtime_pm == 1) || + ((radeon_runtime_pm == -1) && radeon_is_px())) { pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); @@ -537,6 +545,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) radeon_vm_init(rdev, &fpriv->vm); + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (r) + return r; + /* map the ib pool buffer read only into * virtual address space */ bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, @@ -544,6 +556,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); + + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); if (r) { radeon_vm_fini(rdev, &fpriv->vm); kfree(fpriv); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 1b783f0e6d3a..15e44a7281ab 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -139,7 +139,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, } /* 64 dwords should be enough for fence too */ - r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8); + r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8); if (r) { dev_err(rdev->dev, "scheduling IB failed (%d).\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 2b42aa1914f2..9006b32d5eed 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -34,14 +34,15 @@ int radeon_semaphore_create(struct radeon_device *rdev, struct radeon_semaphore **semaphore) { + uint32_t *cpu_addr; int i, r; *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); if (*semaphore == NULL) { return -ENOMEM; } - r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, - &(*semaphore)->sa_bo, 8, 8, true); + r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo, + 8 * RADEON_NUM_SYNCS, 8, true); if (r) { kfree(*semaphore); *semaphore = NULL; @@ -49,7 +50,10 @@ int radeon_semaphore_create(struct radeon_device *rdev, } (*semaphore)->waiters = 0; (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); - *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; + + cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo); + for (i = 0; i < RADEON_NUM_SYNCS; ++i) + cpu_addr[i] = 0; for (i = 0; i < RADEON_NUM_RINGS; ++i) (*semaphore)->sync_to[i] = NULL; @@ -125,6 +129,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, int ring) { + unsigned count = 0; int i, r; for (i = 0; i < RADEON_NUM_RINGS; ++i) { @@ -140,6 +145,12 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, return -EINVAL; } + if (++count > RADEON_NUM_SYNCS) { + /* not enough room, wait manually */ + radeon_fence_wait_locked(fence); + continue; + } + /* allocate enough space for sync command */ r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); if (r) { @@ -164,6 +175,8 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev, radeon_ring_commit(rdev, &rdev->ring[i]); radeon_fence_note_sync(fence, ring); + + semaphore->gpu_addr += 8; } return 0; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 
77f5b0c3edb8..040a2a10ea17 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -714,6 +714,9 @@ int radeon_ttm_init(struct radeon_device *rdev) DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } + /* Change the size here instead of the init above so only lpfn is affected */ + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->stollen_vga_memory); @@ -935,7 +938,7 @@ static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf, while (size) { loff_t p = *pos / PAGE_SIZE; unsigned off = *pos & ~PAGE_MASK; - ssize_t cur_size = min(size, PAGE_SIZE - off); + size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); struct page *page; void *ptr; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 6781fee1eaad..3e6804b2b2ef 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -171,6 +171,8 @@ void radeon_uvd_fini(struct radeon_device *rdev) radeon_bo_unref(&rdev->uvd.vcpu_bo); + radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]); + release_firmware(rdev->uvd_fw); } diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 index 20bfbda7b3f1..ec0c6829c1dc 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/drivers/gpu/drm/radeon/reg_srcs/r600 @@ -18,6 +18,7 @@ r600 0x9400 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL 0x00028A40 VGT_GS_MODE 0x00028A6C VGT_GS_OUT_PRIM_TYPE +0x00028B38 VGT_GS_MAX_VERT_OUT 0x000088C8 VGT_GS_PER_ES 0x000088E8 VGT_GS_PER_VS 0x000088D4 VGT_GS_VERTEX_REUSE diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index b5c2369cda2f..130d5cc50d43 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -474,8 +474,6 @@ int rs400_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rs400_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index fdcde7693032..72d3616de08e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -1048,8 +1048,6 @@ int rs600_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rs600_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 35950738bd5e..3462b64369bf 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -756,8 +756,6 @@ int rs690_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rs690_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 98e8138ff779..237dd29d9f1c 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -586,8 +586,6 @@ int rv515_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rv515_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 6c772e58c784..fef310773aad 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1811,7 +1811,8 @@ int rv770_resume(struct 
radeon_device *rdev) /* init golden registers */ rv770_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = rv770_startup(rdev); @@ -1955,9 +1956,9 @@ void rv770_fini(struct radeon_device *rdev) radeon_wb_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - rv770_pcie_gart_fini(rdev); uvd_v1_0_fini(rdev); radeon_uvd_fini(rdev); + rv770_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 80c595aba359..b5f63f5e22a3 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c @@ -2174,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); struct rv7xx_ps *ps = rv770_get_ps(rps); u32 sclk, mclk; - u16 vddc; struct rv7xx_pl *pl; switch (index) { @@ -2214,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev, /* patch up vddc if necessary */ if (pl->vddc == 0xff01) { - if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0) - pl->vddc = vddc; + if (pi->max_vddc) + pl->vddc = pi->max_vddc; } if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { @@ -2527,14 +2526,7 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low) bool rv770_dpm_vblank_too_short(struct radeon_device *rdev) { u32 vblank_time = r600_dpm_get_vblank_time(rdev); - u32 switch_limit = 300; - - /* quirks */ - /* ASUS K70AF */ - if ((rdev->pdev->device == 0x9553) && - (rdev->pdev->subsystem_vendor == 0x1043) && - (rdev->pdev->subsystem_device == 0x1c42)) - switch_limit = 200; + u32 switch_limit = 200; /* 300 */ /* RV770 */ /* mclk switching doesn't seem to work reliably on desktop RV770s */ diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 09ec4f6c53bb..9a124d0608b3 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6338,6 +6338,10 @@ restart_ih: break; } break; + case 124: /* UVD */ + DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); + radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); + break; case 146: case 147: addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); @@ -6614,7 +6618,8 @@ int si_resume(struct radeon_device *rdev) /* init golden registers */ si_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = si_startup(rdev); diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 0471501338fb..0a2f5b4bca43 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2395,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev, if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) enable_sq_ramping = false; - if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) + if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) enable_sq_ramping = false; for (i = 0; i < state->performance_level_count; i++) { @@ -6472,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev) void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m) { - struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); + struct radeon_ps *rps = &eg_pi->current_rps; struct ni_ps *ps 
= ni_get_ps(rps); struct rv7xx_pl *pl; u32 current_index = diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index f121efe12dc5..8b47b3cd0357 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c @@ -1807,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev struct seq_file *m) { struct sumo_power_info *pi = sumo_get_pi(rdev); - struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct radeon_ps *rps = &pi->current_rps; struct sumo_ps *ps = sumo_get_ps(rps); struct sumo_pl *pl; u32 current_index = diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 2d447192d6f7..2da0e17eb960 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1926,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev, void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, struct seq_file *m) { - struct radeon_ps *rps = rdev->pm.dpm.current_ps; + struct trinity_power_info *pi = trinity_get_pi(rdev); + struct radeon_ps *rps = &pi->current_rps; struct trinity_ps *ps = trinity_get_ps(rps); struct trinity_pl *pl; u32 current_index = diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c index 824550db3fed..d1771004cb52 100644 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c @@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, radeon_ring_write(ring, 0); radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); radeon_ring_write(ring, 2); - return; } /** diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 88a529008ce0..c71594754f46 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -104,7 +104,7 @@ static void tegra_drm_context_free(struct tegra_drm_context *context) static void tegra_drm_lastclose(struct drm_device *drm) { -#ifdef CONFIG_TEGRA_DRM_FBDEV +#ifdef CONFIG_DRM_TEGRA_FBDEV struct tegra_drm *tegra = drm->dev_private; tegra_fbdev_restore_mode(tegra->fbdev); diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index 338f7f6561d7..0266fb40479e 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -15,6 +15,7 @@ struct tegra_rgb { struct tegra_output output; struct tegra_dc *dc; + bool enabled; struct clk *clk_parent; struct clk *clk; @@ -89,6 +90,9 @@ static int tegra_output_rgb_enable(struct tegra_output *output) struct tegra_rgb *rgb = to_rgb(output); unsigned long value; + if (rgb->enabled) + return 0; + tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable)); value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL; @@ -122,6 +126,8 @@ static int tegra_output_rgb_enable(struct tegra_output *output) tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL); tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL); + rgb->enabled = true; + return 0; } @@ -130,6 +136,9 @@ static int tegra_output_rgb_disable(struct tegra_output *output) struct tegra_rgb *rgb = to_rgb(output); unsigned long value; + if (!rgb->enabled) + return 0; + value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL); value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE | PW4_ENABLE | PM0_ENABLE | PM1_ENABLE); @@ -144,6 +153,8 @@ static int tegra_output_rgb_disable(struct tegra_output *output) tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable)); + rgb->enabled = false; + return 0; } diff --git 
a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 3302f99e7497..764be36397fd 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -126,6 +126,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev, agp_be->ttm.func = &ttm_agp_func; if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) { + kfree(agp_be); return NULL; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a06651309388..214b7992a3aa 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, moved: if (bo->evicted) { - ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); - if (ret) - pr_err("Can not flush read caches\n"); + if (bdev->driver->invalidate_caches) { + ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); + if (ret) + pr_err("Can not flush read caches\n"); + } bo->evicted = false; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 801231c9ae48..0ce48e5a9cb4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -339,11 +339,13 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, vma->vm_private_data = bo; /* - * PFNMAP is faster than MIXEDMAP due to reduced page - * administration. So use MIXEDMAP only if private VMA, where - * we need to support COW. + * We'd like to use VM_PFNMAP on shared mappings, where + * (vma->vm_flags & VM_SHARED) != 0, for performance reasons, + * but for some reason VM_PFNMAP + x86 PAT + write-combine is very + * bad for performance. Until that has been sorted out, use + * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719 */ - vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; + vma->vm_flags |= VM_MIXEDMAP; vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; return 0; out_unref: @@ -359,7 +361,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) vma->vm_ops = &ttm_bo_vm_ops; vma->vm_private_data = ttm_bo_reference(bo); - vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; + vma->vm_flags |= VM_MIXEDMAP; vma->vm_flags |= VM_IO | VM_DONTEXPAND; return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index 37079859afc8..53b51c4e671a 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c @@ -292,7 +292,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, if (ret == 0) { ref = drm_hash_entry(hash, struct ttm_ref_object, hash); - if (!kref_get_unless_zero(&ref->kref)) { + if (kref_get_unless_zero(&ref->kref)) { rcu_read_unlock(); break; } diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 9af99084b344..75f319090043 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -380,6 +380,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm) pgoff_t i; struct page **page = ttm->pages; + if (ttm->page_flags & TTM_PAGE_FLAG_SG) + return; + for (i = 0; i < ttm->num_pages; ++i) { (*page)->mapping = NULL; (*page++)->index = 0; diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h index d95335cb90bd..f58dc7dd15c5 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h @@ -261,12 +261,7 @@ typedef enum SVGA3dSurfaceFormat { /* Planar video formats. */ SVGA3D_YV12 = 121, - /* Shader constant formats. 
*/ - SVGA3D_SURFACE_SHADERCONST_FLOAT = 122, - SVGA3D_SURFACE_SHADERCONST_INT = 123, - SVGA3D_SURFACE_SHADERCONST_BOOL = 124, - - SVGA3D_FORMAT_MAX = 125, + SVGA3D_FORMAT_MAX = 122, } SVGA3dSurfaceFormat; typedef uint32 SVGA3dColor; /* a, r, g, b */ @@ -1223,9 +1218,19 @@ typedef enum { #define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129 #define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130 - +#define SVGA_3D_CMD_GB_SCREEN_DMA 1131 +#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132 +#define SVGA_3D_CMD_GB_MOB_FENCE 1133 +#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134 #define SVGA_3D_CMD_DEFINE_GB_MOB64 1135 #define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136 +#define SVGA_3D_CMD_NOP_ERROR 1137 + +#define SVGA_3D_CMD_RESERVED1 1138 +#define SVGA_3D_CMD_RESERVED2 1139 +#define SVGA_3D_CMD_RESERVED3 1140 +#define SVGA_3D_CMD_RESERVED4 1141 +#define SVGA_3D_CMD_RESERVED5 1142 #define SVGA_3D_CMD_MAX 1142 #define SVGA_3D_CMD_FUTURE_MAX 3000 @@ -1973,8 +1978,7 @@ struct { uint32 sizeInBytes; uint32 validSizeInBytes; SVGAMobFormat ptDepth; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */ typedef @@ -1984,15 +1988,13 @@ struct { uint32 sizeInBytes; uint32 validSizeInBytes; SVGAMobFormat ptDepth; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */ typedef struct { SVGAOTableType type; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */ /* @@ -2005,8 +2007,7 @@ struct SVGA3dCmdDefineGBMob { SVGAMobFormat ptDepth; PPN base; uint32 sizeInBytes; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ @@ -2017,8 +2018,7 @@ SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */ typedef struct SVGA3dCmdDestroyGBMob { SVGAMobId mobid; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */ /* @@ -2031,8 +2031,7 @@ struct SVGA3dCmdRedefineGBMob { SVGAMobFormat ptDepth; PPN base; uint32 sizeInBytes; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */ /* @@ -2045,8 +2044,7 @@ struct SVGA3dCmdDefineGBMob64 { SVGAMobFormat ptDepth; PPN64 base; uint32 sizeInBytes; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */ /* @@ -2059,8 +2057,7 @@ struct SVGA3dCmdRedefineGBMob64 { SVGAMobFormat ptDepth; PPN64 base; uint32 sizeInBytes; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ /* @@ -2070,8 +2067,7 @@ SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */ typedef struct SVGA3dCmdUpdateGBMobMapping { SVGAMobId mobid; -} -__attribute__((__packed__)) +} __packed SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */ /* @@ -2087,7 +2083,8 @@ struct SVGA3dCmdDefineGBSurface { uint32 multisampleCount; SVGA3dTextureFilter autogenFilter; SVGA3dSize size; -} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ +} __packed +SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */ /* * Destroy a guest-backed surface. @@ -2096,7 +2093,8 @@ struct SVGA3dCmdDefineGBSurface { typedef struct SVGA3dCmdDestroyGBSurface { uint32 sid; -} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ +} __packed +SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */ /* * Bind a guest-backed surface to an object. 
@@ -2106,7 +2104,8 @@ typedef struct SVGA3dCmdBindGBSurface { uint32 sid; SVGAMobId mobid; -} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ +} __packed +SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */ /* * Conditionally bind a mob to a guest backed surface if testMobid @@ -2123,7 +2122,7 @@ struct{ SVGAMobId testMobid; SVGAMobId mobid; uint32 flags; -} +} __packed SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */ /* @@ -2135,7 +2134,8 @@ typedef struct SVGA3dCmdUpdateGBImage { SVGA3dSurfaceImageId image; SVGA3dBox box; -} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ +} __packed +SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */ /* * Update an entire guest-backed surface. @@ -2145,7 +2145,8 @@ struct SVGA3dCmdUpdateGBImage { typedef struct SVGA3dCmdUpdateGBSurface { uint32 sid; -} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ +} __packed +SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */ /* * Readback an image in a guest-backed surface. @@ -2155,7 +2156,8 @@ struct SVGA3dCmdUpdateGBSurface { typedef struct SVGA3dCmdReadbackGBImage { SVGA3dSurfaceImageId image; -} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ +} __packed +SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/ /* * Readback an entire guest-backed surface. @@ -2165,7 +2167,8 @@ struct SVGA3dCmdReadbackGBImage { typedef struct SVGA3dCmdReadbackGBSurface { uint32 sid; -} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ +} __packed +SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */ /* * Readback a sub rect of an image in a guest-backed surface. After @@ -2179,7 +2182,7 @@ struct SVGA3dCmdReadbackGBImagePartial { SVGA3dSurfaceImageId image; SVGA3dBox box; uint32 invertBox; -} +} __packed SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ /* @@ -2190,7 +2193,8 @@ SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */ typedef struct SVGA3dCmdInvalidateGBImage { SVGA3dSurfaceImageId image; -} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ +} __packed +SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */ /* * Invalidate an entire guest-backed surface. @@ -2200,7 +2204,8 @@ struct SVGA3dCmdInvalidateGBImage { typedef struct SVGA3dCmdInvalidateGBSurface { uint32 sid; -} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ +} __packed +SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */ /* * Invalidate a sub rect of an image in a guest-backed surface. After @@ -2214,7 +2219,7 @@ struct SVGA3dCmdInvalidateGBImagePartial { SVGA3dSurfaceImageId image; SVGA3dBox box; uint32 invertBox; -} +} __packed SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ /* @@ -2224,7 +2229,8 @@ SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */ typedef struct SVGA3dCmdDefineGBContext { uint32 cid; -} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ +} __packed +SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */ /* * Destroy a guest-backed context. @@ -2233,7 +2239,8 @@ struct SVGA3dCmdDefineGBContext { typedef struct SVGA3dCmdDestroyGBContext { uint32 cid; -} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ +} __packed +SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */ /* * Bind a guest-backed context. 
@@ -2252,7 +2259,8 @@ struct SVGA3dCmdBindGBContext { uint32 cid; SVGAMobId mobid; uint32 validContents; -} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ +} __packed +SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */ /* * Readback a guest-backed context. @@ -2262,7 +2270,8 @@ struct SVGA3dCmdBindGBContext { typedef struct SVGA3dCmdReadbackGBContext { uint32 cid; -} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ +} __packed +SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */ /* * Invalidate a guest-backed context. @@ -2270,7 +2279,8 @@ struct SVGA3dCmdReadbackGBContext { typedef struct SVGA3dCmdInvalidateGBContext { uint32 cid; -} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ +} __packed +SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */ /* * Define a guest-backed shader. @@ -2281,7 +2291,8 @@ struct SVGA3dCmdDefineGBShader { uint32 shid; SVGA3dShaderType type; uint32 sizeInBytes; -} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ +} __packed +SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */ /* * Bind a guest-backed shader. @@ -2291,7 +2302,8 @@ typedef struct SVGA3dCmdBindGBShader { uint32 shid; SVGAMobId mobid; uint32 offsetInBytes; -} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ +} __packed +SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */ /* * Destroy a guest-backed shader. @@ -2299,7 +2311,8 @@ typedef struct SVGA3dCmdBindGBShader { typedef struct SVGA3dCmdDestroyGBShader { uint32 shid; -} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ +} __packed +SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */ typedef struct { @@ -2314,14 +2327,16 @@ struct { * Note that FLOAT and INT constants are 4-dwords in length, while * BOOL constants are 1-dword in length. 
*/ -} SVGA3dCmdSetGBShaderConstInline; +} __packed +SVGA3dCmdSetGBShaderConstInline; /* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */ typedef struct { uint32 cid; SVGA3dQueryType type; -} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ +} __packed +SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */ typedef struct { @@ -2329,7 +2344,8 @@ struct { SVGA3dQueryType type; SVGAMobId mobid; uint32 offset; -} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ +} __packed +SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */ /* @@ -2346,21 +2362,22 @@ struct { SVGA3dQueryType type; SVGAMobId mobid; uint32 offset; -} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ +} __packed +SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */ typedef struct { SVGAMobId mobid; uint32 fbOffset; uint32 initalized; -} +} __packed SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */ typedef struct { SVGAMobId mobid; uint32 gartOffset; -} +} __packed SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */ @@ -2368,7 +2385,7 @@ typedef struct { uint32 gartOffset; uint32 numPages; -} +} __packed SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */ @@ -2385,27 +2402,27 @@ struct { int32 xRoot; int32 yRoot; uint32 flags; -} +} __packed SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */ typedef struct { uint32 stid; -} +} __packed SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */ typedef struct { uint32 stid; SVGA3dSurfaceImageId image; -} +} __packed SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */ typedef struct { uint32 stid; SVGA3dBox box; -} +} __packed SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */ /* @@ -2583,4 +2600,28 @@ typedef union { float f; } SVGA3dDevCapResult; +typedef enum { + SVGA3DCAPS_RECORD_UNKNOWN = 0, + SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100, + SVGA3DCAPS_RECORD_DEVCAPS = 0x100, + SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff, +} SVGA3dCapsRecordType; + +typedef +struct SVGA3dCapsRecordHeader { + uint32 length; + SVGA3dCapsRecordType type; +} +SVGA3dCapsRecordHeader; + +typedef +struct SVGA3dCapsRecord { + SVGA3dCapsRecordHeader header; + uint32 data[1]; +} +SVGA3dCapsRecord; + + +typedef uint32 SVGA3dCapPair[2]; + #endif /* _SVGA3D_REG_H_ */ diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h index 8369c3ba10fe..ef3385096145 100644 --- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h +++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h @@ -38,8 +38,11 @@ #define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) #define max_t(type, x, y) ((x) > (y) ? (x) : (y)) +#define min_t(type, x, y) ((x) < (y) ? (x) : (y)) #define surf_size_struct SVGA3dSize #define u32 uint32 +#define u64 uint64_t +#define U32_MAX ((u32)~0U) #endif /* __KERNEL__ */ @@ -704,8 +707,8 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = { static inline u32 clamped_umul32(u32 a, u32 b) { - uint64_t tmp = (uint64_t) a*b; - return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp; + u64 tmp = (u64) a*b; + return (tmp > (u64) U32_MAX) ? 
U32_MAX : tmp; } static inline const struct svga3d_surface_desc * @@ -834,7 +837,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, bool cubemap) { const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); - u32 total_size = 0; + u64 total_size = 0; u32 mip; for (mip = 0; mip < num_mip_levels; mip++) { @@ -847,7 +850,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format, if (cubemap) total_size *= SVGA3D_MAX_SURFACE_FACES; - return total_size; + return (u32) min_t(u64, total_size, (u64) U32_MAX); } diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h index 71defa4d2d75..11323dd5196f 100644 --- a/drivers/gpu/drm/vmwgfx/svga_reg.h +++ b/drivers/gpu/drm/vmwgfx/svga_reg.h @@ -169,10 +169,17 @@ enum { SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */ SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */ SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */ + SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */ + SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */ SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */ SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */ SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */ - SVGA_REG_TOP = 53, /* Must be 1 more than the last register */ + SVGA_REG_CMD_PREPEND_LOW = 53, + SVGA_REG_CMD_PREPEND_HIGH = 54, + SVGA_REG_SCREENTARGET_MAX_WIDTH = 55, + SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56, + SVGA_REG_MOB_MAX_SIZE = 57, + SVGA_REG_TOP = 58, /* Must be 1 more than the last register */ SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ /* Next 768 (== 256*3) registers exist for colormap */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 82c41daebc0e..1e80152674b5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -37,7 +37,7 @@ struct vmw_user_context { -typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *); +typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); static void vmw_user_context_free(struct vmw_resource *res); static struct vmw_resource * @@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, bool readback, struct ttm_validate_buffer *val_buf); static int vmw_gb_context_destroy(struct vmw_resource *res); -static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi); -static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi); -static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi); +static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); +static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, + bool rebind); +static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); +static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs); static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); static uint64_t vmw_user_context_size; @@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) if (res->func->destroy == vmw_gb_context_destroy) { mutex_lock(&dev_priv->cmdbuf_mutex); + mutex_lock(&dev_priv->binding_mutex); + (void) vmw_context_binding_state_kill + (&container_of(res, struct vmw_user_context, res)->cbs); (void) vmw_gb_context_destroy(res); if (dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid) 
__vmw_execbuf_release_pinned_bo(dev_priv, NULL); + mutex_unlock(&dev_priv->binding_mutex); mutex_unlock(&dev_priv->cmdbuf_mutex); return; } @@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, BUG_ON(bo->mem.mem_type != VMW_PL_MOB); mutex_lock(&dev_priv->binding_mutex); - vmw_context_binding_state_kill(&uctx->cbs); + vmw_context_binding_state_scrub(&uctx->cbs); submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); @@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res) SVGA3dCmdHeader header; SVGA3dCmdDestroyGBContext body; } *cmd; - struct vmw_user_context *uctx = - container_of(res, struct vmw_user_context, res); - - BUG_ON(!list_empty(&uctx->cbs.list)); if (likely(res->id == -1)) return 0; @@ -528,8 +530,9 @@ out_unlock: * vmw_context_scrub_shader - scrub a shader binding from a context. * * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. */ -static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) +static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) { struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { @@ -548,7 +551,7 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) cmd->header.size = sizeof(cmd->body); cmd->body.cid = bi->ctx->id; cmd->body.type = bi->i1.shader_type; - cmd->body.shid = SVGA3D_INVALID_ID; + cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); vmw_fifo_commit(dev_priv, sizeof(*cmd)); return 0; @@ -559,8 +562,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi) * from a context. * * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. */ -static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) +static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, + bool rebind) { struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { @@ -579,7 +584,7 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) cmd->header.size = sizeof(cmd->body); cmd->body.cid = bi->ctx->id; cmd->body.type = bi->i1.rt_type; - cmd->body.target.sid = SVGA3D_INVALID_ID; + cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); cmd->body.target.face = 0; cmd->body.target.mipmap = 0; vmw_fifo_commit(dev_priv, sizeof(*cmd)); @@ -591,11 +596,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi) * vmw_context_scrub_texture - scrub a texture binding from a context. * * @bi: single binding information. + * @rebind: Whether to issue a bind instead of scrub command. * * TODO: Possibly complement this function with a function that takes * a list of texture bindings and combines them to a single command. */ -static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi) +static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, + bool rebind) { struct vmw_private *dev_priv = bi->ctx->dev_priv; struct { @@ -619,7 +626,7 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi) cmd->body.c.cid = bi->ctx->id; cmd->body.s1.stage = bi->i1.texture_stage; cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; - cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID; + cmd->body.s1.value = ((rebind) ? 
bi->res->id : SVGA3D_INVALID_ID); vmw_fifo_commit(dev_priv, sizeof(*cmd)); return 0; @@ -692,6 +699,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, vmw_context_binding_drop(loc); loc->bi = *bi; + loc->bi.scrubbed = false; list_add_tail(&loc->ctx_list, &cbs->list); INIT_LIST_HEAD(&loc->res_list); @@ -727,12 +735,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, if (loc->bi.ctx != NULL) vmw_context_binding_drop(loc); - loc->bi = *bi; - list_add_tail(&loc->ctx_list, &cbs->list); - if (bi->res != NULL) + if (bi->res != NULL) { + loc->bi = *bi; + list_add_tail(&loc->ctx_list, &cbs->list); list_add_tail(&loc->res_list, &bi->res->binding_head); - else - INIT_LIST_HEAD(&loc->res_list); + } } /** @@ -746,7 +753,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, */ static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) { - (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi); + if (!cb->bi.scrubbed) { + (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false); + cb->bi.scrubbed = true; + } vmw_context_binding_drop(cb); } @@ -768,6 +778,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) } /** + * vmw_context_binding_state_scrub - Scrub all bindings associated with a + * struct vmw_ctx_binding state structure. + * + * @cbs: Pointer to the context binding state tracker. + * + * Emits commands to scrub all bindings associated with the + * context binding state tracker. + */ +static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs) +{ + struct vmw_ctx_binding *entry; + + list_for_each_entry(entry, &cbs->list, ctx_list) { + if (!entry->bi.scrubbed) { + (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); + entry->bi.scrubbed = true; + } + } +} + +/** * vmw_context_binding_res_list_kill - Kill all bindings on a * resource binding list * @@ -785,6 +816,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head) } /** + * vmw_context_binding_res_list_scrub - Scrub all bindings on a + * resource binding list + * + * @head: list head of resource binding list + * + * Scrub all bindings associated with a specific resource. Typically + * called before the resource is evicted. + */ +void vmw_context_binding_res_list_scrub(struct list_head *head) +{ + struct vmw_ctx_binding *entry; + + list_for_each_entry(entry, head, res_list) { + if (!entry->bi.scrubbed) { + (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); + entry->bi.scrubbed = true; + } + } +} + +/** * vmw_context_binding_state_transfer - Commit staged binding info * * @ctx: Pointer to context to commit the staged binding info to. @@ -803,3 +855,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx, list_for_each_entry_safe(entry, next, &from->list, ctx_list) vmw_context_binding_transfer(&uctx->cbs, &entry->bi); } + +/** + * vmw_context_rebind_all - Rebind all scrubbed bindings of a context + * + * @ctx: The context resource + * + * Walks through the context binding list and rebinds all scrubbed + * resources. 
+ */ +int vmw_context_rebind_all(struct vmw_resource *ctx) +{ + struct vmw_ctx_binding *entry; + struct vmw_user_context *uctx = + container_of(ctx, struct vmw_user_context, res); + struct vmw_ctx_binding_state *cbs = &uctx->cbs; + int ret; + + list_for_each_entry(entry, &cbs->list, ctx_list) { + if (likely(!entry->bi.scrubbed)) + continue; + + if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id == + SVGA3D_INVALID_ID)) + continue; + + ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true); + if (unlikely(ret != 0)) + return ret; + + entry->bi.scrubbed = false; + } + + return 0; +} + +/** + * vmw_context_binding_list - Return a list of context bindings + * + * @ctx: The context resource + * + * Returns the current list of bindings of the given context. Note that + * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked. + */ +struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) +{ + return &(container_of(ctx, struct vmw_user_context, res)->cbs.list); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 9893328f8fdc..0083cbf99edf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -667,6 +667,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->memory_size = 512*1024*1024; } dev_priv->max_mob_pages = 0; + dev_priv->max_mob_size = 0; if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { uint64_t mem_size = vmw_read(dev_priv, @@ -676,6 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->prim_bb_mem = vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); + dev_priv->max_mob_size = + vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); } else dev_priv->prim_bb_mem = dev_priv->vram_size; @@ -941,6 +944,7 @@ static void vmw_postclose(struct drm_device *dev, drm_master_put(&vmw_fp->locked_master); } + vmw_compat_shader_man_destroy(vmw_fp->shman); ttm_object_file_release(&vmw_fp->tfile); kfree(vmw_fp); } @@ -960,11 +964,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) if (unlikely(vmw_fp->tfile == NULL)) goto out_no_tfile; + vmw_fp->shman = vmw_compat_shader_man_create(dev_priv); + if (IS_ERR(vmw_fp->shman)) + goto out_no_shman; + file_priv->driver_priv = vmw_fp; dev_priv->bdev.dev_mapping = dev->dev_mapping; return 0; +out_no_shman: + ttm_object_file_release(&vmw_fp->tfile); out_no_tfile: kfree(vmw_fp); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 554e7fa33082..07831554dad7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -40,7 +40,7 @@ #include <drm/ttm/ttm_module.h> #include "vmwgfx_fence.h" -#define VMWGFX_DRIVER_DATE "20121114" +#define VMWGFX_DRIVER_DATE "20140228" #define VMWGFX_DRIVER_MAJOR 2 #define VMWGFX_DRIVER_MINOR 5 #define VMWGFX_DRIVER_PATCHLEVEL 0 @@ -75,10 +75,14 @@ #define VMW_RES_FENCE ttm_driver_type3 #define VMW_RES_SHADER ttm_driver_type4 +struct vmw_compat_shader_manager; + struct vmw_fpriv { struct drm_master *locked_master; struct ttm_object_file *tfile; struct list_head fence_events; + bool gb_aware; + struct vmw_compat_shader_manager *shman; }; struct vmw_dma_buffer { @@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo { struct vmw_resource *ctx; struct vmw_resource *res; enum vmw_ctx_binding_type bt; + bool scrubbed; union { SVGA3dShaderType shader_type; SVGA3dRenderTargetType rt_type; @@ -318,7 +323,7 @@ struct vmw_sw_context{ struct drm_open_hash 
res_ht; bool res_ht_initialized; bool kernel; /**< is the called made from the kernel */ - struct ttm_object_file *tfile; + struct vmw_fpriv *fp; struct list_head validate_nodes; struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; uint32_t cur_reloc; @@ -336,6 +341,7 @@ struct vmw_sw_context{ bool needs_post_query_barrier; struct vmw_resource *error_resource; struct vmw_ctx_binding_state staged_bindings; + struct list_head staged_shaders; }; struct vmw_legacy_display; @@ -380,6 +386,7 @@ struct vmw_private { uint32_t max_gmr_ids; uint32_t max_gmr_pages; uint32_t max_mob_pages; + uint32_t max_mob_size; uint32_t memory_size; bool has_gmr; bool has_mob; @@ -569,6 +576,8 @@ struct vmw_user_resource_conv; extern void vmw_resource_unreference(struct vmw_resource **p_res); extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); +extern struct vmw_resource * +vmw_resource_reference_unless_doomed(struct vmw_resource *res); extern int vmw_resource_validate(struct vmw_resource *res); extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup); extern bool vmw_resource_needs_backup(const struct vmw_resource *res); @@ -957,6 +966,9 @@ extern void vmw_context_binding_state_transfer(struct vmw_resource *res, struct vmw_ctx_binding_state *cbs); extern void vmw_context_binding_res_list_kill(struct list_head *head); +extern void vmw_context_binding_res_list_scrub(struct list_head *head); +extern int vmw_context_rebind_all(struct vmw_resource *ctx); +extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); /* * Surface management - vmwgfx_surface.c @@ -991,6 +1003,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, + SVGA3dShaderType shader_type, + u32 *user_key); +extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, + struct list_head *list); +extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, + struct list_head *list); +extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, + u32 user_key, + SVGA3dShaderType shader_type, + struct list_head *list); +extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man, + u32 user_key, const void *bytecode, + SVGA3dShaderType shader_type, + size_t size, + struct ttm_object_file *tfile, + struct list_head *list); +extern struct vmw_compat_shader_manager * +vmw_compat_shader_man_create(struct vmw_private *dev_priv); +extern void +vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man); + /** * Inline helper functions diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 7a5f1eb55c5a..efb575a7996c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list, * persistent context binding tracker. 
*/ if (unlikely(val->staged_bindings)) { - vmw_context_binding_state_transfer - (val->res, val->staged_bindings); + if (!backoff) { + vmw_context_binding_state_transfer + (val->res, val->staged_bindings); + } kfree(val->staged_bindings); val->staged_bindings = NULL; } @@ -178,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, } /** + * vmw_resource_context_res_add - Put resources previously bound to a context on + * the validation list + * + * @dev_priv: Pointer to a device private structure + * @sw_context: Pointer to a software context used for this command submission + * @ctx: Pointer to the context resource + * + * This function puts all resources that were previously bound to @ctx on + * the resource validation list. This is part of the context state reemission + */ +static int vmw_resource_context_res_add(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + struct vmw_resource *ctx) +{ + struct list_head *binding_list; + struct vmw_ctx_binding *entry; + int ret = 0; + struct vmw_resource *res; + + mutex_lock(&dev_priv->binding_mutex); + binding_list = vmw_context_binding_list(ctx); + + list_for_each_entry(entry, binding_list, ctx_list) { + res = vmw_resource_reference_unless_doomed(entry->bi.res); + if (unlikely(res == NULL)) + continue; + + ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL); + vmw_resource_unreference(&res); + if (unlikely(ret != 0)) + break; + } + + mutex_unlock(&dev_priv->binding_mutex); + return ret; +} + +/** * vmw_resource_relocation_add - Add a relocation to the relocation list * * @list: Pointer to head of relocation list. @@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb, { struct vmw_resource_relocation *rel; - list_for_each_entry(rel, list, head) - cb[rel->offset] = rel->res->id; + list_for_each_entry(rel, list, head) { + if (likely(rel->res != NULL)) + cb[rel->offset] = rel->res->id; + else + cb[rel->offset] = SVGA_3D_CMD_NOP; + } } static int vmw_cmd_invalid(struct vmw_private *dev_priv, @@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context) } /** - * vmw_cmd_res_check - Check that a resource is present and if so, put it + * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it * on the resource validate list unless it's already there. * * @dev_priv: Pointer to a device private structure. * @sw_context: Pointer to the software context. * @res_type: Resource type. * @converter: User-space visisble type specific information. - * @id: Pointer to the location in the command buffer currently being + * @id: user-space resource id handle. + * @id_loc: Pointer to the location in the command buffer currently being * parsed from where the user-space resource id handle is located. + * @p_val: Pointer to pointer to resource validalidation node. Populated + * on exit. 
*/ -static int vmw_cmd_res_check(struct vmw_private *dev_priv, - struct vmw_sw_context *sw_context, - enum vmw_res_type res_type, - const struct vmw_user_resource_conv *converter, - uint32_t *id, - struct vmw_resource_val_node **p_val) +static int +vmw_cmd_compat_res_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + enum vmw_res_type res_type, + const struct vmw_user_resource_conv *converter, + uint32_t id, + uint32_t *id_loc, + struct vmw_resource_val_node **p_val) { struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type]; @@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, struct vmw_resource_val_node *node; int ret; - if (*id == SVGA3D_INVALID_ID) { + if (id == SVGA3D_INVALID_ID) { if (p_val) *p_val = NULL; if (res_type == vmw_res_context) { @@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, * resource */ - if (likely(rcache->valid && *id == rcache->handle)) { + if (likely(rcache->valid && id == rcache->handle)) { const struct vmw_resource *res = rcache->res; rcache->node->first_usage = false; @@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, return vmw_resource_relocation_add (&sw_context->res_relocations, res, - id - sw_context->buf_start); + id_loc - sw_context->buf_start); } ret = vmw_user_resource_lookup_handle(dev_priv, - sw_context->tfile, - *id, + sw_context->fp->tfile, + id, converter, &res); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use resource 0x%08x.\n", - (unsigned) *id); + (unsigned) id); dump_stack(); return ret; } rcache->valid = true; rcache->res = res; - rcache->handle = *id; + rcache->handle = id; ret = vmw_resource_relocation_add(&sw_context->res_relocations, res, - id - sw_context->buf_start); + id_loc - sw_context->buf_start); if (unlikely(ret != 0)) goto out_no_reloc; @@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv, if (p_val) *p_val = node; - if (node->first_usage && res_type == vmw_res_context) { + if (dev_priv->has_mob && node->first_usage && + res_type == vmw_res_context) { + ret = vmw_resource_context_res_add(dev_priv, sw_context, res); + if (unlikely(ret != 0)) + goto out_no_reloc; node->staged_bindings = kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); if (node->staged_bindings == NULL) { @@ -481,6 +534,59 @@ out_no_reloc: } /** + * vmw_cmd_res_check - Check that a resource is present and if so, put it + * on the resource validate list unless it's already there. + * + * @dev_priv: Pointer to a device private structure. + * @sw_context: Pointer to the software context. + * @res_type: Resource type. + * @converter: User-space visisble type specific information. + * @id_loc: Pointer to the location in the command buffer currently being + * parsed from where the user-space resource id handle is located. + * @p_val: Pointer to pointer to resource validalidation node. Populated + * on exit. + */ +static int +vmw_cmd_res_check(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + enum vmw_res_type res_type, + const struct vmw_user_resource_conv *converter, + uint32_t *id_loc, + struct vmw_resource_val_node **p_val) +{ + return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type, + converter, *id_loc, id_loc, p_val); +} + +/** + * vmw_rebind_contexts - Rebind all resources previously bound to + * referenced contexts. + * + * @sw_context: Pointer to the software context. + * + * Rebind context binding points that have been scrubbed because of eviction. 
+ */ +static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) +{ + struct vmw_resource_val_node *val; + int ret; + + list_for_each_entry(val, &sw_context->resource_list, head) { + if (likely(!val->staged_bindings)) + continue; + + ret = vmw_context_rebind_all(val->res); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Failed to rebind context.\n"); + return ret; + } + } + + return 0; +} + +/** * vmw_cmd_cid_check - Check a command header for valid context information. * * @dev_priv: Pointer to a device private structure. @@ -496,7 +602,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, { struct vmw_cid_cmd { SVGA3dCmdHeader header; - __le32 cid; + uint32_t cid; } *cmd; cmd = container_of(header, struct vmw_cid_cmd, header); @@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, struct vmw_relocation *reloc; int ret; - ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); + ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use MOB buffer.\n"); return -EINVAL; @@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, struct vmw_relocation *reloc; int ret; - ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); + ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use GMR region.\n"); return -EINVAL; @@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); - vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); + vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, + header); out_no_surface: vmw_dmabuf_unreference(&vmw_bo); @@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, &cmd->body.sid, NULL); } + +/** + * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. + */ +static int vmw_cmd_shader_define(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_shader_define_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdDefineShader body; + } *cmd; + int ret; + size_t size; + + cmd = container_of(header, struct vmw_shader_define_cmd, + header); + + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->body.cid, + NULL); + if (unlikely(ret != 0)) + return ret; + + if (unlikely(!dev_priv->has_mob)) + return 0; + + size = cmd->header.size - sizeof(cmd->body); + ret = vmw_compat_shader_add(sw_context->fp->shman, + cmd->body.shid, cmd + 1, + cmd->body.type, size, + sw_context->fp->tfile, + &sw_context->staged_shaders); + if (unlikely(ret != 0)) + return ret; + + return vmw_resource_relocation_add(&sw_context->res_relocations, + NULL, &cmd->header.id - + sw_context->buf_start); + + return 0; +} + +/** + * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
+ */ +static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_shader_destroy_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdDestroyShader body; + } *cmd; + int ret; + + cmd = container_of(header, struct vmw_shader_destroy_cmd, + header); + + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->body.cid, + NULL); + if (unlikely(ret != 0)) + return ret; + + if (unlikely(!dev_priv->has_mob)) + return 0; + + ret = vmw_compat_shader_remove(sw_context->fp->shman, + cmd->body.shid, + cmd->body.type, + &sw_context->staged_shaders); + if (unlikely(ret != 0)) + return ret; + + return vmw_resource_relocation_add(&sw_context->res_relocations, + NULL, &cmd->header.id - + sw_context->buf_start); + + return 0; +} + /** * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER * command @@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, if (dev_priv->has_mob) { struct vmw_ctx_bindinfo bi; struct vmw_resource_val_node *res_node; - - ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, - user_shader_converter, - &cmd->body.shid, &res_node); + u32 shid = cmd->body.shid; + + if (shid != SVGA3D_INVALID_ID) + (void) vmw_compat_shader_lookup(sw_context->fp->shman, + cmd->body.type, + &shid); + + ret = vmw_cmd_compat_res_check(dev_priv, sw_context, + vmw_res_shader, + user_shader_converter, + shid, + &cmd->body.shid, &res_node); if (unlikely(ret != 0)) return ret; @@ -1527,6 +1734,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, } /** + * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST + * command + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
+ */ +static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + struct vmw_set_shader_const_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdSetShaderConst body; + } *cmd; + int ret; + + cmd = container_of(header, struct vmw_set_shader_const_cmd, + header); + + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, + user_context_converter, &cmd->body.cid, + NULL); + if (unlikely(ret != 0)) + return ret; + + if (dev_priv->has_mob) + header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE; + + return 0; +} + +/** * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER * command * @@ -1595,7 +1835,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, return 0; } -static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { +static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid, false, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid, @@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = { true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, false, false, false), - VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check, - true, true, false), - VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check, - true, true, false), + VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define, + true, false, false), + VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy, + true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, true, false, false), - VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check, - true, true, false), + VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const, + true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, true, false, false), VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, @@ -1792,6 +2032,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, goto out_invalid; entry = &vmw_cmd_entries[cmd_id]; + if (unlikely(!entry->func)) + goto out_invalid; + if (unlikely(!entry->user_allow && !sw_context->kernel)) goto out_privileged; @@ -2171,7 +2414,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, } else sw_context->kernel = true; - sw_context->tfile = vmw_fpriv(file_priv)->tfile; + sw_context->fp = vmw_fpriv(file_priv); sw_context->cur_reloc = 0; sw_context->cur_val_buf = 0; sw_context->fence_flags = 0; @@ -2188,16 +2431,17 @@ int vmw_execbuf_process(struct drm_file *file_priv, goto out_unlock; sw_context->res_ht_initialized = true; } + INIT_LIST_HEAD(&sw_context->staged_shaders); INIT_LIST_HEAD(&resource_list); ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, command_size); if (unlikely(ret != 0)) - goto out_err; + goto out_err_nores; ret = vmw_resources_reserve(sw_context); if (unlikely(ret != 0)) - goto out_err; + goto out_err_nores; ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); if (unlikely(ret != 0)) @@ -2225,6 +2469,12 @@ int vmw_execbuf_process(struct drm_file *file_priv, goto out_err; } + if (dev_priv->has_mob) { + ret = vmw_rebind_contexts(sw_context); + if (unlikely(ret != 0)) + goto out_unlock_binding; + } + cmd = vmw_fifo_reserve(dev_priv, command_size); if (unlikely(cmd == NULL)) { DRM_ERROR("Failed reserving fifo space for commands.\n"); @@ -2276,6 +2526,8 @@ int vmw_execbuf_process(struct drm_file *file_priv, } 
list_splice_init(&sw_context->resource_list, &resource_list); + vmw_compat_shaders_commit(sw_context->fp->shman, + &sw_context->staged_shaders); mutex_unlock(&dev_priv->cmdbuf_mutex); /* @@ -2289,10 +2541,11 @@ int vmw_execbuf_process(struct drm_file *file_priv, out_unlock_binding: mutex_unlock(&dev_priv->binding_mutex); out_err: - vmw_resource_relocations_free(&sw_context->res_relocations); - vmw_free_relocations(sw_context); ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); +out_err_nores: vmw_resource_list_unreserve(&sw_context->resource_list, true); + vmw_resource_relocations_free(&sw_context->res_relocations); + vmw_free_relocations(sw_context); vmw_clear_validations(sw_context); if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid)) @@ -2301,6 +2554,8 @@ out_unlock: list_splice_init(&sw_context->resource_list, &resource_list); error_resource = sw_context->error_resource; sw_context->error_resource = NULL; + vmw_compat_shaders_revert(sw_context->fp->shman, + &sw_context->staged_shaders); mutex_unlock(&dev_priv->cmdbuf_mutex); /* diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 116c49736763..47b70949bf3a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -29,12 +29,18 @@ #include <drm/vmwgfx_drm.h> #include "vmwgfx_kms.h" +struct svga_3d_compat_cap { + SVGA3dCapsRecordHeader header; + SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX]; +}; + int vmw_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); struct drm_vmw_getparam_arg *param = (struct drm_vmw_getparam_arg *)data; + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); switch (param->param) { case DRM_VMW_PARAM_NUM_STREAMS: @@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, __le32 __iomem *fifo_mem = dev_priv->mmio_virt; const struct vmw_fifo_state *fifo = &dev_priv->fifo; + if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) { + param->value = SVGA3D_HWVERSION_WS8_B1; + break; + } + param->value = ioread32(fifo_mem + ((fifo->capabilities & @@ -69,19 +80,31 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, break; } case DRM_VMW_PARAM_MAX_SURF_MEMORY: - param->value = dev_priv->memory_size; + if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && + !vmw_fp->gb_aware) + param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2; + else + param->value = dev_priv->memory_size; break; case DRM_VMW_PARAM_3D_CAPS_SIZE: - if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) - param->value = SVGA3D_DEVCAP_MAX; + if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) && + vmw_fp->gb_aware) + param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); + else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) + param->value = sizeof(struct svga_3d_compat_cap) + + sizeof(uint32_t); else param->value = (SVGA_FIFO_3D_CAPS_LAST - - SVGA_FIFO_3D_CAPS + 1); - param->value *= sizeof(uint32_t); + SVGA_FIFO_3D_CAPS + 1) * + sizeof(uint32_t); break; case DRM_VMW_PARAM_MAX_MOB_MEMORY: + vmw_fp->gb_aware = true; param->value = dev_priv->max_mob_pages * PAGE_SIZE; break; + case DRM_VMW_PARAM_MAX_MOB_SIZE: + param->value = dev_priv->max_mob_size; + break; default: DRM_ERROR("Illegal vmwgfx get param request: %d\n", param->param); @@ -91,6 +114,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, return 0; } +static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce, + size_t size) +{ + struct svga_3d_compat_cap *compat_cap = + 
(struct svga_3d_compat_cap *) bounce; + unsigned int i; + size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs); + unsigned int max_size; + + if (size < pair_offset) + return -EINVAL; + + max_size = (size - pair_offset) / sizeof(SVGA3dCapPair); + + if (max_size > SVGA3D_DEVCAP_MAX) + max_size = SVGA3D_DEVCAP_MAX; + + compat_cap->header.length = + (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32); + compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS; + + mutex_lock(&dev_priv->hw_mutex); + for (i = 0; i < max_size; ++i) { + vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); + compat_cap->pairs[i][0] = i; + compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP); + } + mutex_unlock(&dev_priv->hw_mutex); + + return 0; +} + int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -104,41 +159,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, void *bounce; int ret; bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); + struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); if (unlikely(arg->pad64 != 0)) { DRM_ERROR("Illegal GET_3D_CAP argument.\n"); return -EINVAL; } - if (gb_objects) - size = SVGA3D_DEVCAP_MAX; + if (gb_objects && vmw_fp->gb_aware) + size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t); + else if (gb_objects) + size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t); else - size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1); - - size *= sizeof(uint32_t); + size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) * + sizeof(uint32_t); if (arg->max_size < size) size = arg->max_size; - bounce = vmalloc(size); + bounce = vzalloc(size); if (unlikely(bounce == NULL)) { DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); return -ENOMEM; } - if (gb_objects) { - int i; + if (gb_objects && vmw_fp->gb_aware) { + int i, num; uint32_t *bounce32 = (uint32_t *) bounce; + num = size / sizeof(uint32_t); + if (num > SVGA3D_DEVCAP_MAX) + num = SVGA3D_DEVCAP_MAX; + mutex_lock(&dev_priv->hw_mutex); - for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) { + for (i = 0; i < num; ++i) { vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); } mutex_unlock(&dev_priv->hw_mutex); - + } else if (gb_objects) { + ret = vmw_fill_compat_cap(dev_priv, bounce, size); + if (unlikely(ret != 0)) + goto out_err; } else { - fifo_mem = dev_priv->mmio_virt; memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); } @@ -146,6 +209,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, ret = copy_to_user(buffer, bounce, size); if (ret) ret = -EFAULT; +out_err: vfree(bounce); if (unlikely(ret != 0)) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 4910e7b81811..04a64b8cd3cd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); + ret = -ENOMEM; goto out_no_fifo; } @@ -187,18 +188,20 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, bo = otable->page_table->pt_bo; cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); - if (unlikely(cmd == NULL)) - DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); - - memset(cmd, 0, sizeof(*cmd)); - cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; - cmd->header.size = sizeof(cmd->body); - cmd->body.type = type; - cmd->body.baseAddress = 0; - 
cmd->body.sizeInBytes = 0; - cmd->body.validSizeInBytes = 0; - cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + if (unlikely(cmd == NULL)) { + DRM_ERROR("Failed reserving FIFO space for OTable " + "takedown.\n"); + } else { + memset(cmd, 0, sizeof(*cmd)); + cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; + cmd->header.size = sizeof(cmd->body); + cmd->body.type = type; + cmd->body.baseAddress = 0; + cmd->body.sizeInBytes = 0; + cmd->body.validSizeInBytes = 0; + cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); + } if (bo) { int ret; @@ -561,11 +564,12 @@ void vmw_mob_unbind(struct vmw_private *dev_priv, if (unlikely(cmd == NULL)) { DRM_ERROR("Failed reserving FIFO space for Memory " "Object unbinding.\n"); + } else { + cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; + cmd->header.size = sizeof(cmd->body); + cmd->body.mobid = mob->id; + vmw_fifo_commit(dev_priv, sizeof(*cmd)); } - cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; - cmd->header.size = sizeof(cmd->body); - cmd->body.mobid = mob->id; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); if (bo) { vmw_fence_single_bo(bo, NULL); ttm_bo_unreserve(bo); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 6fdd82d42f65..9757b57f8388 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) return res; } +struct vmw_resource * +vmw_resource_reference_unless_doomed(struct vmw_resource *res) +{ + return kref_get_unless_zero(&res->kref) ? res : NULL; +} /** * vmw_resource_release_id - release a resource id to the id manager. @@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref) vmw_dmabuf_unreference(&res->backup); } - if (likely(res->hw_destroy != NULL)) + if (likely(res->hw_destroy != NULL)) { res->hw_destroy(res); + mutex_lock(&dev_priv->binding_mutex); + vmw_context_binding_res_list_kill(&res->binding_head); + mutex_unlock(&dev_priv->binding_mutex); + } id = res->id; if (res->res_free != NULL) @@ -418,8 +427,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv, INIT_LIST_HEAD(&vmw_bo->res_list); ret = ttm_bo_init(bdev, &vmw_bo->base, size, - (user) ? ttm_bo_type_device : - ttm_bo_type_kernel, placement, + ttm_bo_type_device, placement, 0, interruptible, NULL, acc_size, NULL, bo_free); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 1457ec4b7125..ee3856578a12 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -29,6 +29,8 @@ #include "vmwgfx_resource_priv.h" #include "ttm/ttm_placement.h" +#define VMW_COMPAT_SHADER_HT_ORDER 12 + struct vmw_shader { struct vmw_resource res; SVGA3dShaderType type; @@ -40,6 +42,50 @@ struct vmw_user_shader { struct vmw_shader shader; }; +/** + * enum vmw_compat_shader_state - Staging state for compat shaders + */ +enum vmw_compat_shader_state { + VMW_COMPAT_COMMITED, + VMW_COMPAT_ADD, + VMW_COMPAT_DEL +}; + +/** + * struct vmw_compat_shader - Metadata for compat shaders. + * + * @handle: The TTM handle of the guest backed shader. + * @tfile: The struct ttm_object_file the guest backed shader is registered + * with. + * @hash: Hash item for lookup. + * @head: List head for staging lists or the compat shader manager list. + * @state: Staging state. + * + * The structure is protected by the cmdbuf lock. 
+ */ +struct vmw_compat_shader { + u32 handle; + struct ttm_object_file *tfile; + struct drm_hash_item hash; + struct list_head head; + enum vmw_compat_shader_state state; +}; + +/** + * struct vmw_compat_shader_manager - Compat shader manager. + * + * @shaders: Hash table containing staged and commited compat shaders + * @list: List of commited shaders. + * @dev_priv: Pointer to a device private structure. + * + * @shaders and @list are protected by the cmdbuf mutex for now. + */ +struct vmw_compat_shader_manager { + struct drm_open_hash shaders; + struct list_head list; + struct vmw_private *dev_priv; +}; + static void vmw_user_shader_free(struct vmw_resource *res); static struct vmw_resource * vmw_user_shader_base_to_res(struct ttm_base_object *base); @@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res) return 0; mutex_lock(&dev_priv->binding_mutex); - vmw_context_binding_res_list_kill(&res->binding_head); + vmw_context_binding_res_list_scrub(&res->binding_head); cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { @@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, TTM_REF_USAGE); } +static int vmw_shader_alloc(struct vmw_private *dev_priv, + struct vmw_dma_buffer *buffer, + size_t shader_size, + size_t offset, + SVGA3dShaderType shader_type, + struct ttm_object_file *tfile, + u32 *handle) +{ + struct vmw_user_shader *ushader; + struct vmw_resource *res, *tmp; + int ret; + + /* + * Approximate idr memory usage with 128 bytes. It will be limited + * by maximum number_of shaders anyway. + */ + if (unlikely(vmw_user_shader_size == 0)) + vmw_user_shader_size = + ttm_round_pot(sizeof(struct vmw_user_shader)) + 128; + + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), + vmw_user_shader_size, + false, true); + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + DRM_ERROR("Out of graphics memory for shader " + "creation.\n"); + goto out; + } + + ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); + if (unlikely(ushader == NULL)) { + ttm_mem_global_free(vmw_mem_glob(dev_priv), + vmw_user_shader_size); + ret = -ENOMEM; + goto out; + } + + res = &ushader->shader.res; + ushader->base.shareable = false; + ushader->base.tfile = NULL; + + /* + * From here on, the destructor takes over resource freeing. + */ + + ret = vmw_gb_shader_init(dev_priv, res, shader_size, + offset, shader_type, buffer, + vmw_user_shader_free); + if (unlikely(ret != 0)) + goto out; + + tmp = vmw_resource_reference(res); + ret = ttm_base_object_init(tfile, &ushader->base, false, + VMW_RES_SHADER, + &vmw_user_shader_base_release, NULL); + + if (unlikely(ret != 0)) { + vmw_resource_unreference(&tmp); + goto out_err; + } + + if (handle) + *handle = ushader->base.hash.key; +out_err: + vmw_resource_unreference(&res); +out: + return ret; +} + + int vmw_shader_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); - struct vmw_user_shader *ushader; - struct vmw_resource *res; - struct vmw_resource *tmp; struct drm_vmw_shader_create_arg *arg = (struct drm_vmw_shader_create_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; @@ -373,69 +487,326 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data, goto out_bad_arg; } - /* - * Approximate idr memory usage with 128 bytes. It will be limited - * by maximum number_of shaders anyway. 
- */ + ret = ttm_read_lock(&vmaster->lock, true); + if (unlikely(ret != 0)) + goto out_bad_arg; - if (unlikely(vmw_user_shader_size == 0)) - vmw_user_shader_size = ttm_round_pot(sizeof(*ushader)) - + 128; + ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset, + shader_type, tfile, &arg->shader_handle); - ret = ttm_read_lock(&vmaster->lock, true); + ttm_read_unlock(&vmaster->lock); +out_bad_arg: + vmw_dmabuf_unreference(&buffer); + return ret; +} + +/** + * vmw_compat_shader_lookup - Look up a compat shader + * + * @man: Pointer to the compat shader manager. + * @shader_type: The shader type, that combined with the user_key identifies + * the shader. + * @user_key: On entry, this should be a pointer to the user_key. + * On successful exit, it will contain the guest-backed shader's TTM handle. + * + * Returns 0 on success. Non-zero on failure, in which case the value pointed + * to by @user_key is unmodified. + */ +int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man, + SVGA3dShaderType shader_type, + u32 *user_key) +{ + struct drm_hash_item *hash; + int ret; + unsigned long key = *user_key | (shader_type << 24); + + ret = drm_ht_find_item(&man->shaders, key, &hash); if (unlikely(ret != 0)) return ret; - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_user_shader_size, - false, true); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for shader" - " creation.\n"); - goto out_unlock; + *user_key = drm_hash_entry(hash, struct vmw_compat_shader, + hash)->handle; + + return 0; +} + +/** + * vmw_compat_shader_free - Free a compat shader. + * + * @man: Pointer to the compat shader manager. + * @entry: Pointer to a struct vmw_compat_shader. + * + * Frees a struct vmw_compat_shder entry and drops its reference to the + * guest backed shader. + */ +static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man, + struct vmw_compat_shader *entry) +{ + list_del(&entry->head); + WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash)); + WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle, + TTM_REF_USAGE)); + kfree(entry); +} + +/** + * vmw_compat_shaders_commit - Commit a list of compat shader actions. + * + * @man: Pointer to the compat shader manager. + * @list: Caller's list of compat shader actions. + * + * This function commits a list of compat shader additions or removals. + * It is typically called when the execbuf ioctl call triggering these + * actions has commited the fifo contents to the device. + */ +void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man, + struct list_head *list) +{ + struct vmw_compat_shader *entry, *next; + + list_for_each_entry_safe(entry, next, list, head) { + list_del(&entry->head); + switch (entry->state) { + case VMW_COMPAT_ADD: + entry->state = VMW_COMPAT_COMMITED; + list_add_tail(&entry->head, &man->list); + break; + case VMW_COMPAT_DEL: + ttm_ref_object_base_unref(entry->tfile, entry->handle, + TTM_REF_USAGE); + kfree(entry); + break; + default: + BUG(); + break; + } } +} - ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); - if (unlikely(ushader == NULL)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_shader_size); - ret = -ENOMEM; - goto out_unlock; +/** + * vmw_compat_shaders_revert - Revert a list of compat shader actions + * + * @man: Pointer to the compat shader manager. + * @list: Caller's list of compat shader actions. + * + * This function reverts a list of compat shader additions or removals. 
+ * It is typically called when the execbuf ioctl call triggering these + * actions failed for some reason, and the command stream was never + * submitted. + */ +void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man, + struct list_head *list) +{ + struct vmw_compat_shader *entry, *next; + int ret; + + list_for_each_entry_safe(entry, next, list, head) { + switch (entry->state) { + case VMW_COMPAT_ADD: + vmw_compat_shader_free(man, entry); + break; + case VMW_COMPAT_DEL: + ret = drm_ht_insert_item(&man->shaders, &entry->hash); + list_del(&entry->head); + list_add_tail(&entry->head, &man->list); + entry->state = VMW_COMPAT_COMMITED; + break; + default: + BUG(); + break; + } } +} - res = &ushader->shader.res; - ushader->base.shareable = false; - ushader->base.tfile = NULL; +/** + * vmw_compat_shader_remove - Stage a compat shader for removal. + * + * @man: Pointer to the compat shader manager + * @user_key: The key that is used to identify the shader. The key is + * unique to the shader type. + * @shader_type: Shader type. + * @list: Caller's list of staged shader actions. + * + * This function stages a compat shader for removal and removes the key from + * the shader manager's hash table. If the shader was previously only staged + * for addition it is completely removed (But the execbuf code may keep a + * reference if it was bound to a context between addition and removal). If + * it was previously commited to the manager, it is staged for removal. + */ +int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man, + u32 user_key, SVGA3dShaderType shader_type, + struct list_head *list) +{ + struct vmw_compat_shader *entry; + struct drm_hash_item *hash; + int ret; - /* - * From here on, the destructor takes over resource freeing. - */ + ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24), + &hash); + if (likely(ret != 0)) + return -EINVAL; - ret = vmw_gb_shader_init(dev_priv, res, arg->size, - arg->offset, shader_type, buffer, - vmw_user_shader_free); + entry = drm_hash_entry(hash, struct vmw_compat_shader, hash); + + switch (entry->state) { + case VMW_COMPAT_ADD: + vmw_compat_shader_free(man, entry); + break; + case VMW_COMPAT_COMMITED: + (void) drm_ht_remove_item(&man->shaders, &entry->hash); + list_del(&entry->head); + entry->state = VMW_COMPAT_DEL; + list_add_tail(&entry->head, list); + break; + default: + BUG(); + break; + } + + return 0; +} + +/** + * vmw_compat_shader_add - Create a compat shader and add the + * key to the manager + * + * @man: Pointer to the compat shader manager + * @user_key: The key that is used to identify the shader. The key is + * unique to the shader type. + * @bytecode: Pointer to the bytecode of the shader. + * @shader_type: Shader type. + * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is + * to be created with. + * @list: Caller's list of staged shader actions. + * + * Note that only the key is added to the shader manager's hash table. + * The shader is not yet added to the shader manager's list of shaders. 
+ */
+int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+			  u32 user_key, const void *bytecode,
+			  SVGA3dShaderType shader_type,
+			  size_t size,
+			  struct ttm_object_file *tfile,
+			  struct list_head *list)
+{
+	struct vmw_dma_buffer *buf;
+	struct ttm_bo_kmap_obj map;
+	bool is_iomem;
+	struct vmw_compat_shader *compat;
+	u32 handle;
+	int ret;
+
+	if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+		return -EINVAL;
+
+	/* Allocate and pin a DMA buffer */
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (unlikely(buf == NULL))
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+			      true, vmw_dmabuf_bo_free);
 	if (unlikely(ret != 0))
-		goto out_unlock;
+		goto out;

-	tmp = vmw_resource_reference(res);
-	ret = ttm_base_object_init(tfile, &ushader->base, false,
-				   VMW_RES_SHADER,
-				   &vmw_user_shader_base_release, NULL);
+	ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+	if (unlikely(ret != 0))
+		goto no_reserve;
+	/* Map and copy shader bytecode. */
+	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+			  &map);
 	if (unlikely(ret != 0)) {
-		vmw_resource_unreference(&tmp);
-		goto out_err;
+		ttm_bo_unreserve(&buf->base);
+		goto no_reserve;
 	}

-	arg->shader_handle = ushader->base.hash.key;
-out_err:
-	vmw_resource_unreference(&res);
-out_unlock:
-	ttm_read_unlock(&vmaster->lock);
-out_bad_arg:
-	vmw_dmabuf_unreference(&buffer);
+	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+	WARN_ON(is_iomem);
+
+	ttm_bo_kunmap(&map);
+	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+	WARN_ON(ret != 0);
+	ttm_bo_unreserve(&buf->base);
+
+	/* Create a guest-backed shader container backed by the dma buffer */
+	ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
+			       tfile, &handle);
+	vmw_dmabuf_unreference(&buf);
+	if (unlikely(ret != 0))
+		goto no_reserve;
+	/*
+	 * Create a compat shader structure and stage it for insertion
+	 * in the manager
+	 */
+	compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+	if (compat == NULL)
+		goto no_compat;
+
+	compat->hash.key = user_key | (shader_type << 24);
+	ret = drm_ht_insert_item(&man->shaders, &compat->hash);
+	if (unlikely(ret != 0))
+		goto out_invalid_key;
+
+	compat->state = VMW_COMPAT_ADD;
+	compat->handle = handle;
+	compat->tfile = tfile;
+	list_add_tail(&compat->head, list);
+	return 0;
+
+out_invalid_key:
+	kfree(compat);
+no_compat:
+	ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+no_reserve:
+out:
 	return ret;
+}
+
+/**
+ * vmw_compat_shader_man_create - Create a compat shader manager
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Typically done at file open time. If successful returns a pointer to a
+ * compat shader manager. Otherwise returns an error pointer.
+ */
+struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+{
+	struct vmw_compat_shader_manager *man;
+	int ret;
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+	if (man == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	man->dev_priv = dev_priv;
+	INIT_LIST_HEAD(&man->list);
+	ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
+	if (ret == 0)
+		return man;
+
+	kfree(man);
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ *
+ * @man: Pointer to the shader manager to destroy.
+ *
+ * Typically done at file close time.
+ */
+void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+{
+	struct vmw_compat_shader *entry, *next;
+
+	mutex_lock(&man->dev_priv->cmdbuf_mutex);
+	list_for_each_entry_safe(entry, next, &man->list, head)
+		vmw_compat_shader_free(man, entry);
+	mutex_unlock(&man->dev_priv->cmdbuf_mutex);
+	kfree(man);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 979da1c246a5..e7af580ab977 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -830,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0))
 		goto out_unlock;

+	/*
+	 * A gb-aware client referencing a shared surface will
+	 * expect a backup buffer to be present.
+	 */
+	if (dev_priv->has_mob && req->shareable) {
+		uint32_t backup_handle;
+
+		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+					    res->backup_size,
+					    true,
+					    &backup_handle,
+					    &res->backup);
+		if (unlikely(ret != 0)) {
+			vmw_resource_unreference(&res);
+			goto out_unlock;
+		}
+	}
+
 	tmp = vmw_resource_reference(&srf->res);
 	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
 				    req->shareable, VMW_RES_SURFACE,
@@ -908,8 +926,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 		rep->size_addr;

 	if (user_sizes)
-		ret = copy_to_user(user_sizes, srf->sizes,
-				   srf->num_sizes * sizeof(*srf->sizes));
+		ret = copy_to_user(user_sizes, &srf->base_size,
+				   sizeof(srf->base_size));
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("copy_to_user failed %p %u\n",
 			  user_sizes, srf->num_sizes);
@@ -1111,7 +1129,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 		return 0;

 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_kill(&res->binding_head);
+	vmw_context_binding_res_list_scrub(&res->binding_head);

 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
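
Note on the compat shader hash key used above: vmw_compat_shader_add() and vmw_compat_shader_lookup() both build the key as user_key | (shader_type << 24), so the legacy user key must fit in the low 24 bits and the SVGA3d shader type occupies the bits above, which is exactly what the user_key > ((1 << 24) - 1) check enforces. The following is a minimal standalone sketch of that packing (illustrative only, not vmwgfx code; compat_shader_key() and COMPAT_SHADER_KEY_BITS are names invented for the example):

/*
 * Standalone sketch of the compat shader hash key layout: the low 24 bits
 * carry the user key, the bits above carry the shader type, mirroring
 * "user_key | (shader_type << 24)" in the patch above.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define COMPAT_SHADER_KEY_BITS 24U

/* Pack a user key and shader type into a single hash key. */
static unsigned long compat_shader_key(uint32_t user_key, unsigned int shader_type)
{
	/* Same validity limits as vmw_compat_shader_add() enforces. */
	assert(user_key <= (1U << COMPAT_SHADER_KEY_BITS) - 1);
	assert(shader_type <= 16);

	return (unsigned long)user_key |
	       ((unsigned long)shader_type << COMPAT_SHADER_KEY_BITS);
}

int main(void)
{
	unsigned long key = compat_shader_key(0x1234, 1);

	printf("key = 0x%lx, user_key = 0x%lx, type = %lu\n",
	       key,
	       key & ((1UL << COMPAT_SHADER_KEY_BITS) - 1),
	       key >> COMPAT_SHADER_KEY_BITS);
	return 0;
}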
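
The commit/revert pair above implements a staged-action pattern: callers stage VMW_COMPAT_ADD/VMW_COMPAT_DEL entries on their own list, then call vmw_compat_shaders_commit() once the fifo contents have been submitted, or vmw_compat_shaders_revert() if submission failed. Below is a standalone model of that state machine (illustrative only; every type and helper here is invented for the sketch and is not a vmwgfx API):

/*
 * Standalone model of staged add/delete actions with commit or revert,
 * mirroring the semantics of vmw_compat_shaders_commit()/_revert().
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum staged_state { STAGED_ADD, STAGED_COMMITTED, STAGED_DEL };

struct staged_entry {
	enum staged_state state;
	unsigned int key;
	struct staged_entry *next;
};

/* Push an entry onto a singly linked staging list. */
static void stage(struct staged_entry **list, unsigned int key,
		  enum staged_state state)
{
	struct staged_entry *e = malloc(sizeof(*e));

	if (!e)
		abort();
	e->state = state;
	e->key = key;
	e->next = *list;
	*list = e;
}

/* Commit: staged additions become permanent, staged deletions are dropped. */
static void commit(struct staged_entry **list, struct staged_entry **manager)
{
	struct staged_entry *e, *next;

	for (e = *list; e; e = next) {
		next = e->next;
		if (e->state == STAGED_ADD) {
			e->state = STAGED_COMMITTED;
			e->next = *manager;
			*manager = e;
		} else {
			free(e);	/* STAGED_DEL: carry out the removal */
		}
	}
	*list = NULL;
}

/* Revert: staged additions are thrown away, staged deletions are restored. */
static void revert(struct staged_entry **list, struct staged_entry **manager)
{
	struct staged_entry *e, *next;

	for (e = *list; e; e = next) {
		next = e->next;
		if (e->state == STAGED_ADD) {
			free(e);
		} else {
			e->state = STAGED_COMMITTED;
			e->next = *manager;
			*manager = e;
		}
	}
	*list = NULL;
}

int main(void)
{
	struct staged_entry *staged = NULL, *manager = NULL, *e, *next;
	bool submit_succeeded = true;	/* outcome of the modeled submission */

	stage(&staged, 0x10, STAGED_ADD);
	stage(&staged, 0x11, STAGED_ADD);

	if (submit_succeeded)
		commit(&staged, &manager);
	else
		revert(&staged, &manager);

	for (e = manager; e; e = next) {
		next = e->next;
		printf("committed key 0x%x\n", e->key);
		free(e);
	}
	return 0;
}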