Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 50
-rw-r--r--  drivers/gpu/drm/exynos/regs-mixer.h | 1
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 47
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 89
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 3
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 22
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 23
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dport.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc | 18
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h | 460
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h | 460
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h | 188
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h | 188
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h | 170
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h | 170
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 41
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c | 76
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 118
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 41
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 10
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 341
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 38
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 74
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 227
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 396
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 47
-rw-r--r--  drivers/gpu/ipu-v3/ipu-dc.c | 97
-rw-r--r--  drivers/gpu/ipu-v3/ipu-di.c | 2
-rw-r--r--  drivers/gpu/ipu-v3/ipu-dmfc.c | 25
-rw-r--r--  drivers/gpu/ipu-v3/ipu-dp.c | 71
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prv.h | 3
100 files changed, 2535 insertions, 1413 deletions
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 03711d00aaae..8218078b6133 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -419,8 +419,9 @@ long drm_ioctl(struct file *filp,
retcode = -EFAULT;
goto err_i1;
}
- } else
+ } else if (cmd & IOC_OUT) {
memset(kdata, 0, usize);
+ }
if (ioctl->flags & DRM_UNLOCKED)
retcode = func(dev, kdata, file_priv);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index b565372a91f3..0de123afdb34 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -128,7 +128,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
*/
if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
atomic_inc(&dev->vblank[crtc].count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
/* Invalidate all timestamps while vblank irq's are off. */
@@ -868,9 +868,9 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
vblanktimestamp(dev, crtc, tslot) = t_vblank;
}
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_add(diff, &dev->vblank[crtc].count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
/**
@@ -1479,9 +1479,9 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
/* Increment cooked vblank count. This also atomically commits
* the timestamp computed above.
*/
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_inc(&dev->vblank[crtc].count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 4693531ebd48..3aa1c7ebbfcc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -40,7 +40,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
{
struct exynos_dpi *ctx = connector_to_dpi(connector);
- if (!ctx->panel->connector)
+ if (ctx->panel && !ctx->panel->connector)
drm_panel_attach(ctx->panel, &ctx->connector);
return connector_status_connected;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ddb5003f67f5..d82e3cb8a70d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -761,24 +761,24 @@ static int exynos_drm_init(void)
return 0;
-err_unregister_pd:
- platform_device_unregister(exynos_drm_pdev);
-
err_remove_vidi:
#ifdef CONFIG_DRM_EXYNOS_VIDI
exynos_drm_remove_vidi();
+
+err_unregister_pd:
#endif
+ platform_device_unregister(exynos_drm_pdev);
return ret;
}
static void exynos_drm_exit(void)
{
+ platform_driver_unregister(&exynos_drm_platform_driver);
#ifdef CONFIG_DRM_EXYNOS_VIDI
exynos_drm_remove_vidi();
#endif
platform_device_unregister(exynos_drm_pdev);
- platform_driver_unregister(&exynos_drm_platform_driver);
}
module_init(exynos_drm_init);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index fca4b98c4077..02f3b3dcb9f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -341,7 +341,7 @@ struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
int exynos_dpi_remove(struct device *dev);
#else
static inline struct exynos_drm_display *
-exynos_dpi_probe(struct device *dev) { return 0; }
+exynos_dpi_probe(struct device *dev) { return NULL; }
static inline int exynos_dpi_remove(struct device *dev) { return 0; }
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index bb45ab2e7384..33161ad38201 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -741,6 +741,8 @@ static void fimd_apply(struct exynos_drm_manager *mgr)
win_data = &ctx->win_data[i];
if (win_data->enabled)
fimd_win_commit(mgr, i);
+ else
+ fimd_win_disable(mgr, i);
}
fimd_commit(mgr);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 6c1885eedfdf..800158714473 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -467,14 +467,17 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
goto err_free;
}
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, userptr);
if (!vma) {
+ up_read(&current->mm->mmap_sem);
DRM_ERROR("failed to get vm region.\n");
ret = -EFAULT;
goto err_free_pages;
}
if (vma->vm_end < userptr + size) {
+ up_read(&current->mm->mmap_sem);
DRM_ERROR("vma is too small.\n");
ret = -EFAULT;
goto err_free_pages;
@@ -482,6 +485,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->vma = exynos_gem_get_vma(vma);
if (!g2d_userptr->vma) {
+ up_read(&current->mm->mmap_sem);
DRM_ERROR("failed to copy vma.\n");
ret = -ENOMEM;
goto err_free_pages;
@@ -492,10 +496,12 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
npages, pages, vma);
if (ret < 0) {
+ up_read(&current->mm->mmap_sem);
DRM_ERROR("failed to get user pages from userptr.\n");
goto err_put_vma;
}
+ up_read(&current->mm->mmap_sem);
g2d_userptr->pages = pages;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 603a79602f31..a1888e128f1d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -286,7 +286,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
/*
* This case is search ipp driver by prop_id handle.
* sometimes, ipp subsystem find driver by prop_id.
- * e.g PAUSE state, queue buf, command contro.
+ * e.g PAUSE state, queue buf, command control.
*/
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index dd565c4e5b4d..81df11d57673 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2090,6 +2090,11 @@ out:
static void hdmi_dpms(struct exynos_drm_display *display, int mode)
{
+ struct hdmi_context *hdata = display->ctx;
+ struct drm_encoder *encoder = hdata->encoder;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct drm_crtc_helper_funcs *funcs = NULL;
+
DRM_DEBUG_KMS("mode %d\n", mode);
switch (mode) {
@@ -2099,6 +2104,20 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
+ /*
+ * The SFRs of VP and Mixer are updated by Vertical Sync of
+ * Timing generator which is a part of HDMI so the sequence
+ * to disable TV Subsystem should be as following,
+ * VP -> Mixer -> HDMI
+ *
+ * Below codes will try to disable Mixer and VP(if used)
+ * prior to disabling HDMI.
+ */
+ if (crtc)
+ funcs = crtc->helper_private;
+ if (funcs && funcs->dpms)
+ (*funcs->dpms)(crtc, mode);
+
hdmi_poweroff(display);
break;
default:
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 4c5aed7e54c8..7529946d0a74 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -377,6 +377,20 @@ static void mixer_run(struct mixer_context *ctx)
mixer_regs_dump(ctx);
}
+static void mixer_stop(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ int timeout = 20;
+
+ mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
+
+ while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
+ --timeout)
+ usleep_range(10000, 12000);
+
+ mixer_regs_dump(ctx);
+}
+
static void vp_video_buffer(struct mixer_context *ctx, int win)
{
struct mixer_resources *res = &ctx->mixer_res;
@@ -497,13 +511,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
static void mixer_layer_update(struct mixer_context *ctx)
{
struct mixer_resources *res = &ctx->mixer_res;
- u32 val;
-
- val = mixer_reg_read(res, MXR_CFG);
- /* allow one update per vsync only */
- if (!(val & MXR_CFG_LAYER_UPDATE_COUNT_MASK))
- mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+ mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
}
static void mixer_graph_buffer(struct mixer_context *ctx, int win)
@@ -1010,6 +1019,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
}
mutex_unlock(&mixer_ctx->mixer_mutex);
+ drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+
atomic_set(&mixer_ctx->wait_vsync_event, 1);
/*
@@ -1020,6 +1031,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
!atomic_read(&mixer_ctx->wait_vsync_event),
HZ/20))
DRM_DEBUG_KMS("vblank wait timed out.\n");
+
+ drm_vblank_put(mgr->crtc->dev, mixer_ctx->pipe);
}
static void mixer_window_suspend(struct exynos_drm_manager *mgr)
@@ -1061,7 +1074,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
mutex_unlock(&ctx->mixer_mutex);
return;
}
- ctx->powered = true;
+
mutex_unlock(&ctx->mixer_mutex);
pm_runtime_get_sync(ctx->dev);
@@ -1072,6 +1085,12 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
clk_prepare_enable(res->sclk_mixer);
}
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = true;
+ mutex_unlock(&ctx->mixer_mutex);
+
+ mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+
mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
mixer_win_reset(ctx);
@@ -1084,14 +1103,21 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
struct mixer_resources *res = &ctx->mixer_res;
mutex_lock(&ctx->mixer_mutex);
- if (!ctx->powered)
- goto out;
+ if (!ctx->powered) {
+ mutex_unlock(&ctx->mixer_mutex);
+ return;
+ }
mutex_unlock(&ctx->mixer_mutex);
+ mixer_stop(ctx);
mixer_window_suspend(mgr);
ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+ mutex_lock(&ctx->mixer_mutex);
+ ctx->powered = false;
+ mutex_unlock(&ctx->mixer_mutex);
+
clk_disable_unprepare(res->mixer);
if (ctx->vp_enabled) {
clk_disable_unprepare(res->vp);
@@ -1099,12 +1125,6 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
}
pm_runtime_put_sync(ctx->dev);
-
- mutex_lock(&ctx->mixer_mutex);
- ctx->powered = false;
-
-out:
- mutex_unlock(&ctx->mixer_mutex);
}
static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index 4537026bc385..5f32e1a29411 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -78,6 +78,7 @@
#define MXR_STATUS_BIG_ENDIAN (1 << 3)
#define MXR_STATUS_ENDIAN_MASK (1 << 3)
#define MXR_STATUS_SYNC_ENABLE (1 << 2)
+#define MXR_STATUS_REG_IDLE (1 << 1)
#define MXR_STATUS_REG_RUN (1 << 0)
/* bits for MXR_CFG */
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 240c331405b9..ac357b02bd35 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -810,6 +810,12 @@ static int
tda998x_encoder_mode_valid(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
+ if (mode->clock > 150000)
+ return MODE_CLOCK_HIGH;
+ if (mode->htotal >= BIT(13))
+ return MODE_BAD_HVALUE;
+ if (mode->vtotal >= BIT(11))
+ return MODE_BAD_VVALUE;
return MODE_OK;
}
@@ -1048,8 +1054,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
return i;
}
} else {
- for (i = 10; i > 0; i--) {
- msleep(10);
+ for (i = 100; i > 0; i--) {
+ msleep(1);
ret = reg_read(priv, REG_INT_FLAGS_2);
if (ret < 0)
return ret;
@@ -1183,7 +1189,6 @@ static void
tda998x_encoder_destroy(struct drm_encoder *encoder)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
- drm_i2c_encoder_destroy(encoder);
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
@@ -1193,6 +1198,7 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
if (priv->cec)
i2c_unregister_device(priv->cec);
+ drm_i2c_encoder_destroy(encoder);
kfree(priv);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 601caa88c092..b8c689202c40 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -446,7 +446,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
memset(&stats, 0, sizeof(stats));
stats.file_priv = file->driver_priv;
+ spin_lock(&file->table_lock);
idr_for_each(&file->object_idr, per_file_stats, &stats);
+ spin_unlock(&file->table_lock);
/*
* Although we have a valid reference on file->pid, that does
* not guarantee that the task_struct who called get_pid() is
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4c22a5b7f4c5..6c656392d67d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -36,6 +36,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
+#include <linux/console.h>
+#include <linux/vt.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
@@ -1386,7 +1388,6 @@ cleanup_gem:
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
WARN_ON(dev_priv->mm.aliasing_ppgtt);
- drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem_stolen:
@@ -1450,6 +1451,38 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
}
#endif
+#if !defined(CONFIG_VGA_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ return -ENODEV;
+}
+#else
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ DRM_INFO("Replacing VGA console driver\n");
+
+ console_lock();
+ ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+ if (ret == 0) {
+ ret = do_unregister_con_driver(&vga_con);
+
+ /* Ignore "already unregistered". */
+ if (ret == -ENODEV)
+ ret = 0;
+ }
+ console_unlock();
+
+ return ret;
+}
+#endif
+
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = &dev_priv->info;
@@ -1623,8 +1656,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_regs;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = i915_kick_out_vgacon(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting VGA console\n");
+ goto out_gtt;
+ }
+
i915_kick_out_firmware_fb(dev_priv);
+ }
pci_set_master(dev->pdev);
@@ -1756,8 +1796,6 @@ out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
- list_del(&dev_priv->gtt.base.global_link);
- drm_mm_takedown(&dev_priv->gtt.base.mm);
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
out_regs:
intel_uncore_fini(dev);
@@ -1846,7 +1884,6 @@ int i915_driver_unload(struct drm_device *dev)
i915_free_hws(dev);
}
- list_del(&dev_priv->gtt.base.global_link);
WARN_ON(!list_empty(&dev_priv->vm_list));
drm_vblank_cleanup(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 49414d30e8d4..a47fbf60b781 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -977,6 +977,8 @@ struct i915_power_well {
bool always_on;
/* power well enable/disable usage count */
int count;
+ /* cached hw enabled state */
+ bool hw_enabled;
unsigned long domains;
unsigned long data;
const struct i915_power_well_ops *ops;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3ffe308d5893..a5ddf3bce9c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -598,6 +598,7 @@ static int do_switch(struct intel_engine_cs *ring,
struct intel_context *from = ring->last_context;
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
u32 hw_flags = 0;
+ bool uninitialized = false;
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -696,19 +697,20 @@ static int do_switch(struct intel_engine_cs *ring,
i915_gem_context_unreference(from);
}
+ uninitialized = !to->is_initialized && from == NULL;
+ to->is_initialized = true;
+
done:
i915_gem_context_reference(to);
ring->last_context = to;
to->last_ring = ring;
- if (ring->id == RCS && !to->is_initialized && from == NULL) {
+ if (uninitialized) {
ret = i915_gem_render_state_init(ring);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
}
- to->is_initialized = true;
-
return 0;
unpin_out:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index eec820aec022..8b3cde703364 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1992,7 +1992,10 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
- drm_mm_takedown(&vm->mm);
+ if (drm_mm_initialized(&vm->mm)) {
+ drm_mm_takedown(&vm->mm);
+ list_del(&vm->global_link);
+ }
iounmap(gtt->gsm);
teardown_scratch_page(vm->dev);
}
@@ -2025,6 +2028,10 @@ static int i915_gmch_probe(struct drm_device *dev,
static void i915_gmch_remove(struct i915_address_space *vm)
{
+ if (drm_mm_initialized(&vm->mm)) {
+ drm_mm_takedown(&vm->mm);
+ list_del(&vm->global_link);
+ }
intel_gmch_remove();
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 87ec60e181a7..66cf41765bf9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -888,6 +888,8 @@ static void i915_gem_record_rings(struct drm_device *dev,
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
+ error->ring[i].pid = -1;
+
if (ring->dev == NULL)
continue;
@@ -895,7 +897,6 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_record_ring_state(dev, ring, &error->ring[i]);
- error->ring[i].pid = -1;
request = i915_gem_find_active_request(ring);
if (request) {
/* We need to copy these to an anonymous buffer
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index cf288a95347c..267f069765ad 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2402,7 +2402,7 @@ static void i915_error_work_func(struct work_struct *work)
* updates before
* the counter increment.
*/
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_inc(&dev_priv->gpu_error.reset_counter);
kobject_uevent_env(&dev->primary->kdev->kobj,
@@ -2847,10 +2847,14 @@ static int semaphore_passed(struct intel_engine_cs *ring)
struct intel_engine_cs *signaller;
u32 seqno, ctl;
- ring->hangcheck.deadlock = true;
+ ring->hangcheck.deadlock++;
signaller = semaphore_waits_for(ring, &seqno);
- if (signaller == NULL || signaller->hangcheck.deadlock)
+ if (signaller == NULL)
+ return -1;
+
+ /* Prevent pathological recursion due to driver bugs */
+ if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
return -1;
/* cursory check for an unkickable deadlock */
@@ -2858,7 +2862,13 @@ static int semaphore_passed(struct intel_engine_cs *ring)
if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
return -1;
- return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
+ if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
+ return 1;
+
+ if (signaller->hangcheck.deadlock)
+ return -1;
+
+ return 0;
}
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
@@ -2867,7 +2877,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
int i;
for_each_ring(ring, dev_priv, i)
- ring->hangcheck.deadlock = false;
+ ring->hangcheck.deadlock = 0;
}
static enum intel_ring_hangcheck_action
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1ee98f121a00..827498e081df 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -315,9 +315,6 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
- /* Err to enabling backlight if no backlight block. */
- dev_priv->vbt.backlight.present = true;
-
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
return;
@@ -1088,6 +1085,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
+ /* Default to having backlight */
+ dev_priv->vbt.backlight.present = true;
+
/* LFP panel data */
dev_priv->vbt.lvds_dither = 1;
dev_priv->vbt.lvds_vbt = 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dbabaec6ae2d..1112d9ecc226 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2087,6 +2087,7 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
enum plane plane, enum pipe pipe)
{
+ struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
int reg;
@@ -2106,6 +2107,14 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, plane);
+
+ /*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (IS_BROADWELL(dev))
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
}
/**
@@ -4564,7 +4573,10 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
- vlv_prepare_pll(intel_crtc);
+ is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+
+ if (!is_dsi && !IS_CHERRYVIEW(dev))
+ vlv_prepare_pll(intel_crtc);
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -4598,8 +4610,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
- is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
-
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
chv_enable_pll(intel_crtc);
@@ -11087,6 +11097,22 @@ const char *intel_output_name(int output)
return names[output];
}
+static bool intel_crt_present(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_ULT(dev))
+ return false;
+
+ if (IS_CHERRYVIEW(dev))
+ return false;
+
+ if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
+ return false;
+
+ return true;
+}
+
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11095,7 +11121,7 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_lvds_init(dev);
- if (!IS_ULT(dev) && !IS_CHERRYVIEW(dev) && dev_priv->vbt.int_crt_support)
+ if (intel_crt_present(dev))
intel_crt_init(dev);
if (HAS_DDI(dev)) {
@@ -12411,8 +12437,8 @@ intel_display_capture_error_state(struct drm_device *dev)
for_each_pipe(i) {
error->pipe[i].power_domain_on =
- intel_display_power_enabled_sw(dev_priv,
- POWER_DOMAIN_PIPE(i));
+ intel_display_power_enabled_unlocked(dev_priv,
+ POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;
@@ -12447,7 +12473,7 @@ intel_display_capture_error_state(struct drm_device *dev)
enum transcoder cpu_transcoder = transcoders[i];
error->transcoder[i].power_domain_on =
- intel_display_power_enabled_sw(dev_priv,
+ intel_display_power_enabled_unlocked(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bda0ae3d80cc..eaa27ee9e367 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -950,8 +950,8 @@ int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_remove(struct drm_i915_private *);
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 5e6c888b4928..38a98570d10c 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -798,9 +798,6 @@ static void i965_enable_backlight(struct intel_connector *connector)
ctl = freq << 16;
I915_WRITE(BLC_PWM_CTL, ctl);
- /* XXX: combine this into above write? */
- intel_panel_actually_set_backlight(connector, panel->backlight.level);
-
ctl2 = BLM_PIPE(pipe);
if (panel->backlight.combination_mode)
ctl2 |= BLM_COMBINATION_MODE;
@@ -809,6 +806,8 @@ static void i965_enable_backlight(struct intel_connector *connector)
I915_WRITE(BLC_PWM_CTL2, ctl2);
POSTING_READ(BLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
}
static void vlv_enable_backlight(struct intel_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d1e53abec1b5..ee72807069e4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -511,8 +511,7 @@ void intel_update_fbc(struct drm_device *dev)
obj = intel_fb->obj;
adjusted_mode = &intel_crtc->config.adjusted_mode;
- if (i915.enable_fbc < 0 &&
- INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+ if (i915.enable_fbc < 0) {
if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
DRM_DEBUG_KMS("disabled per chip default\n");
goto out_disable;
@@ -3210,6 +3209,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
*/
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+
+ /* Latest VLV doesn't need to force the gfx clock */
+ if (dev->pdev->revision >= 0xd) {
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ return;
+ }
+
/*
* When we are idle. Drop to min voltage state.
*/
@@ -3506,15 +3513,11 @@ static void gen8_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- /* WaDisablePwrmtrEvent:chv (pre-production hw) */
- I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
- I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
-
/* 5: Enable RPS */
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
- GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+ GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
@@ -5608,8 +5611,8 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
@@ -5620,16 +5623,19 @@ bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
return false;
power_domains = &dev_priv->power_domains;
+
is_enabled = true;
+
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
if (power_well->always_on)
continue;
- if (!power_well->count) {
+ if (!power_well->hw_enabled) {
is_enabled = false;
break;
}
}
+
return is_enabled;
}
@@ -5637,30 +5643,15 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- bool is_enabled;
- int i;
-
- if (dev_priv->pm.suspended)
- return false;
+ bool ret;
power_domains = &dev_priv->power_domains;
- is_enabled = true;
-
mutex_lock(&power_domains->lock);
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
- if (power_well->always_on)
- continue;
-
- if (!power_well->ops->is_enabled(dev_priv, power_well)) {
- is_enabled = false;
- break;
- }
- }
+ ret = intel_display_power_enabled_unlocked(dev_priv, domain);
mutex_unlock(&power_domains->lock);
- return is_enabled;
+ return ret;
}
/*
@@ -5981,6 +5972,7 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
if (!power_well->count++) {
DRM_DEBUG_KMS("enabling %s\n", power_well->name);
power_well->ops->enable(dev_priv, power_well);
+ power_well->hw_enabled = true;
}
check_power_well_state(dev_priv, power_well);
@@ -6010,6 +6002,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
if (!--power_well->count && i915.disable_power_well) {
DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+ power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}
@@ -6024,33 +6017,56 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
static struct i915_power_domains *hsw_pwr;
/* Display audio driver power well request */
-void i915_request_power_well(void)
+int i915_request_power_well(void)
{
struct drm_i915_private *dev_priv;
- if (WARN_ON(!hsw_pwr))
- return;
+ if (!hsw_pwr)
+ return -ENODEV;
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
/* Display audio driver power well release */
-void i915_release_power_well(void)
+int i915_release_power_well(void)
{
struct drm_i915_private *dev_priv;
- if (WARN_ON(!hsw_pwr))
- return;
+ if (!hsw_pwr)
+ return -ENODEV;
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+ return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!hsw_pwr)
+ return -ENODEV;
+
+ dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+ power_domains);
+
+ return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
+
+
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
@@ -6270,8 +6286,11 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
int i;
mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
power_well->ops->sync_hw(dev_priv, power_well);
+ power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+ power_well);
+ }
mutex_unlock(&power_domains->lock);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 910c83cf7d44..e72017bdcd7f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -55,7 +55,7 @@ struct intel_ring_hangcheck {
u32 seqno;
int score;
enum intel_ring_hangcheck_action action;
- bool deadlock;
+ int deadlock;
};
struct intel_ringbuffer {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e0be8ae75c15..9350edd6728d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1385,7 +1385,9 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
>> SDVO_PORT_MULTIPLY_SHIFT) + 1;
}
- dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+ dotclock = pipe_config->port_clock;
+ if (pipe_config->pixel_multiplier)
+ dotclock /= pipe_config->pixel_multiplier;
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 1b66ddcdfb33..9a17b4e92ef4 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -691,6 +691,14 @@ intel_post_enable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (IS_BROADWELL(dev))
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+ /*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 79cba593df0d..4f6fef7ac069 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -320,7 +320,8 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
- del_timer_sync(&dev_priv->uncore.force_wake_timer);
+ if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
+ gen6_force_wake_timer((unsigned long)dev_priv);
/* Hold uncore.lock across reset to prevent any register access
* with forcewake not set correctly
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index ae750f6928c1..7f7aadef8a82 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -277,6 +277,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+ static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
config.phy_init = hdmi_phy_8x74_init;
@@ -286,6 +287,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
config.pwr_reg_names = pwr_reg_names;
config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
config.hpd_clk_names = hpd_clk_names;
+ config.hpd_freq = hpd_clk_freq;
config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
config.pwr_clk_names = pwr_clk_names;
config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 9fafee6a3e43..9d7723c6528a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -87,6 +87,7 @@ struct hdmi_platform_config {
/* clks that need to be on for hpd: */
const char **hpd_clk_names;
+ const long unsigned *hpd_freq;
int hpd_clk_cnt;
/* clks that need to be on for screen pwr (ie pixel clk): */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 56c64c14c9b0..76960faae38f 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -127,6 +127,14 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
}
for (i = 0; i < config->hpd_clk_cnt; i++) {
+ if (config->hpd_freq && config->hpd_freq[i]) {
+ ret = clk_set_rate(hdmi->hpd_clks[i],
+ config->hpd_freq[i]);
+ if (ret)
+ dev_warn(dev->dev, "failed to set clk %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+
ret = clk_prepare_enable(hdmi->hpd_clks[i]);
if (ret) {
dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 42caf7fcb0b9..71510ee26e96 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -20,6 +20,10 @@
#include "msm_mmu.h"
#include "mdp5_kms.h"
+static const char *iommu_ports[] = {
+ "mdp_0",
+};
+
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
static int mdp5_hw_init(struct msm_kms *kms)
@@ -104,6 +108,12 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
static void mdp5_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct msm_mmu *mmu = mdp5_kms->mmu;
+
+ if (mmu) {
+ mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
+ mmu->funcs->destroy(mmu);
+ }
kfree(mdp5_kms);
}
@@ -216,10 +226,6 @@ fail:
return ret;
}
-static const char *iommu_ports[] = {
- "mdp_0",
-};
-
static int get_clk(struct platform_device *pdev, struct clk **clkp,
const char *name)
{
@@ -317,17 +323,23 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mmu = msm_iommu_new(dev, config->iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
+ dev_err(dev->dev, "failed to init iommu: %d\n", ret);
goto fail;
}
+
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
- if (ret)
+ if (ret) {
+ dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
+ mmu->funcs->destroy(mmu);
goto fail;
+ }
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
mmu = NULL;
}
+ mdp5_kms->mmu = mmu;
mdp5_kms->id = msm_register_mmu(dev, mmu);
if (mdp5_kms->id < 0) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index c8b1a2522c25..6e981b692d1d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -33,6 +33,7 @@ struct mdp5_kms {
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
+ struct msm_mmu *mmu;
/* for tracking smp allocation amongst pipes: */
mdp5_smp_state_t smp_state;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0d2562fb681e..9a5d87db5c23 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -159,7 +159,7 @@ static int msm_unload(struct drm_device *dev)
static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
- const static struct of_device_id match_types[] = { {
+ static const struct of_device_id match_types[] = { {
.compatible = "qcom,mdss_mdp",
.data = (void *)5,
}, {
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index a752ab83b810..5107fc4826bc 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -59,7 +59,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
- dma_addr_t paddr;
+ uint32_t paddr;
int ret, size;
sizes->surface_bpp = 32;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index bb8026daebc9..690d7e7b6d1e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -278,6 +278,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct drm_device *dev = obj->dev;
int ret = 0;
if (!msm_obj->domain[id].iova) {
@@ -285,6 +286,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
struct msm_mmu *mmu = priv->mmus[id];
struct page **pages = get_pages(obj);
+ if (!mmu) {
+ dev_err(dev->dev, "null MMU pointer\n");
+ return -EINVAL;
+ }
+
if (IS_ERR(pages))
return PTR_ERR(pages);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 92b745986231..4b2ad9181edf 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -28,7 +28,7 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
unsigned long iova, int flags, void *arg)
{
DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
- return 0;
+ return -ENOSYS;
}
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
@@ -40,8 +40,10 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
for (i = 0; i < cnt; i++) {
struct device *msm_iommu_get_ctx(const char *ctx_name);
struct device *ctx = msm_iommu_get_ctx(names[i]);
- if (IS_ERR_OR_NULL(ctx))
+ if (IS_ERR_OR_NULL(ctx)) {
+ dev_warn(dev->dev, "couldn't get %s context", names[i]);
continue;
+ }
ret = iommu_attach_device(iommu->domain, ctx);
if (ret) {
dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
@@ -52,6 +54,20 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
return 0;
}
+static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx))
+ continue;
+ iommu_detach_device(iommu->domain, ctx);
+ }
+}
+
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
struct sg_table *sgt, unsigned len, int prot)
{
@@ -110,7 +126,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
- BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+ BUG_ON(!PAGE_ALIGNED(bytes));
da += bytes;
}
@@ -127,6 +143,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
static const struct msm_mmu_funcs funcs = {
.attach = msm_iommu_attach,
+ .detach = msm_iommu_detach,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.destroy = msm_iommu_destroy,
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 030324482b4a..21da6d154f71 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -22,6 +22,7 @@
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
+ void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
unsigned len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 2b6156d0e4b5..8b307e143632 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -140,6 +140,7 @@ nouveau-y += core/subdev/i2c/nv4e.o
nouveau-y += core/subdev/i2c/nv50.o
nouveau-y += core/subdev/i2c/nv94.o
nouveau-y += core/subdev/i2c/nvd0.o
+nouveau-y += core/subdev/i2c/gf117.o
nouveau-y += core/subdev/i2c/nve0.o
nouveau-y += core/subdev/ibus/nvc0.o
nouveau-y += core/subdev/ibus/nve0.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index f199957995fa..8d55ed633b19 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -314,7 +314,7 @@ nvc0_identify(struct nouveau_device *device)
device->cname = "GF117";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
device->oclass[NVDEV_SUBDEV_GPIO ] = nvd0_gpio_oclass;
- device->oclass[NVDEV_SUBDEV_I2C ] = nvd0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = gf117_i2c_oclass;
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
index c41f656abe64..9c38c5e40500 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -99,8 +99,10 @@ _nouveau_disp_dtor(struct nouveau_object *object)
nouveau_event_destroy(&disp->vblank);
- list_for_each_entry_safe(outp, outt, &disp->outp, head) {
- nouveau_object_ref(NULL, (struct nouveau_object **)&outp);
+ if (disp->outp.next) {
+ list_for_each_entry_safe(outp, outt, &disp->outp, head) {
+ nouveau_object_ref(NULL, (struct nouveau_object **)&outp);
+ }
}
nouveau_engine_destroy(&disp->base);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 39562d48101d..5a5b59b21130 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -241,7 +241,9 @@ dp_link_train_eq(struct dp_state *dp)
dp_set_training_pattern(dp, 2);
do {
- if (dp_link_train_update(dp, dp->pc2, 400))
+ if ((tries &&
+ dp_link_train_commit(dp, dp->pc2)) ||
+ dp_link_train_update(dp, dp->pc2, 400))
break;
eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
@@ -253,9 +255,6 @@ dp_link_train_eq(struct dp_state *dp)
!(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
eq_done = false;
}
-
- if (dp_link_train_commit(dp, dp->pc2))
- break;
} while (!eq_done && cr_done && ++tries <= 5);
return eq_done ? 0 : -1;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 1e85f36c705f..26e962b7e702 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1270,7 +1270,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
i--;
outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
- if (!data)
+ if (!outp)
return NULL;
if (outp->info.location == 0) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
index 2f7345f7fe07..7445f12b1d9e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
@@ -54,7 +54,7 @@ mmio_list_base:
#ifdef INCLUDE_CODE
// reports an exception to the host
//
-// In: $r15 error code (see nvc0.fuc)
+// In: $r15 error code (see os.h)
//
error:
push $r14
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
index c8ddb8d71b91..b4ad18bf5a26 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
@@ -49,7 +49,7 @@ hub_mmio_list_next:
#ifdef INCLUDE_CODE
// reports an exception to the host
//
-// In: $r15 error code (see nvc0.fuc)
+// In: $r15 error code (see os.h)
//
error:
nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), 0, $r15)
@@ -343,13 +343,25 @@ ih:
ih_no_ctxsw:
and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD
bra e #ih_no_fwmthd
- // none we handle, ack, and fall-through to unhandled
+ // none we handle; report to host and ack
+ nv_rd32($r15, NV_PGRAPH_TRAPPED_DATA_LO)
+ nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(4), 0, $r15)
+ nv_rd32($r15, NV_PGRAPH_TRAPPED_ADDR)
+ nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(3), 0, $r15)
+ extr $r14 $r15 16:18
+ shl b32 $r14 $r14 2
+ imm32($r15, NV_PGRAPH_FE_OBJECT_TABLE(0))
+ add b32 $r14 $r15
+ call(nv_rd32)
+ nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(2), 0, $r15)
+ mov $r15 E_BAD_FWMTHD
+ call(error)
mov $r11 0x100
nv_wr32(0x400144, $r11)
// anything we didn't handle, bring it to the host's attention
ih_no_fwmthd:
- mov $r11 0x104 // FIFO | CHSW
+ mov $r11 0x504 // FIFO | CHSW | FWMTHD
not b32 $r11
and $r11 $r10 $r11
bra e #ih_no_other
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
index 214dd16ec566..5f953c5c20b7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
@@ -478,10 +478,10 @@ uint32_t gm107_grhub_code[] = {
0x01040080,
0xbd0001f6,
0x01004104,
- 0x627e020f,
- 0x717e0006,
+ 0xa87e020f,
+ 0xb77e0006,
0x100f0006,
- 0x0006b37e,
+ 0x0006f97e,
0x98000e98,
0x207e010f,
0x14950001,
@@ -523,8 +523,8 @@ uint32_t gm107_grhub_code[] = {
0x800040b7,
0xf40132b6,
0x000fb41b,
- 0x0006b37e,
- 0x627e000f,
+ 0x0006f97e,
+ 0xa87e000f,
0x00800006,
0x01f60201,
0xbd04bd00,
@@ -554,7 +554,7 @@ uint32_t gm107_grhub_code[] = {
0x0009f602,
0x32f404bd,
0x0231f401,
- 0x0008367e,
+ 0x00087c7e,
0x99f094bd,
0x17008007,
0x0009f602,
@@ -563,7 +563,7 @@ uint32_t gm107_grhub_code[] = {
0x37008006,
0x0009f602,
0x31f404bd,
- 0x08367e01,
+ 0x087c7e01,
0xf094bd00,
0x00800699,
0x09f60217,
@@ -572,7 +572,7 @@ uint32_t gm107_grhub_code[] = {
0x20f92f0e,
0x32f412b2,
0x0232f401,
- 0x0008367e,
+ 0x00087c7e,
0x008020fc,
0x02f602c0,
0xf404bd00,
@@ -580,7 +580,7 @@ uint32_t gm107_grhub_code[] = {
0x23c8130e,
0x0d0bf41f,
0xf40131f4,
- 0x367e0232,
+ 0x7c7e0232,
/* 0x054e: chsw_done */
0x01020008,
0x02c30080,
@@ -593,7 +593,7 @@ uint32_t gm107_grhub_code[] = {
0xb0ff2a0e,
0x1bf401e4,
0x7ef2b20c,
- 0xf40007d6,
+ 0xf400081c,
/* 0x057a: main_not_ctx_chan */
0xe4b0400e,
0x2c1bf402,
@@ -602,7 +602,7 @@ uint32_t gm107_grhub_code[] = {
0x0009f602,
0x32f404bd,
0x0232f401,
- 0x0008367e,
+ 0x00087c7e,
0x99f094bd,
0x17008007,
0x0009f602,
@@ -642,238 +642,238 @@ uint32_t gm107_grhub_code[] = {
/* 0x061a: ih_no_ctxsw */
0xabe40000,
0x0bf40400,
- 0x01004b10,
- 0x448ebfb2,
- 0x8f7e4001,
-/* 0x062e: ih_no_fwmthd */
- 0x044b0000,
- 0xffb0bd01,
- 0x0bf4b4ab,
- 0x0700800c,
- 0x000bf603,
-/* 0x0642: ih_no_other */
- 0x004004bd,
- 0x000af601,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x0662: ctx_4170s */
- 0xf5f001f8,
- 0x8effb210,
- 0x7e404170,
- 0xf800008f,
-/* 0x0671: ctx_4170w */
- 0x41708e00,
+ 0x07088e56,
0x00657e40,
- 0xf0ffb200,
- 0x1bf410f4,
-/* 0x0683: ctx_redswitch */
- 0x4e00f8f3,
- 0xe5f00200,
- 0x20e5f040,
- 0x8010e5f0,
- 0xf6018500,
- 0x04bd000e,
-/* 0x069a: ctx_redswitch_delay */
- 0xf2b6080f,
- 0xfd1bf401,
- 0x0400e5f1,
- 0x0100e5f1,
- 0x01850080,
- 0xbd000ef6,
-/* 0x06b3: ctx_86c */
- 0x8000f804,
- 0xf6022300,
+ 0x80ffb200,
+ 0xf6020400,
0x04bd000f,
- 0x148effb2,
- 0x8f7e408a,
- 0xffb20000,
- 0x41a88c8e,
+ 0x4007048e,
+ 0x0000657e,
+ 0x0080ffb2,
+ 0x0ff60203,
+ 0xc704bd00,
+ 0xee9450fe,
+ 0x07008f02,
+ 0x00efbb40,
+ 0x0000657e,
+ 0x02020080,
+ 0xbd000ff6,
+ 0x7e030f04,
+ 0x4b0002f8,
+ 0xbfb20100,
+ 0x4001448e,
0x00008f7e,
-/* 0x06d2: ctx_mem */
- 0x008000f8,
- 0x0ff60284,
-/* 0x06db: ctx_mem_wait */
- 0x8f04bd00,
- 0xcf028400,
- 0xfffd00ff,
- 0xf61bf405,
-/* 0x06ea: ctx_load */
- 0x94bd00f8,
- 0x800599f0,
- 0xf6023700,
- 0x04bd0009,
- 0xb87e0c0a,
- 0xf4bd0000,
- 0x02890080,
+/* 0x0674: ih_no_fwmthd */
+ 0xbd05044b,
+ 0xb4abffb0,
+ 0x800c0bf4,
+ 0xf6030700,
+ 0x04bd000b,
+/* 0x0688: ih_no_other */
+ 0xf6010040,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xf480fc00,
+ 0x01f80032,
+/* 0x06a8: ctx_4170s */
+ 0xb210f5f0,
+ 0x41708eff,
+ 0x008f7e40,
+/* 0x06b7: ctx_4170w */
+ 0x8e00f800,
+ 0x7e404170,
+ 0xb2000065,
+ 0x10f4f0ff,
+ 0xf8f31bf4,
+/* 0x06c9: ctx_redswitch */
+ 0x02004e00,
+ 0xf040e5f0,
+ 0xe5f020e5,
+ 0x85008010,
+ 0x000ef601,
+ 0x080f04bd,
+/* 0x06e0: ctx_redswitch_delay */
+ 0xf401f2b6,
+ 0xe5f1fd1b,
+ 0xe5f10400,
+ 0x00800100,
+ 0x0ef60185,
+ 0xf804bd00,
+/* 0x06f9: ctx_86c */
+ 0x23008000,
+ 0x000ff602,
+ 0xffb204bd,
+ 0x408a148e,
+ 0x00008f7e,
+ 0x8c8effb2,
+ 0x8f7e41a8,
+ 0x00f80000,
+/* 0x0718: ctx_mem */
+ 0x02840080,
0xbd000ff6,
- 0xc1008004,
- 0x0002f602,
- 0x008004bd,
- 0x02f60283,
- 0x0f04bd00,
- 0x06d27e07,
- 0xc0008000,
- 0x0002f602,
- 0x0bfe04bd,
- 0x1f2af000,
- 0xb60424b6,
- 0x94bd0220,
- 0x800899f0,
- 0xf6023700,
- 0x04bd0009,
- 0x02810080,
- 0xbd0002f6,
- 0x0000d204,
- 0x25f08000,
- 0x88008002,
- 0x0002f602,
- 0x100104bd,
- 0xf0020042,
- 0x12fa0223,
- 0xbd03f805,
- 0x0899f094,
- 0x02170080,
- 0xbd0009f6,
- 0x81019804,
- 0x981814b6,
- 0x25b68002,
- 0x0512fd08,
- 0xbd1601b5,
- 0x0999f094,
- 0x02370080,
- 0xbd0009f6,
- 0x81008004,
- 0x0001f602,
- 0x010204bd,
- 0x02880080,
+/* 0x0721: ctx_mem_wait */
+ 0x84008f04,
+ 0x00ffcf02,
+ 0xf405fffd,
+ 0x00f8f61b,
+/* 0x0730: ctx_load */
+ 0x99f094bd,
+ 0x37008005,
+ 0x0009f602,
+ 0x0c0a04bd,
+ 0x0000b87e,
+ 0x0080f4bd,
+ 0x0ff60289,
+ 0x8004bd00,
+ 0xf602c100,
+ 0x04bd0002,
+ 0x02830080,
0xbd0002f6,
- 0x01004104,
- 0xfa0613f0,
- 0x03f80501,
+ 0x7e070f04,
+ 0x80000718,
+ 0xf602c000,
+ 0x04bd0002,
+ 0xf0000bfe,
+ 0x24b61f2a,
+ 0x0220b604,
0x99f094bd,
- 0x17008009,
+ 0x37008008,
0x0009f602,
- 0x94bd04bd,
- 0x800599f0,
+ 0x008004bd,
+ 0x02f60281,
+ 0xd204bd00,
+ 0x80000000,
+ 0x800225f0,
+ 0xf6028800,
+ 0x04bd0002,
+ 0x00421001,
+ 0x0223f002,
+ 0xf80512fa,
+ 0xf094bd03,
+ 0x00800899,
+ 0x09f60217,
+ 0x9804bd00,
+ 0x14b68101,
+ 0x80029818,
+ 0xfd0825b6,
+ 0x01b50512,
+ 0xf094bd16,
+ 0x00800999,
+ 0x09f60237,
+ 0x8004bd00,
+ 0xf6028100,
+ 0x04bd0001,
+ 0x00800102,
+ 0x02f60288,
+ 0x4104bd00,
+ 0x13f00100,
+ 0x0501fa06,
+ 0x94bd03f8,
+ 0x800999f0,
0xf6021700,
0x04bd0009,
-/* 0x07d6: ctx_chan */
- 0xea7e00f8,
- 0x0c0a0006,
- 0x0000b87e,
- 0xd27e050f,
- 0x00f80006,
-/* 0x07e8: ctx_mmio_exec */
- 0x80410398,
+ 0x99f094bd,
+ 0x17008005,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x081c: ctx_chan */
+ 0x0007307e,
+ 0xb87e0c0a,
+ 0x050f0000,
+ 0x0007187e,
+/* 0x082e: ctx_mmio_exec */
+ 0x039800f8,
+ 0x81008041,
+ 0x0003f602,
+ 0x34bd04bd,
+/* 0x083c: ctx_mmio_loop */
+ 0xf4ff34c4,
+ 0x00450e1b,
+ 0x0653f002,
+ 0xf80535fa,
+/* 0x084d: ctx_mmio_pull */
+ 0x804e9803,
+ 0x7e814f98,
+ 0xb600008f,
+ 0x12b60830,
+ 0xdf1bf401,
+/* 0x0860: ctx_mmio_done */
+ 0x80160398,
0xf6028100,
0x04bd0003,
-/* 0x07f6: ctx_mmio_loop */
- 0x34c434bd,
- 0x0e1bf4ff,
- 0xf0020045,
- 0x35fa0653,
-/* 0x0807: ctx_mmio_pull */
- 0x9803f805,
- 0x4f98804e,
- 0x008f7e81,
- 0x0830b600,
- 0xf40112b6,
-/* 0x081a: ctx_mmio_done */
- 0x0398df1b,
- 0x81008016,
- 0x0003f602,
- 0x00b504bd,
- 0x01004140,
- 0xfa0613f0,
- 0x03f80601,
-/* 0x0836: ctx_xfer */
- 0x040e00f8,
- 0x03020080,
- 0xbd000ef6,
-/* 0x0841: ctx_xfer_idle */
- 0x00008e04,
- 0x00eecf03,
- 0x2000e4f1,
- 0xf4f51bf4,
- 0x02f40611,
-/* 0x0855: ctx_xfer_pre */
- 0x7e100f0c,
- 0xf40006b3,
-/* 0x085e: ctx_xfer_pre_load */
- 0x020f1b11,
- 0x0006627e,
- 0x0006717e,
- 0x0006837e,
- 0x627ef4bd,
- 0xea7e0006,
-/* 0x0876: ctx_xfer_exec */
- 0x01980006,
- 0x8024bd16,
- 0xf6010500,
- 0x04bd0002,
- 0x008e1fb2,
- 0x8f7e41a5,
- 0xfcf00000,
- 0x022cf001,
- 0xfd0124b6,
- 0xffb205f2,
- 0x41a5048e,
+ 0x414000b5,
+ 0x13f00100,
+ 0x0601fa06,
+ 0x00f803f8,
+/* 0x087c: ctx_xfer */
+ 0x0080040e,
+ 0x0ef60302,
+/* 0x0887: ctx_xfer_idle */
+ 0x8e04bd00,
+ 0xcf030000,
+ 0xe4f100ee,
+ 0x1bf42000,
+ 0x0611f4f5,
+/* 0x089b: ctx_xfer_pre */
+ 0x0f0c02f4,
+ 0x06f97e10,
+ 0x1b11f400,
+/* 0x08a4: ctx_xfer_pre_load */
+ 0xa87e020f,
+ 0xb77e0006,
+ 0xc97e0006,
+ 0xf4bd0006,
+ 0x0006a87e,
+ 0x0007307e,
+/* 0x08bc: ctx_xfer_exec */
+ 0xbd160198,
+ 0x05008024,
+ 0x0002f601,
+ 0x1fb204bd,
+ 0x41a5008e,
0x00008f7e,
- 0x0002167e,
- 0xfc8024bd,
- 0x02f60247,
- 0xf004bd00,
- 0x20b6012c,
- 0x4afc8003,
- 0x0002f602,
- 0xacf004bd,
- 0x06a5f001,
- 0x0c98000b,
- 0x010d9800,
- 0x3d7e000e,
- 0x080a0001,
- 0x0000ec7e,
- 0x00020a7e,
- 0x0a1201f4,
- 0x00b87e0c,
- 0x7e050f00,
- 0xf40006d2,
-/* 0x08f2: ctx_xfer_post */
- 0x020f2d02,
- 0x0006627e,
- 0xb37ef4bd,
- 0x277e0006,
- 0x717e0002,
+ 0xf001fcf0,
+ 0x24b6022c,
+ 0x05f2fd01,
+ 0x048effb2,
+ 0x8f7e41a5,
+ 0x167e0000,
+ 0x24bd0002,
+ 0x0247fc80,
+ 0xbd0002f6,
+ 0x012cf004,
+ 0x800320b6,
+ 0xf6024afc,
+ 0x04bd0002,
+ 0xf001acf0,
+ 0x000b06a5,
+ 0x98000c98,
+ 0x000e010d,
+ 0x00013d7e,
+ 0xec7e080a,
+ 0x0a7e0000,
+ 0x01f40002,
+ 0x7e0c0a12,
+ 0x0f0000b8,
+ 0x07187e05,
+ 0x2d02f400,
+/* 0x0938: ctx_xfer_post */
+ 0xa87e020f,
0xf4bd0006,
- 0x0006627e,
- 0x981011f4,
- 0x11fd4001,
- 0x070bf405,
- 0x0007e87e,
-/* 0x091c: ctx_xfer_no_post_mmio */
-/* 0x091c: ctx_xfer_done */
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x0006f97e,
+ 0x0002277e,
+ 0x0006b77e,
+ 0xa87ef4bd,
+ 0x11f40006,
+ 0x40019810,
+ 0xf40511fd,
+ 0x2e7e070b,
+/* 0x0962: ctx_xfer_no_post_mmio */
+/* 0x0962: ctx_xfer_done */
+ 0x00f80008,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
index 64dfd75192bf..e49b5a877ae4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
@@ -478,10 +478,10 @@ uint32_t nv108_grhub_code[] = {
0x01040080,
0xbd0001f6,
0x01004104,
- 0x627e020f,
- 0x717e0006,
+ 0xa87e020f,
+ 0xb77e0006,
0x100f0006,
- 0x0006b37e,
+ 0x0006f97e,
0x98000e98,
0x207e010f,
0x14950001,
@@ -523,8 +523,8 @@ uint32_t nv108_grhub_code[] = {
0x800040b7,
0xf40132b6,
0x000fb41b,
- 0x0006b37e,
- 0x627e000f,
+ 0x0006f97e,
+ 0xa87e000f,
0x00800006,
0x01f60201,
0xbd04bd00,
@@ -554,7 +554,7 @@ uint32_t nv108_grhub_code[] = {
0x0009f602,
0x32f404bd,
0x0231f401,
- 0x0008367e,
+ 0x00087c7e,
0x99f094bd,
0x17008007,
0x0009f602,
@@ -563,7 +563,7 @@ uint32_t nv108_grhub_code[] = {
0x37008006,
0x0009f602,
0x31f404bd,
- 0x08367e01,
+ 0x087c7e01,
0xf094bd00,
0x00800699,
0x09f60217,
@@ -572,7 +572,7 @@ uint32_t nv108_grhub_code[] = {
0x20f92f0e,
0x32f412b2,
0x0232f401,
- 0x0008367e,
+ 0x00087c7e,
0x008020fc,
0x02f602c0,
0xf404bd00,
@@ -580,7 +580,7 @@ uint32_t nv108_grhub_code[] = {
0x23c8130e,
0x0d0bf41f,
0xf40131f4,
- 0x367e0232,
+ 0x7c7e0232,
/* 0x054e: chsw_done */
0x01020008,
0x02c30080,
@@ -593,7 +593,7 @@ uint32_t nv108_grhub_code[] = {
0xb0ff2a0e,
0x1bf401e4,
0x7ef2b20c,
- 0xf40007d6,
+ 0xf400081c,
/* 0x057a: main_not_ctx_chan */
0xe4b0400e,
0x2c1bf402,
@@ -602,7 +602,7 @@ uint32_t nv108_grhub_code[] = {
0x0009f602,
0x32f404bd,
0x0232f401,
- 0x0008367e,
+ 0x00087c7e,
0x99f094bd,
0x17008007,
0x0009f602,
@@ -642,238 +642,238 @@ uint32_t nv108_grhub_code[] = {
/* 0x061a: ih_no_ctxsw */
0xabe40000,
0x0bf40400,
- 0x01004b10,
- 0x448ebfb2,
- 0x8f7e4001,
-/* 0x062e: ih_no_fwmthd */
- 0x044b0000,
- 0xffb0bd01,
- 0x0bf4b4ab,
- 0x0700800c,
- 0x000bf603,
-/* 0x0642: ih_no_other */
- 0x004004bd,
- 0x000af601,
- 0xf0fc04bd,
- 0xd0fce0fc,
- 0xa0fcb0fc,
- 0x80fc90fc,
- 0xfc0088fe,
- 0x0032f480,
-/* 0x0662: ctx_4170s */
- 0xf5f001f8,
- 0x8effb210,
- 0x7e404170,
- 0xf800008f,
-/* 0x0671: ctx_4170w */
- 0x41708e00,
+ 0x07088e56,
0x00657e40,
- 0xf0ffb200,
- 0x1bf410f4,
-/* 0x0683: ctx_redswitch */
- 0x4e00f8f3,
- 0xe5f00200,
- 0x20e5f040,
- 0x8010e5f0,
- 0xf6018500,
- 0x04bd000e,
-/* 0x069a: ctx_redswitch_delay */
- 0xf2b6080f,
- 0xfd1bf401,
- 0x0400e5f1,
- 0x0100e5f1,
- 0x01850080,
- 0xbd000ef6,
-/* 0x06b3: ctx_86c */
- 0x8000f804,
- 0xf6022300,
+ 0x80ffb200,
+ 0xf6020400,
0x04bd000f,
- 0x148effb2,
- 0x8f7e408a,
- 0xffb20000,
- 0x41a88c8e,
+ 0x4007048e,
+ 0x0000657e,
+ 0x0080ffb2,
+ 0x0ff60203,
+ 0xc704bd00,
+ 0xee9450fe,
+ 0x07008f02,
+ 0x00efbb40,
+ 0x0000657e,
+ 0x02020080,
+ 0xbd000ff6,
+ 0x7e030f04,
+ 0x4b0002f8,
+ 0xbfb20100,
+ 0x4001448e,
0x00008f7e,
-/* 0x06d2: ctx_mem */
- 0x008000f8,
- 0x0ff60284,
-/* 0x06db: ctx_mem_wait */
- 0x8f04bd00,
- 0xcf028400,
- 0xfffd00ff,
- 0xf61bf405,
-/* 0x06ea: ctx_load */
- 0x94bd00f8,
- 0x800599f0,
- 0xf6023700,
- 0x04bd0009,
- 0xb87e0c0a,
- 0xf4bd0000,
- 0x02890080,
+/* 0x0674: ih_no_fwmthd */
+ 0xbd05044b,
+ 0xb4abffb0,
+ 0x800c0bf4,
+ 0xf6030700,
+ 0x04bd000b,
+/* 0x0688: ih_no_other */
+ 0xf6010040,
+ 0x04bd000a,
+ 0xe0fcf0fc,
+ 0xb0fcd0fc,
+ 0x90fca0fc,
+ 0x88fe80fc,
+ 0xf480fc00,
+ 0x01f80032,
+/* 0x06a8: ctx_4170s */
+ 0xb210f5f0,
+ 0x41708eff,
+ 0x008f7e40,
+/* 0x06b7: ctx_4170w */
+ 0x8e00f800,
+ 0x7e404170,
+ 0xb2000065,
+ 0x10f4f0ff,
+ 0xf8f31bf4,
+/* 0x06c9: ctx_redswitch */
+ 0x02004e00,
+ 0xf040e5f0,
+ 0xe5f020e5,
+ 0x85008010,
+ 0x000ef601,
+ 0x080f04bd,
+/* 0x06e0: ctx_redswitch_delay */
+ 0xf401f2b6,
+ 0xe5f1fd1b,
+ 0xe5f10400,
+ 0x00800100,
+ 0x0ef60185,
+ 0xf804bd00,
+/* 0x06f9: ctx_86c */
+ 0x23008000,
+ 0x000ff602,
+ 0xffb204bd,
+ 0x408a148e,
+ 0x00008f7e,
+ 0x8c8effb2,
+ 0x8f7e41a8,
+ 0x00f80000,
+/* 0x0718: ctx_mem */
+ 0x02840080,
0xbd000ff6,
- 0xc1008004,
- 0x0002f602,
- 0x008004bd,
- 0x02f60283,
- 0x0f04bd00,
- 0x06d27e07,
- 0xc0008000,
- 0x0002f602,
- 0x0bfe04bd,
- 0x1f2af000,
- 0xb60424b6,
- 0x94bd0220,
- 0x800899f0,
- 0xf6023700,
- 0x04bd0009,
- 0x02810080,
- 0xbd0002f6,
- 0x0000d204,
- 0x25f08000,
- 0x88008002,
- 0x0002f602,
- 0x100104bd,
- 0xf0020042,
- 0x12fa0223,
- 0xbd03f805,
- 0x0899f094,
- 0x02170080,
- 0xbd0009f6,
- 0x81019804,
- 0x981814b6,
- 0x25b68002,
- 0x0512fd08,
- 0xbd1601b5,
- 0x0999f094,
- 0x02370080,
- 0xbd0009f6,
- 0x81008004,
- 0x0001f602,
- 0x010204bd,
- 0x02880080,
+/* 0x0721: ctx_mem_wait */
+ 0x84008f04,
+ 0x00ffcf02,
+ 0xf405fffd,
+ 0x00f8f61b,
+/* 0x0730: ctx_load */
+ 0x99f094bd,
+ 0x37008005,
+ 0x0009f602,
+ 0x0c0a04bd,
+ 0x0000b87e,
+ 0x0080f4bd,
+ 0x0ff60289,
+ 0x8004bd00,
+ 0xf602c100,
+ 0x04bd0002,
+ 0x02830080,
0xbd0002f6,
- 0x01004104,
- 0xfa0613f0,
- 0x03f80501,
+ 0x7e070f04,
+ 0x80000718,
+ 0xf602c000,
+ 0x04bd0002,
+ 0xf0000bfe,
+ 0x24b61f2a,
+ 0x0220b604,
0x99f094bd,
- 0x17008009,
+ 0x37008008,
0x0009f602,
- 0x94bd04bd,
- 0x800599f0,
+ 0x008004bd,
+ 0x02f60281,
+ 0xd204bd00,
+ 0x80000000,
+ 0x800225f0,
+ 0xf6028800,
+ 0x04bd0002,
+ 0x00421001,
+ 0x0223f002,
+ 0xf80512fa,
+ 0xf094bd03,
+ 0x00800899,
+ 0x09f60217,
+ 0x9804bd00,
+ 0x14b68101,
+ 0x80029818,
+ 0xfd0825b6,
+ 0x01b50512,
+ 0xf094bd16,
+ 0x00800999,
+ 0x09f60237,
+ 0x8004bd00,
+ 0xf6028100,
+ 0x04bd0001,
+ 0x00800102,
+ 0x02f60288,
+ 0x4104bd00,
+ 0x13f00100,
+ 0x0501fa06,
+ 0x94bd03f8,
+ 0x800999f0,
0xf6021700,
0x04bd0009,
-/* 0x07d6: ctx_chan */
- 0xea7e00f8,
- 0x0c0a0006,
- 0x0000b87e,
- 0xd27e050f,
- 0x00f80006,
-/* 0x07e8: ctx_mmio_exec */
- 0x80410398,
+ 0x99f094bd,
+ 0x17008005,
+ 0x0009f602,
+ 0x00f804bd,
+/* 0x081c: ctx_chan */
+ 0x0007307e,
+ 0xb87e0c0a,
+ 0x050f0000,
+ 0x0007187e,
+/* 0x082e: ctx_mmio_exec */
+ 0x039800f8,
+ 0x81008041,
+ 0x0003f602,
+ 0x34bd04bd,
+/* 0x083c: ctx_mmio_loop */
+ 0xf4ff34c4,
+ 0x00450e1b,
+ 0x0653f002,
+ 0xf80535fa,
+/* 0x084d: ctx_mmio_pull */
+ 0x804e9803,
+ 0x7e814f98,
+ 0xb600008f,
+ 0x12b60830,
+ 0xdf1bf401,
+/* 0x0860: ctx_mmio_done */
+ 0x80160398,
0xf6028100,
0x04bd0003,
-/* 0x07f6: ctx_mmio_loop */
- 0x34c434bd,
- 0x0e1bf4ff,
- 0xf0020045,
- 0x35fa0653,
-/* 0x0807: ctx_mmio_pull */
- 0x9803f805,
- 0x4f98804e,
- 0x008f7e81,
- 0x0830b600,
- 0xf40112b6,
-/* 0x081a: ctx_mmio_done */
- 0x0398df1b,
- 0x81008016,
- 0x0003f602,
- 0x00b504bd,
- 0x01004140,
- 0xfa0613f0,
- 0x03f80601,
-/* 0x0836: ctx_xfer */
- 0x040e00f8,
- 0x03020080,
- 0xbd000ef6,
-/* 0x0841: ctx_xfer_idle */
- 0x00008e04,
- 0x00eecf03,
- 0x2000e4f1,
- 0xf4f51bf4,
- 0x02f40611,
-/* 0x0855: ctx_xfer_pre */
- 0x7e100f0c,
- 0xf40006b3,
-/* 0x085e: ctx_xfer_pre_load */
- 0x020f1b11,
- 0x0006627e,
- 0x0006717e,
- 0x0006837e,
- 0x627ef4bd,
- 0xea7e0006,
-/* 0x0876: ctx_xfer_exec */
- 0x01980006,
- 0x8024bd16,
- 0xf6010500,
- 0x04bd0002,
- 0x008e1fb2,
- 0x8f7e41a5,
- 0xfcf00000,
- 0x022cf001,
- 0xfd0124b6,
- 0xffb205f2,
- 0x41a5048e,
+ 0x414000b5,
+ 0x13f00100,
+ 0x0601fa06,
+ 0x00f803f8,
+/* 0x087c: ctx_xfer */
+ 0x0080040e,
+ 0x0ef60302,
+/* 0x0887: ctx_xfer_idle */
+ 0x8e04bd00,
+ 0xcf030000,
+ 0xe4f100ee,
+ 0x1bf42000,
+ 0x0611f4f5,
+/* 0x089b: ctx_xfer_pre */
+ 0x0f0c02f4,
+ 0x06f97e10,
+ 0x1b11f400,
+/* 0x08a4: ctx_xfer_pre_load */
+ 0xa87e020f,
+ 0xb77e0006,
+ 0xc97e0006,
+ 0xf4bd0006,
+ 0x0006a87e,
+ 0x0007307e,
+/* 0x08bc: ctx_xfer_exec */
+ 0xbd160198,
+ 0x05008024,
+ 0x0002f601,
+ 0x1fb204bd,
+ 0x41a5008e,
0x00008f7e,
- 0x0002167e,
- 0xfc8024bd,
- 0x02f60247,
- 0xf004bd00,
- 0x20b6012c,
- 0x4afc8003,
- 0x0002f602,
- 0xacf004bd,
- 0x06a5f001,
- 0x0c98000b,
- 0x010d9800,
- 0x3d7e000e,
- 0x080a0001,
- 0x0000ec7e,
- 0x00020a7e,
- 0x0a1201f4,
- 0x00b87e0c,
- 0x7e050f00,
- 0xf40006d2,
-/* 0x08f2: ctx_xfer_post */
- 0x020f2d02,
- 0x0006627e,
- 0xb37ef4bd,
- 0x277e0006,
- 0x717e0002,
+ 0xf001fcf0,
+ 0x24b6022c,
+ 0x05f2fd01,
+ 0x048effb2,
+ 0x8f7e41a5,
+ 0x167e0000,
+ 0x24bd0002,
+ 0x0247fc80,
+ 0xbd0002f6,
+ 0x012cf004,
+ 0x800320b6,
+ 0xf6024afc,
+ 0x04bd0002,
+ 0xf001acf0,
+ 0x000b06a5,
+ 0x98000c98,
+ 0x000e010d,
+ 0x00013d7e,
+ 0xec7e080a,
+ 0x0a7e0000,
+ 0x01f40002,
+ 0x7e0c0a12,
+ 0x0f0000b8,
+ 0x07187e05,
+ 0x2d02f400,
+/* 0x0938: ctx_xfer_post */
+ 0xa87e020f,
0xf4bd0006,
- 0x0006627e,
- 0x981011f4,
- 0x11fd4001,
- 0x070bf405,
- 0x0007e87e,
-/* 0x091c: ctx_xfer_no_post_mmio */
-/* 0x091c: ctx_xfer_done */
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x0006f97e,
+ 0x0002277e,
+ 0x0006b77e,
+ 0xa87ef4bd,
+ 0x11f40006,
+ 0x40019810,
+ 0xf40511fd,
+ 0x2e7e070b,
+/* 0x0962: ctx_xfer_no_post_mmio */
+/* 0x0962: ctx_xfer_done */
+ 0x00f80008,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index f8f7b278a13f..92dfe6a4ac87 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -528,10 +528,10 @@ uint32_t nvc0_grhub_code[] = {
0x0001d001,
0x17f104bd,
0xf7f00100,
- 0xb521f502,
- 0xc721f507,
- 0x10f7f007,
- 0x081421f5,
+ 0x0d21f502,
+ 0x1f21f508,
+ 0x10f7f008,
+ 0x086c21f5,
0x98000e98,
0x21f5010f,
0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvc0_grhub_code[] = {
0xb6800040,
0x1bf40132,
0x00f7f0be,
- 0x081421f5,
+ 0x086c21f5,
0xf500f7f0,
- 0xf107b521,
+ 0xf1080d21,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvc0_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x31f40132,
- 0xe821f502,
- 0xf094bd09,
+ 0x4021f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvc0_grhub_code[] = {
0x0203f00f,
0xbd0009d0,
0x0131f404,
- 0x09e821f5,
+ 0x0a4021f5,
0x99f094bd,
0x0007f106,
0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvc0_grhub_code[] = {
0x12b920f9,
0x0132f402,
0xf50232f4,
- 0xfc09e821,
+ 0xfc0a4021,
0x0007f120,
0x0203f0c0,
0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvc0_grhub_code[] = {
0xf41f23c8,
0x31f40d0b,
0x0232f401,
- 0x09e821f5,
+ 0x0a4021f5,
/* 0x063c: chsw_done */
0xf10127f0,
0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvc0_grhub_code[] = {
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
- 0x7821f502,
+ 0xd021f502,
0x460ef409,
/* 0x0670: main_not_ctx_chan */
0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvc0_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x32f40132,
- 0xe821f502,
- 0xf094bd09,
+ 0x4021f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvc0_grhub_code[] = {
/* 0x072b: ih_no_ctxsw */
0xe40421f4,
0xf40400ab,
- 0xb7f1140b,
+ 0xe7f16c0b,
+ 0xe3f00708,
+ 0x6821f440,
+ 0xf102ffb9,
+ 0xf0040007,
+ 0x0fd00203,
+ 0xf104bd00,
+ 0xf00704e7,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0x030007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+ 0x9450fec7,
+ 0xf7f102ee,
+ 0xf3f00700,
+ 0x00efbb40,
+ 0xf16821f4,
+ 0xf0020007,
+ 0x0fd00203,
+ 0xf004bd00,
+ 0x21f503f7,
+ 0xb7f1037e,
0xbfb90100,
0x44e7f102,
0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
0xf19d21f4,
- 0xbd0104b7,
+ 0xbd0504b7,
0xb4abffb0,
0xf10f0bf4,
0xf0070007,
0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
0xf104bd00,
0xf0010007,
0x0ad00003,
@@ -731,36 +753,36 @@ uint32_t nvc0_grhub_code[] = {
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x077f: ctx_4160s */
+/* 0x07d7: ctx_4160s */
0xf001f800,
0xffb901f7,
0x60e7f102,
0x40e3f041,
-/* 0x078f: ctx_4160s_wait */
+/* 0x07e7: ctx_4160s_wait */
0xf19d21f4,
0xf04160e7,
0x21f440e3,
0x02ffb968,
0xf404ffc8,
0x00f8f00b,
-/* 0x07a4: ctx_4160c */
+/* 0x07fc: ctx_4160c */
0xffb9f4bd,
0x60e7f102,
0x40e3f041,
0xf89d21f4,
-/* 0x07b5: ctx_4170s */
+/* 0x080d: ctx_4170s */
0x10f5f000,
0xf102ffb9,
0xf04170e7,
0x21f440e3,
-/* 0x07c7: ctx_4170w */
+/* 0x081f: ctx_4170w */
0xf100f89d,
0xf04170e7,
0x21f440e3,
0x02ffb968,
0xf410f4f0,
0x00f8f01b,
-/* 0x07dc: ctx_redswitch */
+/* 0x0834: ctx_redswitch */
0x0200e7f1,
0xf040e5f0,
0xe5f020e5,
@@ -768,7 +790,7 @@ uint32_t nvc0_grhub_code[] = {
0x0103f085,
0xbd000ed0,
0x08f7f004,
-/* 0x07f8: ctx_redswitch_delay */
+/* 0x0850: ctx_redswitch_delay */
0xf401f2b6,
0xe5f1fd1b,
0xe5f10400,
@@ -776,7 +798,7 @@ uint32_t nvc0_grhub_code[] = {
0x03f08500,
0x000ed001,
0x00f804bd,
-/* 0x0814: ctx_86c */
+/* 0x086c: ctx_86c */
0x1b0007f1,
0xd00203f0,
0x04bd000f,
@@ -787,16 +809,16 @@ uint32_t nvc0_grhub_code[] = {
0xa86ce7f1,
0xf441e3f0,
0x00f89d21,
-/* 0x083c: ctx_mem */
+/* 0x0894: ctx_mem */
0x840007f1,
0xd00203f0,
0x04bd000f,
-/* 0x0848: ctx_mem_wait */
+/* 0x08a0: ctx_mem_wait */
0x8400f7f1,
0xcf02f3f0,
0xfffd00ff,
0xf31bf405,
-/* 0x085a: ctx_load */
+/* 0x08b2: ctx_load */
0x94bd00f8,
0xf10599f0,
0xf00f0007,
@@ -814,7 +836,7 @@ uint32_t nvc0_grhub_code[] = {
0x02d00203,
0xf004bd00,
0x21f507f7,
- 0x07f1083c,
+ 0x07f10894,
0x03f0c000,
0x0002d002,
0x0bfe04bd,
@@ -869,31 +891,31 @@ uint32_t nvc0_grhub_code[] = {
0x03f01700,
0x0009d002,
0x00f804bd,
-/* 0x0978: ctx_chan */
- 0x077f21f5,
- 0x085a21f5,
+/* 0x09d0: ctx_chan */
+ 0x07d721f5,
+ 0x08b221f5,
0xf40ca7f0,
0xf7f0d021,
- 0x3c21f505,
- 0xa421f508,
-/* 0x0993: ctx_mmio_exec */
+ 0x9421f505,
+ 0xfc21f508,
+/* 0x09eb: ctx_mmio_exec */
0x9800f807,
0x07f14103,
0x03f08100,
0x0003d002,
0x34bd04bd,
-/* 0x09a4: ctx_mmio_loop */
+/* 0x09fc: ctx_mmio_loop */
0xf4ff34c4,
0x57f10f1b,
0x53f00200,
0x0535fa06,
-/* 0x09b6: ctx_mmio_pull */
+/* 0x0a0e: ctx_mmio_pull */
0x4e9803f8,
0x814f9880,
0xb69d21f4,
0x12b60830,
0xdf1bf401,
-/* 0x09c8: ctx_mmio_done */
+/* 0x0a20: ctx_mmio_done */
0xf1160398,
0xf0810007,
0x03d00203,
@@ -902,30 +924,30 @@ uint32_t nvc0_grhub_code[] = {
0x13f00100,
0x0601fa06,
0x00f803f8,
-/* 0x09e8: ctx_xfer */
+/* 0x0a40: ctx_xfer */
0xf104e7f0,
0xf0020007,
0x0ed00303,
-/* 0x09f7: ctx_xfer_idle */
+/* 0x0a4f: ctx_xfer_idle */
0xf104bd00,
0xf00000e7,
0xeecf03e3,
0x00e4f100,
0xf21bf420,
0xf40611f4,
-/* 0x0a0e: ctx_xfer_pre */
+/* 0x0a66: ctx_xfer_pre */
0xf7f01102,
- 0x1421f510,
- 0x7f21f508,
+ 0x6c21f510,
+ 0xd721f508,
0x1c11f407,
-/* 0x0a1c: ctx_xfer_pre_load */
+/* 0x0a74: ctx_xfer_pre_load */
0xf502f7f0,
- 0xf507b521,
- 0xf507c721,
- 0xbd07dc21,
- 0xb521f5f4,
- 0x5a21f507,
-/* 0x0a35: ctx_xfer_exec */
+ 0xf5080d21,
+ 0xf5081f21,
+ 0xbd083421,
+ 0x0d21f5f4,
+ 0xb221f508,
+/* 0x0a8d: ctx_xfer_exec */
0x16019808,
0x07f124bd,
0x03f00500,
@@ -960,23 +982,65 @@ uint32_t nvc0_grhub_code[] = {
0x1301f402,
0xf40ca7f0,
0xf7f0d021,
- 0x3c21f505,
+ 0x9421f505,
0x3202f408,
-/* 0x0ac4: ctx_xfer_post */
+/* 0x0b1c: ctx_xfer_post */
0xf502f7f0,
- 0xbd07b521,
- 0x1421f5f4,
+ 0xbd080d21,
+ 0x6c21f5f4,
0x7f21f508,
- 0xc721f502,
- 0xf5f4bd07,
- 0xf407b521,
+ 0x1f21f502,
+ 0xf5f4bd08,
+ 0xf4080d21,
0x01981011,
0x0511fd40,
0xf5070bf4,
-/* 0x0aef: ctx_xfer_no_post_mmio */
- 0xf5099321,
-/* 0x0af3: ctx_xfer_done */
- 0xf807a421,
+/* 0x0b47: ctx_xfer_no_post_mmio */
+ 0xf509eb21,
+/* 0x0b4b: ctx_xfer_done */
+ 0xf807fc21,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
index 624215a005b0..62b0c7601d8b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
@@ -528,10 +528,10 @@ uint32_t nvd7_grhub_code[] = {
0x0001d001,
0x17f104bd,
0xf7f00100,
- 0xb521f502,
- 0xc721f507,
- 0x10f7f007,
- 0x081421f5,
+ 0x0d21f502,
+ 0x1f21f508,
+ 0x10f7f008,
+ 0x086c21f5,
0x98000e98,
0x21f5010f,
0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvd7_grhub_code[] = {
0xb6800040,
0x1bf40132,
0x00f7f0be,
- 0x081421f5,
+ 0x086c21f5,
0xf500f7f0,
- 0xf107b521,
+ 0xf1080d21,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvd7_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x31f40132,
- 0xe821f502,
- 0xf094bd09,
+ 0x4021f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvd7_grhub_code[] = {
0x0203f00f,
0xbd0009d0,
0x0131f404,
- 0x09e821f5,
+ 0x0a4021f5,
0x99f094bd,
0x0007f106,
0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvd7_grhub_code[] = {
0x12b920f9,
0x0132f402,
0xf50232f4,
- 0xfc09e821,
+ 0xfc0a4021,
0x0007f120,
0x0203f0c0,
0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvd7_grhub_code[] = {
0xf41f23c8,
0x31f40d0b,
0x0232f401,
- 0x09e821f5,
+ 0x0a4021f5,
/* 0x063c: chsw_done */
0xf10127f0,
0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvd7_grhub_code[] = {
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
- 0x7821f502,
+ 0xd021f502,
0x460ef409,
/* 0x0670: main_not_ctx_chan */
0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvd7_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x32f40132,
- 0xe821f502,
- 0xf094bd09,
+ 0x4021f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvd7_grhub_code[] = {
/* 0x072b: ih_no_ctxsw */
0xe40421f4,
0xf40400ab,
- 0xb7f1140b,
+ 0xe7f16c0b,
+ 0xe3f00708,
+ 0x6821f440,
+ 0xf102ffb9,
+ 0xf0040007,
+ 0x0fd00203,
+ 0xf104bd00,
+ 0xf00704e7,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0x030007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+ 0x9450fec7,
+ 0xf7f102ee,
+ 0xf3f00700,
+ 0x00efbb40,
+ 0xf16821f4,
+ 0xf0020007,
+ 0x0fd00203,
+ 0xf004bd00,
+ 0x21f503f7,
+ 0xb7f1037e,
0xbfb90100,
0x44e7f102,
0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
0xf19d21f4,
- 0xbd0104b7,
+ 0xbd0504b7,
0xb4abffb0,
0xf10f0bf4,
0xf0070007,
0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
0xf104bd00,
0xf0010007,
0x0ad00003,
@@ -731,36 +753,36 @@ uint32_t nvd7_grhub_code[] = {
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x077f: ctx_4160s */
+/* 0x07d7: ctx_4160s */
0xf001f800,
0xffb901f7,
0x60e7f102,
0x40e3f041,
-/* 0x078f: ctx_4160s_wait */
+/* 0x07e7: ctx_4160s_wait */
0xf19d21f4,
0xf04160e7,
0x21f440e3,
0x02ffb968,
0xf404ffc8,
0x00f8f00b,
-/* 0x07a4: ctx_4160c */
+/* 0x07fc: ctx_4160c */
0xffb9f4bd,
0x60e7f102,
0x40e3f041,
0xf89d21f4,
-/* 0x07b5: ctx_4170s */
+/* 0x080d: ctx_4170s */
0x10f5f000,
0xf102ffb9,
0xf04170e7,
0x21f440e3,
-/* 0x07c7: ctx_4170w */
+/* 0x081f: ctx_4170w */
0xf100f89d,
0xf04170e7,
0x21f440e3,
0x02ffb968,
0xf410f4f0,
0x00f8f01b,
-/* 0x07dc: ctx_redswitch */
+/* 0x0834: ctx_redswitch */
0x0200e7f1,
0xf040e5f0,
0xe5f020e5,
@@ -768,7 +790,7 @@ uint32_t nvd7_grhub_code[] = {
0x0103f085,
0xbd000ed0,
0x08f7f004,
-/* 0x07f8: ctx_redswitch_delay */
+/* 0x0850: ctx_redswitch_delay */
0xf401f2b6,
0xe5f1fd1b,
0xe5f10400,
@@ -776,7 +798,7 @@ uint32_t nvd7_grhub_code[] = {
0x03f08500,
0x000ed001,
0x00f804bd,
-/* 0x0814: ctx_86c */
+/* 0x086c: ctx_86c */
0x1b0007f1,
0xd00203f0,
0x04bd000f,
@@ -787,16 +809,16 @@ uint32_t nvd7_grhub_code[] = {
0xa86ce7f1,
0xf441e3f0,
0x00f89d21,
-/* 0x083c: ctx_mem */
+/* 0x0894: ctx_mem */
0x840007f1,
0xd00203f0,
0x04bd000f,
-/* 0x0848: ctx_mem_wait */
+/* 0x08a0: ctx_mem_wait */
0x8400f7f1,
0xcf02f3f0,
0xfffd00ff,
0xf31bf405,
-/* 0x085a: ctx_load */
+/* 0x08b2: ctx_load */
0x94bd00f8,
0xf10599f0,
0xf00f0007,
@@ -814,7 +836,7 @@ uint32_t nvd7_grhub_code[] = {
0x02d00203,
0xf004bd00,
0x21f507f7,
- 0x07f1083c,
+ 0x07f10894,
0x03f0c000,
0x0002d002,
0x0bfe04bd,
@@ -869,31 +891,31 @@ uint32_t nvd7_grhub_code[] = {
0x03f01700,
0x0009d002,
0x00f804bd,
-/* 0x0978: ctx_chan */
- 0x077f21f5,
- 0x085a21f5,
+/* 0x09d0: ctx_chan */
+ 0x07d721f5,
+ 0x08b221f5,
0xf40ca7f0,
0xf7f0d021,
- 0x3c21f505,
- 0xa421f508,
-/* 0x0993: ctx_mmio_exec */
+ 0x9421f505,
+ 0xfc21f508,
+/* 0x09eb: ctx_mmio_exec */
0x9800f807,
0x07f14103,
0x03f08100,
0x0003d002,
0x34bd04bd,
-/* 0x09a4: ctx_mmio_loop */
+/* 0x09fc: ctx_mmio_loop */
0xf4ff34c4,
0x57f10f1b,
0x53f00200,
0x0535fa06,
-/* 0x09b6: ctx_mmio_pull */
+/* 0x0a0e: ctx_mmio_pull */
0x4e9803f8,
0x814f9880,
0xb69d21f4,
0x12b60830,
0xdf1bf401,
-/* 0x09c8: ctx_mmio_done */
+/* 0x0a20: ctx_mmio_done */
0xf1160398,
0xf0810007,
0x03d00203,
@@ -902,30 +924,30 @@ uint32_t nvd7_grhub_code[] = {
0x13f00100,
0x0601fa06,
0x00f803f8,
-/* 0x09e8: ctx_xfer */
+/* 0x0a40: ctx_xfer */
0xf104e7f0,
0xf0020007,
0x0ed00303,
-/* 0x09f7: ctx_xfer_idle */
+/* 0x0a4f: ctx_xfer_idle */
0xf104bd00,
0xf00000e7,
0xeecf03e3,
0x00e4f100,
0xf21bf420,
0xf40611f4,
-/* 0x0a0e: ctx_xfer_pre */
+/* 0x0a66: ctx_xfer_pre */
0xf7f01102,
- 0x1421f510,
- 0x7f21f508,
+ 0x6c21f510,
+ 0xd721f508,
0x1c11f407,
-/* 0x0a1c: ctx_xfer_pre_load */
+/* 0x0a74: ctx_xfer_pre_load */
0xf502f7f0,
- 0xf507b521,
- 0xf507c721,
- 0xbd07dc21,
- 0xb521f5f4,
- 0x5a21f507,
-/* 0x0a35: ctx_xfer_exec */
+ 0xf5080d21,
+ 0xf5081f21,
+ 0xbd083421,
+ 0x0d21f5f4,
+ 0xb221f508,
+/* 0x0a8d: ctx_xfer_exec */
0x16019808,
0x07f124bd,
0x03f00500,
@@ -960,23 +982,65 @@ uint32_t nvd7_grhub_code[] = {
0x1301f402,
0xf40ca7f0,
0xf7f0d021,
- 0x3c21f505,
+ 0x9421f505,
0x3202f408,
-/* 0x0ac4: ctx_xfer_post */
+/* 0x0b1c: ctx_xfer_post */
0xf502f7f0,
- 0xbd07b521,
- 0x1421f5f4,
+ 0xbd080d21,
+ 0x6c21f5f4,
0x7f21f508,
- 0xc721f502,
- 0xf5f4bd07,
- 0xf407b521,
+ 0x1f21f502,
+ 0xf5f4bd08,
+ 0xf4080d21,
0x01981011,
0x0511fd40,
0xf5070bf4,
-/* 0x0aef: ctx_xfer_no_post_mmio */
- 0xf5099321,
-/* 0x0af3: ctx_xfer_done */
- 0xf807a421,
+/* 0x0b47: ctx_xfer_no_post_mmio */
+ 0xf509eb21,
+/* 0x0b4b: ctx_xfer_done */
+ 0xf807fc21,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index 6547b3dfc7ed..51c3797d8537 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -528,10 +528,10 @@ uint32_t nve0_grhub_code[] = {
0x0001d001,
0x17f104bd,
0xf7f00100,
- 0x7f21f502,
- 0x9121f507,
+ 0xd721f502,
+ 0xe921f507,
0x10f7f007,
- 0x07de21f5,
+ 0x083621f5,
0x98000e98,
0x21f5010f,
0x14950150,
@@ -574,9 +574,9 @@ uint32_t nve0_grhub_code[] = {
0xb6800040,
0x1bf40132,
0x00f7f0be,
- 0x07de21f5,
+ 0x083621f5,
0xf500f7f0,
- 0xf1077f21,
+ 0xf107d721,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nve0_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x31f40132,
- 0xaa21f502,
- 0xf094bd09,
+ 0x0221f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nve0_grhub_code[] = {
0x0203f00f,
0xbd0009d0,
0x0131f404,
- 0x09aa21f5,
+ 0x0a0221f5,
0x99f094bd,
0x0007f106,
0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nve0_grhub_code[] = {
0x12b920f9,
0x0132f402,
0xf50232f4,
- 0xfc09aa21,
+ 0xfc0a0221,
0x0007f120,
0x0203f0c0,
0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nve0_grhub_code[] = {
0xf41f23c8,
0x31f40d0b,
0x0232f401,
- 0x09aa21f5,
+ 0x0a0221f5,
/* 0x063c: chsw_done */
0xf10127f0,
0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nve0_grhub_code[] = {
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
- 0x4221f502,
+ 0x9a21f502,
0x460ef409,
/* 0x0670: main_not_ctx_chan */
0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nve0_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x32f40132,
- 0xaa21f502,
- 0xf094bd09,
+ 0x0221f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nve0_grhub_code[] = {
/* 0x072b: ih_no_ctxsw */
0xe40421f4,
0xf40400ab,
- 0xb7f1140b,
+ 0xe7f16c0b,
+ 0xe3f00708,
+ 0x6821f440,
+ 0xf102ffb9,
+ 0xf0040007,
+ 0x0fd00203,
+ 0xf104bd00,
+ 0xf00704e7,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0x030007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+ 0x9450fec7,
+ 0xf7f102ee,
+ 0xf3f00700,
+ 0x00efbb40,
+ 0xf16821f4,
+ 0xf0020007,
+ 0x0fd00203,
+ 0xf004bd00,
+ 0x21f503f7,
+ 0xb7f1037e,
0xbfb90100,
0x44e7f102,
0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
0xf19d21f4,
- 0xbd0104b7,
+ 0xbd0504b7,
0xb4abffb0,
0xf10f0bf4,
0xf0070007,
0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
0xf104bd00,
0xf0010007,
0x0ad00003,
@@ -731,19 +753,19 @@ uint32_t nve0_grhub_code[] = {
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x077f: ctx_4170s */
+/* 0x07d7: ctx_4170s */
0xf001f800,
0xffb910f5,
0x70e7f102,
0x40e3f041,
0xf89d21f4,
-/* 0x0791: ctx_4170w */
+/* 0x07e9: ctx_4170w */
0x70e7f100,
0x40e3f041,
0xb96821f4,
0xf4f002ff,
0xf01bf410,
-/* 0x07a6: ctx_redswitch */
+/* 0x07fe: ctx_redswitch */
0xe7f100f8,
0xe5f00200,
0x20e5f040,
@@ -751,7 +773,7 @@ uint32_t nve0_grhub_code[] = {
0xf0850007,
0x0ed00103,
0xf004bd00,
-/* 0x07c2: ctx_redswitch_delay */
+/* 0x081a: ctx_redswitch_delay */
0xf2b608f7,
0xfd1bf401,
0x0400e5f1,
@@ -759,7 +781,7 @@ uint32_t nve0_grhub_code[] = {
0x850007f1,
0xd00103f0,
0x04bd000e,
-/* 0x07de: ctx_86c */
+/* 0x0836: ctx_86c */
0x07f100f8,
0x03f01b00,
0x000fd002,
@@ -770,17 +792,17 @@ uint32_t nve0_grhub_code[] = {
0xe7f102ff,
0xe3f0a86c,
0x9d21f441,
-/* 0x0806: ctx_mem */
+/* 0x085e: ctx_mem */
0x07f100f8,
0x03f08400,
0x000fd002,
-/* 0x0812: ctx_mem_wait */
+/* 0x086a: ctx_mem_wait */
0xf7f104bd,
0xf3f08400,
0x00ffcf02,
0xf405fffd,
0x00f8f31b,
-/* 0x0824: ctx_load */
+/* 0x087c: ctx_load */
0x99f094bd,
0x0007f105,
0x0203f00f,
@@ -797,7 +819,7 @@ uint32_t nve0_grhub_code[] = {
0x0203f083,
0xbd0002d0,
0x07f7f004,
- 0x080621f5,
+ 0x085e21f5,
0xc00007f1,
0xd00203f0,
0x04bd0002,
@@ -852,29 +874,29 @@ uint32_t nve0_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x0942: ctx_chan */
+/* 0x099a: ctx_chan */
0x21f500f8,
- 0xa7f00824,
+ 0xa7f0087c,
0xd021f40c,
0xf505f7f0,
- 0xf8080621,
-/* 0x0955: ctx_mmio_exec */
+ 0xf8085e21,
+/* 0x09ad: ctx_mmio_exec */
0x41039800,
0x810007f1,
0xd00203f0,
0x04bd0003,
-/* 0x0966: ctx_mmio_loop */
+/* 0x09be: ctx_mmio_loop */
0x34c434bd,
0x0f1bf4ff,
0x020057f1,
0xfa0653f0,
0x03f80535,
-/* 0x0978: ctx_mmio_pull */
+/* 0x09d0: ctx_mmio_pull */
0x98804e98,
0x21f4814f,
0x0830b69d,
0xf40112b6,
-/* 0x098a: ctx_mmio_done */
+/* 0x09e2: ctx_mmio_done */
0x0398df1b,
0x0007f116,
0x0203f081,
@@ -883,30 +905,30 @@ uint32_t nve0_grhub_code[] = {
0x010017f1,
0xfa0613f0,
0x03f80601,
-/* 0x09aa: ctx_xfer */
+/* 0x0a02: ctx_xfer */
0xe7f000f8,
0x0007f104,
0x0303f002,
0xbd000ed0,
-/* 0x09b9: ctx_xfer_idle */
+/* 0x0a11: ctx_xfer_idle */
0x00e7f104,
0x03e3f000,
0xf100eecf,
0xf42000e4,
0x11f4f21b,
0x0d02f406,
-/* 0x09d0: ctx_xfer_pre */
+/* 0x0a28: ctx_xfer_pre */
0xf510f7f0,
- 0xf407de21,
-/* 0x09da: ctx_xfer_pre_load */
+ 0xf4083621,
+/* 0x0a32: ctx_xfer_pre_load */
0xf7f01c11,
- 0x7f21f502,
- 0x9121f507,
- 0xa621f507,
+ 0xd721f502,
+ 0xe921f507,
+ 0xfe21f507,
0xf5f4bd07,
- 0xf5077f21,
-/* 0x09f3: ctx_xfer_exec */
- 0x98082421,
+ 0xf507d721,
+/* 0x0a4b: ctx_xfer_exec */
+ 0x98087c21,
0x24bd1601,
0x050007f1,
0xd00103f0,
@@ -941,21 +963,21 @@ uint32_t nve0_grhub_code[] = {
0xa7f01301,
0xd021f40c,
0xf505f7f0,
- 0xf4080621,
-/* 0x0a82: ctx_xfer_post */
+ 0xf4085e21,
+/* 0x0ada: ctx_xfer_post */
0xf7f02e02,
- 0x7f21f502,
+ 0xd721f502,
0xf5f4bd07,
- 0xf507de21,
+ 0xf5083621,
0xf5027f21,
- 0xbd079121,
- 0x7f21f5f4,
+ 0xbd07e921,
+ 0xd721f5f4,
0x1011f407,
0xfd400198,
0x0bf40511,
- 0x5521f507,
-/* 0x0aad: ctx_xfer_no_post_mmio */
-/* 0x0aad: ctx_xfer_done */
+ 0xad21f507,
+/* 0x0b05: ctx_xfer_no_post_mmio */
+/* 0x0b05: ctx_xfer_done */
0x0000f809,
0x00000000,
0x00000000,
@@ -977,4 +999,46 @@ uint32_t nve0_grhub_code[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
index a5aee5a4302f..a0af4b703a8e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
@@ -528,10 +528,10 @@ uint32_t nvf0_grhub_code[] = {
0x0001d001,
0x17f104bd,
0xf7f00100,
- 0x7f21f502,
- 0x9121f507,
+ 0xd721f502,
+ 0xe921f507,
0x10f7f007,
- 0x07de21f5,
+ 0x083621f5,
0x98000e98,
0x21f5010f,
0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvf0_grhub_code[] = {
0xb6800040,
0x1bf40132,
0x00f7f0be,
- 0x07de21f5,
+ 0x083621f5,
0xf500f7f0,
- 0xf1077f21,
+ 0xf107d721,
0xf0010007,
0x01d00203,
0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvf0_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x31f40132,
- 0xaa21f502,
- 0xf094bd09,
+ 0x0221f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvf0_grhub_code[] = {
0x0203f037,
0xbd0009d0,
0x0131f404,
- 0x09aa21f5,
+ 0x0a0221f5,
0x99f094bd,
0x0007f106,
0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvf0_grhub_code[] = {
0x12b920f9,
0x0132f402,
0xf50232f4,
- 0xfc09aa21,
+ 0xfc0a0221,
0x0007f120,
0x0203f0c0,
0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvf0_grhub_code[] = {
0xf41f23c8,
0x31f40d0b,
0x0232f401,
- 0x09aa21f5,
+ 0x0a0221f5,
/* 0x063c: chsw_done */
0xf10127f0,
0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvf0_grhub_code[] = {
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
- 0x4221f502,
+ 0x9a21f502,
0x460ef409,
/* 0x0670: main_not_ctx_chan */
0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvf0_grhub_code[] = {
0x09d00203,
0xf404bd00,
0x32f40132,
- 0xaa21f502,
- 0xf094bd09,
+ 0x0221f502,
+ 0xf094bd0a,
0x07f10799,
0x03f01700,
0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvf0_grhub_code[] = {
/* 0x072b: ih_no_ctxsw */
0xe40421f4,
0xf40400ab,
- 0xb7f1140b,
+ 0xe7f16c0b,
+ 0xe3f00708,
+ 0x6821f440,
+ 0xf102ffb9,
+ 0xf0040007,
+ 0x0fd00203,
+ 0xf104bd00,
+ 0xf00704e7,
+ 0x21f440e3,
+ 0x02ffb968,
+ 0x030007f1,
+ 0xd00203f0,
+ 0x04bd000f,
+ 0x9450fec7,
+ 0xf7f102ee,
+ 0xf3f00700,
+ 0x00efbb40,
+ 0xf16821f4,
+ 0xf0020007,
+ 0x0fd00203,
+ 0xf004bd00,
+ 0x21f503f7,
+ 0xb7f1037e,
0xbfb90100,
0x44e7f102,
0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
0xf19d21f4,
- 0xbd0104b7,
+ 0xbd0504b7,
0xb4abffb0,
0xf10f0bf4,
0xf0070007,
0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
0xf104bd00,
0xf0010007,
0x0ad00003,
@@ -731,19 +753,19 @@ uint32_t nvf0_grhub_code[] = {
0xfc90fca0,
0x0088fe80,
0x32f480fc,
-/* 0x077f: ctx_4170s */
+/* 0x07d7: ctx_4170s */
0xf001f800,
0xffb910f5,
0x70e7f102,
0x40e3f041,
0xf89d21f4,
-/* 0x0791: ctx_4170w */
+/* 0x07e9: ctx_4170w */
0x70e7f100,
0x40e3f041,
0xb96821f4,
0xf4f002ff,
0xf01bf410,
-/* 0x07a6: ctx_redswitch */
+/* 0x07fe: ctx_redswitch */
0xe7f100f8,
0xe5f00200,
0x20e5f040,
@@ -751,7 +773,7 @@ uint32_t nvf0_grhub_code[] = {
0xf0850007,
0x0ed00103,
0xf004bd00,
-/* 0x07c2: ctx_redswitch_delay */
+/* 0x081a: ctx_redswitch_delay */
0xf2b608f7,
0xfd1bf401,
0x0400e5f1,
@@ -759,7 +781,7 @@ uint32_t nvf0_grhub_code[] = {
0x850007f1,
0xd00103f0,
0x04bd000e,
-/* 0x07de: ctx_86c */
+/* 0x0836: ctx_86c */
0x07f100f8,
0x03f02300,
0x000fd002,
@@ -770,17 +792,17 @@ uint32_t nvf0_grhub_code[] = {
0xe7f102ff,
0xe3f0a88c,
0x9d21f441,
-/* 0x0806: ctx_mem */
+/* 0x085e: ctx_mem */
0x07f100f8,
0x03f08400,
0x000fd002,
-/* 0x0812: ctx_mem_wait */
+/* 0x086a: ctx_mem_wait */
0xf7f104bd,
0xf3f08400,
0x00ffcf02,
0xf405fffd,
0x00f8f31b,
-/* 0x0824: ctx_load */
+/* 0x087c: ctx_load */
0x99f094bd,
0x0007f105,
0x0203f037,
@@ -797,7 +819,7 @@ uint32_t nvf0_grhub_code[] = {
0x0203f083,
0xbd0002d0,
0x07f7f004,
- 0x080621f5,
+ 0x085e21f5,
0xc00007f1,
0xd00203f0,
0x04bd0002,
@@ -852,29 +874,29 @@ uint32_t nvf0_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
-/* 0x0942: ctx_chan */
+/* 0x099a: ctx_chan */
0x21f500f8,
- 0xa7f00824,
+ 0xa7f0087c,
0xd021f40c,
0xf505f7f0,
- 0xf8080621,
-/* 0x0955: ctx_mmio_exec */
+ 0xf8085e21,
+/* 0x09ad: ctx_mmio_exec */
0x41039800,
0x810007f1,
0xd00203f0,
0x04bd0003,
-/* 0x0966: ctx_mmio_loop */
+/* 0x09be: ctx_mmio_loop */
0x34c434bd,
0x0f1bf4ff,
0x020057f1,
0xfa0653f0,
0x03f80535,
-/* 0x0978: ctx_mmio_pull */
+/* 0x09d0: ctx_mmio_pull */
0x98804e98,
0x21f4814f,
0x0830b69d,
0xf40112b6,
-/* 0x098a: ctx_mmio_done */
+/* 0x09e2: ctx_mmio_done */
0x0398df1b,
0x0007f116,
0x0203f081,
@@ -883,30 +905,30 @@ uint32_t nvf0_grhub_code[] = {
0x010017f1,
0xfa0613f0,
0x03f80601,
-/* 0x09aa: ctx_xfer */
+/* 0x0a02: ctx_xfer */
0xe7f000f8,
0x0007f104,
0x0303f002,
0xbd000ed0,
-/* 0x09b9: ctx_xfer_idle */
+/* 0x0a11: ctx_xfer_idle */
0x00e7f104,
0x03e3f000,
0xf100eecf,
0xf42000e4,
0x11f4f21b,
0x0d02f406,
-/* 0x09d0: ctx_xfer_pre */
+/* 0x0a28: ctx_xfer_pre */
0xf510f7f0,
- 0xf407de21,
-/* 0x09da: ctx_xfer_pre_load */
+ 0xf4083621,
+/* 0x0a32: ctx_xfer_pre_load */
0xf7f01c11,
- 0x7f21f502,
- 0x9121f507,
- 0xa621f507,
+ 0xd721f502,
+ 0xe921f507,
+ 0xfe21f507,
0xf5f4bd07,
- 0xf5077f21,
-/* 0x09f3: ctx_xfer_exec */
- 0x98082421,
+ 0xf507d721,
+/* 0x0a4b: ctx_xfer_exec */
+ 0x98087c21,
0x24bd1601,
0x050007f1,
0xd00103f0,
@@ -941,21 +963,21 @@ uint32_t nvf0_grhub_code[] = {
0xa7f01301,
0xd021f40c,
0xf505f7f0,
- 0xf4080621,
-/* 0x0a82: ctx_xfer_post */
+ 0xf4085e21,
+/* 0x0ada: ctx_xfer_post */
0xf7f02e02,
- 0x7f21f502,
+ 0xd721f502,
0xf5f4bd07,
- 0xf507de21,
+ 0xf5083621,
0xf5027f21,
- 0xbd079121,
- 0x7f21f5f4,
+ 0xbd07e921,
+ 0xd721f5f4,
0x1011f407,
0xfd400198,
0x0bf40511,
- 0x5521f507,
-/* 0x0aad: ctx_xfer_no_post_mmio */
-/* 0x0aad: ctx_xfer_done */
+ 0xad21f507,
+/* 0x0b05: ctx_xfer_no_post_mmio */
+/* 0x0b05: ctx_xfer_done */
0x0000f809,
0x00000000,
0x00000000,
@@ -977,4 +999,46 @@ uint32_t nvf0_grhub_code[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
index a47d49db5232..2a0b0f844299 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
@@ -30,6 +30,12 @@
#define GK110 0xf0
#define GK208 0x108
+#define NV_PGRAPH_TRAPPED_ADDR 0x400704
+#define NV_PGRAPH_TRAPPED_DATA_LO 0x400708
+#define NV_PGRAPH_TRAPPED_DATA_HI 0x40070c
+
+#define NV_PGRAPH_FE_OBJECT_TABLE(n) ((n) * 4 + 0x400700)
+
#define NV_PGRAPH_FECS_INTR_ACK 0x409004
#define NV_PGRAPH_FECS_INTR 0x409008
#define NV_PGRAPH_FECS_INTR_FWMTHD 0x00000400
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h
index fd1d380de094..1718ae4e8224 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h
@@ -3,5 +3,6 @@
#define E_BAD_COMMAND 0x00000001
#define E_CMD_OVERFLOW 0x00000002
+#define E_BAD_FWMTHD 0x00000003
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 1a2d56493cf6..20665c21d80e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -976,7 +976,6 @@ nv50_graph_init(struct nouveau_object *object)
break;
case 0xa0:
default:
- nv_wr32(priv, 0x402cc0, 0x00000000);
if (nv_device(priv)->chipset == 0xa0 ||
nv_device(priv)->chipset == 0xaa ||
nv_device(priv)->chipset == 0xac) {
@@ -991,10 +990,10 @@ nv50_graph_init(struct nouveau_object *object)
/* zero out zcull regions */
for (i = 0; i < 8; i++) {
- nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000);
- nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000);
- nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000);
- nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000);
+ nv_wr32(priv, 0x402c20 + (i * 0x10), 0x00000000);
+ nv_wr32(priv, 0x402c24 + (i * 0x10), 0x00000000);
+ nv_wr32(priv, 0x402c28 + (i * 0x10), 0x00000000);
+ nv_wr32(priv, 0x402c2c + (i * 0x10), 0x00000000);
}
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index bf7bdb1f291e..aa0838916354 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -789,17 +789,40 @@ nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv)
static void
nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
{
- u32 ustat = nv_rd32(priv, 0x409c18);
+ u32 stat = nv_rd32(priv, 0x409c18);
- if (ustat & 0x00000001)
- nv_error(priv, "CTXCTL ucode error\n");
- if (ustat & 0x00080000)
- nv_error(priv, "CTXCTL watchdog timeout\n");
- if (ustat & ~0x00080001)
- nv_error(priv, "CTXCTL 0x%08x\n", ustat);
+ if (stat & 0x00000001) {
+ u32 code = nv_rd32(priv, 0x409814);
+ if (code == E_BAD_FWMTHD) {
+ u32 class = nv_rd32(priv, 0x409808);
+ u32 addr = nv_rd32(priv, 0x40980c);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+ u32 data = nv_rd32(priv, 0x409810);
+
+ nv_error(priv, "FECS MTHD subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ subc, class, mthd, data);
- nvc0_graph_ctxctl_debug(priv);
- nv_wr32(priv, 0x409c20, ustat);
+ nv_wr32(priv, 0x409c20, 0x00000001);
+ stat &= ~0x00000001;
+ } else {
+ nv_error(priv, "FECS ucode error %d\n", code);
+ }
+ }
+
+ if (stat & 0x00080000) {
+ nv_error(priv, "FECS watchdog timeout\n");
+ nvc0_graph_ctxctl_debug(priv);
+ nv_wr32(priv, 0x409c20, 0x00080000);
+ stat &= ~0x00080000;
+ }
+
+ if (stat) {
+ nv_error(priv, "FECS 0x%08x\n", stat);
+ nvc0_graph_ctxctl_debug(priv);
+ nv_wr32(priv, 0x409c20, stat);
+ }
}
static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index 75203a99d902..ffc289198dd8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -38,6 +38,8 @@
#include <engine/fifo.h>
#include <engine/graph.h>
+#include "fuc/os.h"
+
#define GPC_MAX 32
#define TPC_MAX (GPC_MAX * 8)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index db1b39d08013..825f7bb46b67 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -84,6 +84,7 @@ extern struct nouveau_oclass *nv4e_i2c_oclass;
extern struct nouveau_oclass *nv50_i2c_oclass;
extern struct nouveau_oclass *nv94_i2c_oclass;
extern struct nouveau_oclass *nvd0_i2c_oclass;
+extern struct nouveau_oclass *gf117_i2c_oclass;
extern struct nouveau_oclass *nve0_i2c_oclass;
static inline int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
index 4ac1aa30ea11..0e62a3240144 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -307,7 +307,6 @@ calc_clk(struct nve0_clock_priv *priv,
info->dsrc = src0;
if (div0) {
info->ddiv |= 0x80000000;
- info->ddiv |= div0 << 8;
info->ddiv |= div0;
}
if (div1D) {
@@ -352,7 +351,7 @@ nve0_clock_prog_0(struct nve0_clock_priv *priv, int clk)
{
struct nve0_clock_info *info = &priv->eng[clk];
if (!info->ssel) {
- nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+ nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv);
nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
}
}
@@ -389,7 +388,10 @@ static void
nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk)
{
struct nve0_clock_info *info = &priv->eng[clk];
- nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+ if (info->ssel)
+ nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv);
+ else
+ nv_mask(priv, 0x137250 + (clk * 0x04), 0x0000003f, info->mdiv);
}
static void
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index 84c7efbc4f38..1ad3ea503133 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -262,8 +262,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
struct nve0_ram *ram = (void *)pfb->ram;
struct nve0_ramfuc *fuc = &ram->fuc;
struct nouveau_ram_data *next = ram->base.next;
- int vc = !(next->bios.ramcfg_11_02_08);
- int mv = !(next->bios.ramcfg_11_02_04);
+ int vc = !next->bios.ramcfg_11_02_08;
+ int mv = !next->bios.ramcfg_11_02_04;
u32 mask, data;
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -370,8 +370,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
}
}
- if ( (next->bios.ramcfg_11_02_40) ||
- (next->bios.ramcfg_11_07_10)) {
+ if (next->bios.ramcfg_11_02_40 ||
+ next->bios.ramcfg_11_07_10) {
ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
ram_nsec(fuc, 20000);
}
@@ -417,7 +417,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x10f694, 0xff00ff00, data);
}
- if (ram->mode == 2 && (next->bios.ramcfg_11_08_10))
+ if (ram->mode == 2 && next->bios.ramcfg_11_08_10)
data = 0x00000080;
else
data = 0x00000000;
@@ -425,13 +425,13 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
mask = 0x00070000;
data = 0x00000000;
- if (!(next->bios.ramcfg_11_02_80))
+ if (!next->bios.ramcfg_11_02_80)
data |= 0x03000000;
- if (!(next->bios.ramcfg_11_02_40))
+ if (!next->bios.ramcfg_11_02_40)
data |= 0x00002000;
- if (!(next->bios.ramcfg_11_07_10))
+ if (!next->bios.ramcfg_11_07_10)
data |= 0x00004000;
- if (!(next->bios.ramcfg_11_07_08))
+ if (!next->bios.ramcfg_11_07_08)
data |= 0x00000003;
else
data |= 0x74000000;
@@ -486,7 +486,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
data = mask = 0x00000000;
if (NOTE00(ramcfg_02_03 != 0)) {
- data |= (next->bios.ramcfg_11_02_03) << 8;
+ data |= next->bios.ramcfg_11_02_03 << 8;
mask |= 0x00000300;
}
if (NOTE00(ramcfg_01_10)) {
@@ -498,7 +498,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
data = mask = 0x00000000;
if (NOTE00(timing_30_07 != 0)) {
- data |= (next->bios.timing_20_30_07) << 28;
+ data |= next->bios.timing_20_30_07 << 28;
mask |= 0x70000000;
}
if (NOTE00(ramcfg_01_01)) {
@@ -510,7 +510,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
data = mask = 0x00000000;
if (NOTE00(timing_30_07 != 0)) {
- data |= (next->bios.timing_20_30_07) << 28;
+ data |= next->bios.timing_20_30_07 << 28;
mask |= 0x70000000;
}
if (NOTE00(ramcfg_01_02)) {
@@ -522,16 +522,16 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
mask = 0x33f00000;
data = 0x00000000;
- if (!(next->bios.ramcfg_11_01_04))
+ if (!next->bios.ramcfg_11_01_04)
data |= 0x20200000;
- if (!(next->bios.ramcfg_11_07_80))
+ if (!next->bios.ramcfg_11_07_80)
data |= 0x12800000;
/*XXX: see note above about there probably being some condition
* for the 10f824 stuff that uses ramcfg 3...
*/
- if ( (next->bios.ramcfg_11_03_f0)) {
+ if (next->bios.ramcfg_11_03_f0) {
if (next->bios.rammap_11_08_0c) {
- if (!(next->bios.ramcfg_11_07_80))
+ if (!next->bios.ramcfg_11_07_80)
mask |= 0x00000020;
else
data |= 0x00000020;
@@ -563,7 +563,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
}
- data = (next->bios.timing_20_30_07) << 8;
+ data = next->bios.timing_20_30_07 << 8;
if (next->bios.ramcfg_11_01_01)
data |= 0x80000000;
ram_mask(fuc, 0x100778, 0x00000700, data);
@@ -588,7 +588,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
- if ((next->bios.ramcfg_11_08_10) && (ram->mode == 2) /*XXX*/) {
+ if (next->bios.ramcfg_11_08_10 && (ram->mode == 2) /*XXX*/) {
u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
nve0_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
ram_nsec(fuc, 1000);
@@ -621,8 +621,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
data = ram_rd32(fuc, 0x10f978);
data &= ~0x00046144;
data |= 0x0000000b;
- if (!(next->bios.ramcfg_11_07_08)) {
- if (!(next->bios.ramcfg_11_07_04))
+ if (!next->bios.ramcfg_11_07_08) {
+ if (!next->bios.ramcfg_11_07_04)
data |= 0x0000200c;
else
data |= 0x00000000;
@@ -636,11 +636,11 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
ram_wr32(fuc, 0x10f830, data);
}
- if (!(next->bios.ramcfg_11_07_08)) {
+ if (!next->bios.ramcfg_11_07_08) {
data = 0x88020000;
- if ( (next->bios.ramcfg_11_07_04))
+ if ( next->bios.ramcfg_11_07_04)
data |= 0x10000000;
- if (!(next->bios.rammap_11_08_10))
+ if (!next->bios.rammap_11_08_10)
data |= 0x00080000;
} else {
data = 0xa40e0000;
@@ -689,8 +689,8 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
const u32 runk0 = ram->fN1 << 16;
const u32 runk1 = ram->fN1;
struct nouveau_ram_data *next = ram->base.next;
- int vc = !(next->bios.ramcfg_11_02_08);
- int mv = !(next->bios.ramcfg_11_02_04);
+ int vc = !next->bios.ramcfg_11_02_08;
+ int mv = !next->bios.ramcfg_11_02_04;
u32 mask, data;
ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -705,7 +705,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
}
ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
- if ((next->bios.ramcfg_11_03_f0))
+ if (next->bios.ramcfg_11_03_f0)
ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
@@ -761,7 +761,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
data = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
- data |= (next->bios.ramcfg_11_03_30) << 12;
+ data |= next->bios.ramcfg_11_03_30 << 16;
ram_wr32(fuc, 0x1373ec, data);
ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
@@ -793,8 +793,8 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
}
}
- if ( (next->bios.ramcfg_11_02_40) ||
- (next->bios.ramcfg_11_07_10)) {
+ if (next->bios.ramcfg_11_02_40 ||
+ next->bios.ramcfg_11_07_10) {
ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
ram_nsec(fuc, 20000);
}
@@ -810,13 +810,13 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
mask = 0x00010000;
data = 0x00000000;
- if (!(next->bios.ramcfg_11_02_80))
+ if (!next->bios.ramcfg_11_02_80)
data |= 0x03000000;
- if (!(next->bios.ramcfg_11_02_40))
+ if (!next->bios.ramcfg_11_02_40)
data |= 0x00002000;
- if (!(next->bios.ramcfg_11_07_10))
+ if (!next->bios.ramcfg_11_07_10)
data |= 0x00004000;
- if (!(next->bios.ramcfg_11_07_08))
+ if (!next->bios.ramcfg_11_07_08)
data |= 0x00000003;
else
data |= 0x14000000;
@@ -844,16 +844,16 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
mask = 0x33f00000;
data = 0x00000000;
- if (!(next->bios.ramcfg_11_01_04))
+ if (!next->bios.ramcfg_11_01_04)
data |= 0x20200000;
- if (!(next->bios.ramcfg_11_07_80))
+ if (!next->bios.ramcfg_11_07_80)
data |= 0x12800000;
/*XXX: see note above about there probably being some condition
* for the 10f824 stuff that uses ramcfg 3...
*/
- if ( (next->bios.ramcfg_11_03_f0)) {
+ if (next->bios.ramcfg_11_03_f0) {
if (next->bios.rammap_11_08_0c) {
- if (!(next->bios.ramcfg_11_07_80))
+ if (!next->bios.ramcfg_11_07_80)
mask |= 0x00000020;
else
data |= 0x00000020;
@@ -876,7 +876,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
data = next->bios.timing_20_2c_1fc0;
ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
- ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8);
+ ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);
ram_wr32(fuc, 0x10f090, 0x4000007f);
ram_nsec(fuc, 1000);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c
new file mode 100644
index 000000000000..fa891c39866b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+gf117_i2c_oclass = &(struct nouveau_i2c_impl) {
+ .base.handle = NV_SUBDEV(I2C, 0xd7),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_i2c_ctor,
+ .dtor = _nouveau_i2c_dtor,
+ .init = _nouveau_i2c_init,
+ .fini = _nouveau_i2c_fini,
+ },
+ .sclass = nvd0_i2c_sclass,
+ .pad_x = &nv04_i2c_pad_oclass,
+ .pad_s = &nv04_i2c_pad_oclass,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
index 7120124dceac..ebef970a0645 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
@@ -95,6 +95,23 @@ nve0_ibus_intr(struct nouveau_subdev *subdev)
}
static int
+nve0_ibus_init(struct nouveau_object *object)
+{
+ struct nve0_ibus_priv *priv = (void *)object;
+ int ret = nouveau_ibus_init(&priv->base);
+ if (ret == 0) {
+ nv_mask(priv, 0x122318, 0x0003ffff, 0x00001000);
+ nv_mask(priv, 0x12231c, 0x0003ffff, 0x00000200);
+ nv_mask(priv, 0x122310, 0x0003ffff, 0x00000800);
+ nv_mask(priv, 0x122348, 0x0003ffff, 0x00000100);
+ nv_mask(priv, 0x1223b0, 0x0003ffff, 0x00000fff);
+ nv_mask(priv, 0x122348, 0x0003ffff, 0x00000200);
+ nv_mask(priv, 0x122358, 0x0003ffff, 0x00002880);
+ }
+ return ret;
+}
+
+static int
nve0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -117,7 +134,7 @@ nve0_ibus_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_ibus_ctor,
.dtor = _nouveau_ibus_dtor,
- .init = _nouveau_ibus_init,
+ .init = nve0_ibus_init,
.fini = _nouveau_ibus_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
index 2284ecb1c9b8..c2bb616a8da5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
@@ -83,7 +83,7 @@ host_send:
// increment GET
add b32 $r1 0x1
and $r14 $r1 #fifo_qmaskf
- nv_iowr(NV_PPWR_FIFO_GET(0), $r1)
+ nv_iowr(NV_PPWR_FIFO_GET(0), $r14)
bra #host_send
host_send_done:
ret
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 4bd43a99fdcc..39a5dc150a05 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -1018,7 +1018,7 @@ uint32_t nv108_pwr_code[] = {
0xb600023f,
0x1ec40110,
0x04b0400f,
- 0xbd0001f6,
+ 0xbd000ef6,
0xc70ef404,
/* 0x0328: host_send_done */
/* 0x032a: host_recv */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index 5a73fa620978..254205cd5166 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -1124,7 +1124,7 @@ uint32_t nva3_pwr_code[] = {
0x0f1ec401,
0x04b007f1,
0xd00604b6,
- 0x04bd0001,
+ 0x04bd000e,
/* 0x03cb: host_send_done */
0xf8ba0ef4,
/* 0x03cd: host_recv */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index 4dba00d2dd1a..7ac87405d01b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -1124,7 +1124,7 @@ uint32_t nvc0_pwr_code[] = {
0x0f1ec401,
0x04b007f1,
0xd00604b6,
- 0x04bd0001,
+ 0x04bd000e,
/* 0x03cb: host_send_done */
0xf8ba0ef4,
/* 0x03cd: host_recv */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index 5e24c6bc041d..cd9ff1a73284 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -1033,7 +1033,7 @@ uint32_t nvd0_pwr_code[] = {
0xb6026b21,
0x1ec40110,
0xb007f10f,
- 0x0001d004,
+ 0x000ed004,
0x0ef404bd,
/* 0x0365: host_send_done */
/* 0x0367: host_recv */
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 630f6e84fc01..2c1e4aad7da3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -31,7 +31,6 @@
*/
#include <linux/backlight.h>
-#include <linux/acpi.h>
#include "nouveau_drm.h"
#include "nouveau_reg.h"
@@ -222,14 +221,6 @@ nouveau_backlight_init(struct drm_device *dev)
struct nouveau_device *device = nv_device(drm->device);
struct drm_connector *connector;
-#ifdef CONFIG_ACPI
- if (acpi_video_backlight_support()) {
- NV_INFO(drm, "ACPI backlight interface available, "
- "not registering our own\n");
- return 0;
- }
-#endif
-
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
connector->connector_type != DRM_MODE_CONNECTOR_eDP)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 26b5647188ef..47ad74255bf1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -736,6 +736,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
new_bo->bo.offset };
+ /* Keep vblanks on during flip, for the target crtc of this flip */
+ drm_vblank_get(dev, nouveau_crtc(crtc)->index);
+
/* Emit a page flip */
if (nv_device(drm->device)->card_type >= NV_50) {
ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
@@ -779,6 +782,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return 0;
fail_unreserve:
+ drm_vblank_put(dev, nouveau_crtc(crtc)->index);
ttm_bo_unreserve(&old_bo->bo);
fail_unpin:
mutex_unlock(&chan->cli->mutex);
@@ -817,6 +821,9 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
drm_send_vblank_event(dev, crtcid, s->event);
}
+ /* Give up ownership of vblank for page-flipped crtc */
+ drm_vblank_put(dev, s->crtc);
+
list_del(&s->head);
if (ps)
*ps = *s;
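
The nouveau change above takes a vblank reference for the target CRTC when the flip is emitted, drops it again on the error path, and releases it once the flip completes. A simplified sketch of that pairing (error handling trimmed; emit_flip() is a hypothetical stand-in for the driver's flip submission):

	#include <drm/drmP.h>	/* drm_vblank_get()/drm_vblank_put() in this kernel era */

	static int emit_flip(struct drm_device *dev, int crtc_index);	/* hypothetical */

	/* Every drm_vblank_get() taken before emitting a flip must be balanced
	 * by a drm_vblank_put() on completion or on the failure path. */
	static int flip_with_vblank_ref(struct drm_device *dev, int crtc_index)
	{
		int ret = drm_vblank_get(dev, crtc_index);	/* keep vblank irq enabled */
		if (ret)
			return ret;

		ret = emit_flip(dev, crtc_index);
		if (ret)
			drm_vblank_put(dev, crtc_index);	/* balance on failure */
		/* on success, the flip-complete handler calls drm_vblank_put() */
		return ret;
	}
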
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 26c12a3fe430..a03c73411a56 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1052,7 +1052,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
/* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */
- if (ASIC_IS_DCE5(rdev) && !ASIC_IS_DCE8(rdev) &&
+ if (ASIC_IS_DCE5(rdev) &&
(encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
(radeon_crtc->bpc > 8))
clock = radeon_crtc->adjusted_clock;
@@ -1136,6 +1136,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
u32 tmp, viewport_w, viewport_h;
int r;
+ bool bypass_lut = false;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1174,33 +1175,73 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
- switch (target_fb->bits_per_pixel) {
- case 8:
+ switch (target_fb->pixel_format) {
+ case DRM_FORMAT_C8:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
break;
- case 15:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_ARGB4444:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_ARGB1555:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+ break;
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_BGRA5551:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
break;
- case 16:
+ case DRM_FORMAT_RGB565:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
break;
- case 24:
- case 32:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+ /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_BGRA1010102:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+#ifdef __BIG_ENDIAN
+ fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+ /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
default:
- DRM_ERROR("Unsupported screen depth %d\n",
- target_fb->bits_per_pixel);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format));
return -EINVAL;
}
@@ -1329,6 +1370,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
+ /*
+ * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
+ * for > 8 bpc scanout to avoid truncating fb indices to their 8 MSBs and to
+ * retain the full precision throughout the pipeline.
+ */
+ WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + radeon_crtc->crtc_offset,
+ (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
+ ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+
+ if (bypass_lut)
+ DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
@@ -1396,6 +1449,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
u32 tmp, viewport_w, viewport_h;
int r;
+ bool bypass_lut = false;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1433,18 +1487,30 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
- switch (target_fb->bits_per_pixel) {
- case 8:
+ switch (target_fb->pixel_format) {
+ case DRM_FORMAT_C8:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
break;
- case 15:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_ARGB4444:
+ fb_format =
+ AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+ AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444;
+#ifdef __BIG_ENDIAN
+ fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
+ break;
+ case DRM_FORMAT_XRGB1555:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
+#ifdef __BIG_ENDIAN
+ fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
break;
- case 16:
+ case DRM_FORMAT_RGB565:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
@@ -1452,8 +1518,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
#endif
break;
- case 24:
- case 32:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
@@ -1461,9 +1527,20 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
#endif
break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ fb_format =
+ AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
+ AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010;
+#ifdef __BIG_ENDIAN
+ fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
+ /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+ bypass_lut = true;
+ break;
default:
- DRM_ERROR("Unsupported screen depth %d\n",
- target_fb->bits_per_pixel);
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(target_fb->pixel_format));
return -EINVAL;
}
@@ -1502,6 +1579,13 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
if (rdev->family >= CHIP_R600)
WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
+ /* LUT only has 256 slots for 8 bpc fb. Bypass for > 8 bpc scanout for precision */
+ WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset,
+ (bypass_lut ? AVIVO_LUT_10BIT_BYPASS_EN : 0), ~AVIVO_LUT_10BIT_BYPASS_EN);
+
+ if (bypass_lut)
+ DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
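
Both the DCE4+ and AVIVO paths above now key the framebuffer programming off the DRM fourcc pixel format instead of bits_per_pixel, and set the LUT bypass bit for the 10-bits-per-component formats, since the 256-entry hardware LUT can only index an 8 bpc framebuffer without truncation. A hedged sketch of that decision, using the fourcc codes from the hunk but a hypothetical helper name:

	#include <drm/drm_fourcc.h>	/* DRM_FORMAT_* codes */

	/* Formats wider than 8 bits per component cannot go through the
	 * 256-entry LUT without losing precision, so they bypass it. */
	static bool lut_must_be_bypassed(u32 pixel_format)
	{
		switch (pixel_format) {
		case DRM_FORMAT_XRGB2101010:
		case DRM_FORMAT_ARGB2101010:
		case DRM_FORMAT_BGRX1010102:
		case DRM_FORMAT_BGRA1010102:
			return true;	/* 10 bpc scanout */
		default:
			return false;	/* 8 bpc or less fits the LUT */
		}
	}
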
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index c5b1f2da3954..35f4182c63b6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -403,16 +403,18 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[DP_DPCD_SIZE];
- int ret, i;
+ int ret;
+
+ char dpcd_hex_dump[DP_DPCD_SIZE * 3];
ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
DP_DPCD_SIZE);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
- DRM_DEBUG_KMS("DPCD: ");
- for (i = 0; i < DP_DPCD_SIZE; i++)
- DRM_DEBUG_KMS("%02x ", msg[i]);
- DRM_DEBUG_KMS("\n");
+
+ hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
+ 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+ DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
radeon_dp_probe_oui(radeon_connector);
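
hex_dump_to_buffer() formats the whole DPCD block into a single string so it can be logged with one DRM_DEBUG_KMS() call instead of one fragment per byte (each DRM_DEBUG_KMS() emits its own prefixed line, so the old per-byte loop produced broken output). A standalone sketch of the same pattern; with a group size of 1 the buffer needs three characters per byte plus a terminating NUL:

	#include <linux/printk.h>
	#include <linux/types.h>

	/* Format up to 16 bytes of a blob as hex on one debug line. */
	static void dump_blob(const u8 *blob, size_t len)
	{
		char line[16 * 3 + 1];

		if (len > 16)
			len = 16;
		hex_dump_to_buffer(blob, len, 16, 1, line, sizeof(line), false);
		pr_debug("blob: %s\n", line);
	}
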
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index ae88660f34ea..0c6e1b55d968 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -1752,12 +1752,12 @@
#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
#define EOP_TCL1_ACTION_EN (1 << 16)
#define EOP_TC_ACTION_EN (1 << 17) /* L2 */
+#define EOP_TCL2_VOLATILE (1 << 24)
#define EOP_CACHE_POLICY(x) ((x) << 25)
/* 0 - LRU
* 1 - Stream
* 2 - Bypass
*/
-#define EOP_TCL2_VOLATILE (1 << 27)
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 5a9a5f4d7888..47d31e915758 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -1551,7 +1551,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
}
return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index a0f63ff5a5e9..333d143fca2c 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -116,6 +116,8 @@
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
# define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
# define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
+#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x6808
+# define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8)
#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
# define EVERGREEN_GRPH_ENDIAN_NONE 0
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 3f6e817d97ee..9ef8c38f2d66 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2726,7 +2726,7 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->caps_sclk_ds = true;
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
- pi->bapm_enable = false;
+ pi->bapm_enable = true;
pi->voltage_drop_t = 0;
pi->caps_sclk_throttle_low_notification = false;
pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 004c931606c4..01fc4888e6fe 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -1315,7 +1315,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
}
}
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 1dd0d32993d5..136b7bc7cd20 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -402,6 +402,7 @@
* block and vice versa. This applies to GRPH, CUR, etc.
*/
#define AVIVO_D1GRPH_LUT_SEL 0x6108
+# define AVIVO_LUT_10BIT_BYPASS_EN (1 << 8)
#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 4b0bbf88d5c0..29d9cc04c04e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -102,6 +102,7 @@ extern int radeon_runtime_pm;
extern int radeon_hard_reset;
extern int radeon_vm_size;
extern int radeon_vm_block_size;
+extern int radeon_deep_color;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -749,10 +750,6 @@ union radeon_irq_stat_regs {
struct cik_irq_stat_regs cik;
};
-#define RADEON_MAX_HPD_PINS 7
-#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_AFMT_BLOCKS 7
-
struct radeon_irq {
bool installed;
spinlock_t lock;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 30844814c25a..173f378428a9 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1227,11 +1227,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
rdev->clock.default_dispclk =
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
if (rdev->clock.default_dispclk == 0) {
- if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE6(rdev))
+ rdev->clock.default_dispclk = 60000; /* 600 Mhz */
+ else if (ASIC_IS_DCE5(rdev))
rdev->clock.default_dispclk = 54000; /* 540 Mhz */
else
rdev->clock.default_dispclk = 60000; /* 600 Mhz */
}
+ /* set a reasonable default for DP */
+ if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) {
+ DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+ rdev->clock.default_dispclk / 100);
+ rdev->clock.default_dispclk = 60000;
+ }
rdev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
rdev->clock.current_dispclk = rdev->clock.default_dispclk;
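
The dispclk values above are stored in units of 10 kHz (hence the /100 in the added log message), so 60000 corresponds to 600 MHz, 54000 to 540 MHz, and the DP lower bound of 53900 to 539 MHz: DCE6 parts that report no default, or a default below roughly 540 MHz, are bumped to 600 MHz so DisplayPort has enough display engine clock.
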
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5b1a611c30eb..8137b7a34696 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -199,6 +199,9 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
}
}
+ if ((radeon_deep_color == 0) && (bpc > 8))
+ bpc = 8;
+
DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
connector->name, connector->display_info.bpc, bpc);
@@ -1288,17 +1291,15 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
- else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
- if (ASIC_IS_DCE6(rdev)) {
- /* HDMI 1.3+ supports max clock of 340 Mhz */
- if (mode->clock > 340000)
- return MODE_CLOCK_HIGH;
- else
- return MODE_OK;
- } else
+ else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* HDMI 1.3+ supports max clock of 340 Mhz */
+ if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
- } else
+ else
+ return MODE_OK;
+ } else {
return MODE_CLOCK_HIGH;
+ }
}
/* check against the max pixel clock */
@@ -1549,6 +1550,8 @@ out:
static int radeon_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
@@ -1579,14 +1582,23 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
}
- return MODE_OK;
} else {
if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+ (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return radeon_dp_mode_valid_helper(connector, mode);
- else
- return MODE_OK;
+ } else {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ /* HDMI 1.3+ supports max clock of 340 Mhz */
+ if (mode->clock > 340000)
+ return MODE_CLOCK_HIGH;
+ } else {
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ }
+ }
}
+
+ return MODE_OK;
}
static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
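
The connector changes above converge on one pixel-clock policy: on DCE6+ parts driving an HDMI sink the mode clock may go up to 340 MHz (the HDMI 1.3+ TMDS limit), everything else is held to the 165 MHz single-link limit, and with radeon_deep_color left at its default the reported bpc is clamped to 8. A hedged sketch of the clock check, with a hypothetical helper name and the standard DRM mode-status codes:

	#include <drm/drm_modes.h>	/* enum drm_mode_status */

	static enum drm_mode_status
	check_tmds_clock(bool is_dce6, bool hdmi_sink, int clock_khz)
	{
		/* 340 MHz for HDMI 1.3+ capable hardware, 165 MHz otherwise */
		int limit_khz = (is_dce6 && hdmi_sink) ? 340000 : 165000;

		return clock_khz > limit_khz ? MODE_CLOCK_HIGH : MODE_OK;
	}
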
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 5ed617056b9c..13896edcf0b6 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -66,7 +66,8 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
(radeon_crtc->lut_b[i] << 0));
}
- WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
+ /* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
+ WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
}
static void dce4_crtc_load_lut(struct drm_crtc *crtc)
@@ -284,7 +285,6 @@ static void radeon_unpin_work_func(struct work_struct *__work)
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
- struct radeon_flip_work *work;
unsigned long flags;
u32 update_pending;
int vpos, hpos;
@@ -294,8 +294,11 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
return;
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
- work = radeon_crtc->flip_work;
- if (work == NULL) {
+ if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
+ "RADEON_FLIP_SUBMITTED(%d)\n",
+ radeon_crtc->flip_status,
+ RADEON_FLIP_SUBMITTED);
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
@@ -343,12 +346,17 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
work = radeon_crtc->flip_work;
- if (work == NULL) {
+ if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
+ DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
+ "RADEON_FLIP_SUBMITTED(%d)\n",
+ radeon_crtc->flip_status,
+ RADEON_FLIP_SUBMITTED);
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
/* Pageflip completed. Clean up. */
+ radeon_crtc->flip_status = RADEON_FLIP_NONE;
radeon_crtc->flip_work = NULL;
/* wakeup userspace */
@@ -357,8 +365,9 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+ drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
radeon_fence_unref(&work->fence);
- radeon_irq_kms_pflip_irq_get(rdev, work->crtc_id);
+ radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
queue_work(radeon_crtc->flip_queue, &work->unpin_work);
}
@@ -459,6 +468,12 @@ static void radeon_flip_work_func(struct work_struct *__work)
base &= ~7;
}
+ r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
+ if (r) {
+ DRM_ERROR("failed to get vblank before flip\n");
+ goto pflip_cleanup;
+ }
+
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -468,11 +483,22 @@ static void radeon_flip_work_func(struct work_struct *__work)
/* do the flip (mmio) */
radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
+ radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
up_read(&rdev->exclusive_lock);
return;
+pflip_cleanup:
+ if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) {
+ DRM_ERROR("failed to reserve new rbo in error path\n");
+ goto cleanup;
+ }
+ if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) {
+ DRM_ERROR("failed to unpin new rbo in error path\n");
+ }
+ radeon_bo_unreserve(work->new_rbo);
+
cleanup:
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
radeon_fence_unref(&work->fence);
@@ -526,7 +552,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (radeon_crtc->flip_work) {
+ if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
@@ -534,6 +560,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
kfree(work);
return -EBUSY;
}
+ radeon_crtc->flip_status = RADEON_FLIP_PENDING;
radeon_crtc->flip_work = work;
/* update crtc fb */
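
radeon_display.c now tracks each CRTC's flip through an explicit three-state machine instead of inferring state from flip_work being non-NULL, and holds a vblank reference for the whole lifetime of the flip (note also the completion path now calling radeon_irq_kms_pflip_irq_put() where it previously called _get(), fixing the reference balance). A rough, hypothetical illustration of the intended transitions, with the dev->event_lock locking omitted:

	enum flip_state { FLIP_NONE, FLIP_PENDING, FLIP_SUBMITTED };

	/* Hypothetical sketch: one step of the flip state machine. */
	static enum flip_state flip_next_state(enum flip_state s)
	{
		switch (s) {
		case FLIP_NONE:		/* page_flip ioctl queues the work     */
			return FLIP_PENDING;
		case FLIP_PENDING:	/* worker emits the mmio flip          */
			return FLIP_SUBMITTED;
		case FLIP_SUBMITTED:	/* vblank handler completes the flip   */
		default:
			return FLIP_NONE;
		}
	}
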
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 6e3017413386..cb1421369e3a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -175,6 +175,7 @@ int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
int radeon_vm_size = 4096;
int radeon_vm_block_size = 9;
+int radeon_deep_color = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -248,6 +249,9 @@ module_param_named(vm_size, radeon_vm_size, int, 0444);
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
+MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
+module_param_named(deep_color, radeon_deep_color, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
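
Because the new deep_color parameter is registered with 0444 permissions it can only be set at load time, e.g. "modprobe radeon deep_color=1" (or "radeon.deep_color=1" on the kernel command line for a built-in driver); it defaults to 0, which keeps the bpc clamp above in effect.
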
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ad0e4b8cc7e3..0592ddb0904b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -46,6 +46,10 @@ struct radeon_device;
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
+#define RADEON_MAX_HPD_PINS 7
+#define RADEON_MAX_CRTCS 6
+#define RADEON_MAX_AFMT_BLOCKS 7
+
enum radeon_rmx_type {
RMX_OFF,
RMX_FULL,
@@ -233,8 +237,8 @@ struct radeon_mode_info {
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
- struct radeon_crtc *crtcs[6];
- struct radeon_afmt *afmt[7];
+ struct radeon_crtc *crtcs[RADEON_MAX_CRTCS];
+ struct radeon_afmt *afmt[RADEON_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -302,6 +306,12 @@ struct radeon_atom_ss {
uint16_t amount;
};
+enum radeon_flip_status {
+ RADEON_FLIP_NONE,
+ RADEON_FLIP_PENDING,
+ RADEON_FLIP_SUBMITTED
+};
+
struct radeon_crtc {
struct drm_crtc base;
int crtc_id;
@@ -327,6 +337,7 @@ struct radeon_crtc {
/* page flipping */
struct workqueue_struct *flip_queue;
struct radeon_flip_work *flip_work;
+ enum radeon_flip_status flip_status;
/* pll sharing */
struct radeon_atom_ss ss;
bool ss_enabled;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 12c663e86ca1..e447e390d09a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -73,8 +73,10 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
rdev->pm.dpm.ac_power = true;
else
rdev->pm.dpm.ac_power = false;
- if (rdev->asic->dpm.enable_bapm)
- radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+ if (rdev->family == CHIP_ARUBA) {
+ if (rdev->asic->dpm.enable_bapm)
+ radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+ }
mutex_unlock(&rdev->pm.mutex);
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 899d9126cad6..eecff6bbd341 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -495,7 +495,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
mutex_unlock(&vm->mutex);
r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
- RADEON_GPU_PAGE_SIZE, false,
+ RADEON_GPU_PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
if (r)
return r;
@@ -992,7 +992,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
return -ENOMEM;
}
- r = radeon_bo_create(rdev, pd_size, align, false,
+ r = radeon_bo_create(rdev, pd_size, align, true,
RADEON_GEM_DOMAIN_VRAM, NULL,
&vm->page_directory);
if (r)
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2a2822c03329..20da6ff183df 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,7 +1874,15 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
- pi->enable_bapm = false;
+ /* There are stability issues reported on laptops with
+ * bapm enabled when switching between AC and battery
+ * power. At the same time, some desktop boards hang
+ * if it's not enabled and dpm is enabled.
+ */
+ if (rdev->flags & RADEON_IS_MOBILITY)
+ pi->enable_bapm = false;
+ else
+ pi->enable_bapm = true;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index e1038a945f40..7094b92d1ec7 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -283,7 +283,7 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
int udl_driver_load(struct drm_device *dev, unsigned long flags)
{
struct udl_device *udl;
- int ret;
+ int ret = -ENOMEM;
DRM_DEBUG("\n");
udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
@@ -300,7 +300,6 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
}
if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
- ret = -ENOMEM;
DRM_ERROR("udl_alloc_urb_list failed\n");
goto err;
}
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 458cdf6d81e8..ce0ab951f507 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
- vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o
+ vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
+ vmwgfx_cmdbuf_res.o \
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
new file mode 100644
index 000000000000..bfeb4b1f2acf
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -0,0 +1,341 @@
+/**************************************************************************
+ *
+ * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
+
+enum vmw_cmdbuf_res_state {
+ VMW_CMDBUF_RES_COMMITED,
+ VMW_CMDBUF_RES_ADD,
+ VMW_CMDBUF_RES_DEL
+};
+
+/**
+ * struct vmw_cmdbuf_res - Command buffer managed resource entry.
+ *
+ * @res: Refcounted pointer to a struct vmw_resource.
+ * @hash: Hash entry for the manager hash table.
+ * @head: List head used either by the staging list or the manager list
+ * of committed resources.
+ * @state: Staging state of this resource entry.
+ * @man: Pointer to a resource manager for this entry.
+ */
+struct vmw_cmdbuf_res {
+ struct vmw_resource *res;
+ struct drm_hash_item hash;
+ struct list_head head;
+ enum vmw_cmdbuf_res_state state;
+ struct vmw_cmdbuf_res_manager *man;
+};
+
+/**
+ * struct vmw_cmdbuf_res_manager - Command buffer resource manager.
+ *
+ * @resources: Hash table containing staged and committed command buffer
+ * resources
+ * @list: List of committed command buffer resources.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @resources and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_cmdbuf_res_manager {
+ struct drm_open_hash resources;
+ struct list_head list;
+ struct vmw_private *dev_priv;
+};
+
+
+/**
+ * vmw_cmdbuf_res_lookup - Look up a command buffer resource
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @resource_type: The resource type, that combined with the user key
+ * identifies the resource.
+ * @user_key: The user key.
+ *
+ * Returns a valid refcounted struct vmw_resource pointer on success,
+ * an error pointer on failure.
+ */
+struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key)
+{
+ struct drm_hash_item *hash;
+ int ret;
+ unsigned long key = user_key | (res_type << 24);
+
+ ret = drm_ht_find_item(&man->resources, key, &hash);
+ if (unlikely(ret != 0))
+ return ERR_PTR(ret);
+
+ return vmw_resource_reference
+ (drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
+}
+
+/**
+ * vmw_cmdbuf_res_free - Free a command buffer resource.
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @entry: Pointer to a struct vmw_cmdbuf_res.
+ *
+ * Frees a struct vmw_cmdbuf_res entry and drops its reference to the
+ * struct vmw_resource.
+ */
+static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_cmdbuf_res *entry)
+{
+ list_del(&entry->head);
+ WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
+ vmw_resource_unreference(&entry->res);
+ kfree(entry);
+}
+
+/**
+ * vmw_cmdbuf_res_commit - Commit a list of command buffer resource actions
+ *
+ * @list: Caller's list of command buffer resource actions.
+ *
+ * This function commits a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_cmdbuf_res_commit(struct list_head *list)
+{
+ struct vmw_cmdbuf_res *entry, *next;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ list_del(&entry->head);
+ switch (entry->state) {
+ case VMW_CMDBUF_RES_ADD:
+ entry->state = VMW_CMDBUF_RES_COMMITED;
+ list_add_tail(&entry->head, &entry->man->list);
+ break;
+ case VMW_CMDBUF_RES_DEL:
+ vmw_resource_unreference(&entry->res);
+ kfree(entry);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @list: Caller's list of command buffer resource action
+ *
+ * This function reverts a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_cmdbuf_res_revert(struct list_head *list)
+{
+ struct vmw_cmdbuf_res *entry, *next;
+ int ret;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ switch (entry->state) {
+ case VMW_CMDBUF_RES_ADD:
+ vmw_cmdbuf_res_free(entry->man, entry);
+ break;
+ case VMW_CMDBUF_RES_DEL:
+ ret = drm_ht_insert_item(&entry->man->resources,
+ &entry->hash);
+ list_del(&entry->head);
+ list_add_tail(&entry->head, &entry->man->list);
+ entry->state = VMW_CMDBUF_RES_COMMITED;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_cmdbuf_res_add - Stage a command buffer managed resource for addition.
+ *
+ * @man: Pointer to the command buffer resource manager.
+ * @res_type: The resource type.
+ * @user_key: The user-space id of the resource.
+ * @res: Valid (refcount != 0) pointer to a struct vmw_resource.
+ * @list: The staging list.
+ *
+ * This function allocates a struct vmw_cmdbuf_res entry and adds the
+ * resource to the hash table of the manager identified by @man. The
+ * entry is then put on the staging list identified by @list.
+ */
+int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key,
+ struct vmw_resource *res,
+ struct list_head *list)
+{
+ struct vmw_cmdbuf_res *cres;
+ int ret;
+
+ cres = kzalloc(sizeof(*cres), GFP_KERNEL);
+ if (unlikely(cres == NULL))
+ return -ENOMEM;
+
+ cres->hash.key = user_key | (res_type << 24);
+ ret = drm_ht_insert_item(&man->resources, &cres->hash);
+ if (unlikely(ret != 0))
+ goto out_invalid_key;
+
+ cres->state = VMW_CMDBUF_RES_ADD;
+ cres->res = vmw_resource_reference(res);
+ cres->man = man;
+ list_add_tail(&cres->head, list);
+
+out_invalid_key:
+ return ret;
+}
+
+/**
+ * vmw_cmdbuf_res_remove - Stage a command buffer managed resource for removal.
+ *
+ * @man: Pointer to the command buffer resource manager.
+ * @res_type: The resource type.
+ * @user_key: The user-space id of the resource.
+ * @list: The staging list.
+ *
+ * This function looks up the struct vmw_cmdbuf_res entry from the manager
+ * hash table and, if it exists, removes it. Depending on its current staging
+ * state it then either removes the entry from the staging list or adds it
+ * to it with a staging state of removal.
+ */
+int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key,
+ struct list_head *list)
+{
+ struct vmw_cmdbuf_res *entry;
+ struct drm_hash_item *hash;
+ int ret;
+
+ ret = drm_ht_find_item(&man->resources, user_key, &hash);
+ if (likely(ret != 0))
+ return -EINVAL;
+
+ entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
+
+ switch (entry->state) {
+ case VMW_CMDBUF_RES_ADD:
+ vmw_cmdbuf_res_free(man, entry);
+ break;
+ case VMW_CMDBUF_RES_COMMITED:
+ (void) drm_ht_remove_item(&man->resources, &entry->hash);
+ list_del(&entry->head);
+ entry->state = VMW_CMDBUF_RES_DEL;
+ list_add_tail(&entry->head, list);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_res_man_create - Allocate a command buffer managed resource
+ * manager.
+ *
+ * @dev_priv: Pointer to a struct vmw_private
+ *
+ * Allocates and initializes a command buffer managed resource manager. Returns
+ * an error pointer on failure.
+ */
+struct vmw_cmdbuf_res_manager *
+vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
+{
+ struct vmw_cmdbuf_res_manager *man;
+ int ret;
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (man == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ man->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&man->list);
+ ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
+ if (ret == 0)
+ return man;
+
+ kfree(man);
+ return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cmdbuf_res_man_destroy - Destroy a command buffer managed resource
+ * manager.
+ *
+ * @man: Pointer to the manager to destroy.
+ *
+ * This function destroys a command buffer managed resource manager and
+ * unreferences / frees all command buffer managed resources and entries
+ * associated with it.
+ */
+void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
+{
+ struct vmw_cmdbuf_res *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &man->list, head)
+ vmw_cmdbuf_res_free(man, entry);
+
+ kfree(man);
+}
+
+/**
+ *
+ * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
+ * resource manager
+ *
+ * Returns the approximate allocation size of a command buffer managed
+ * resource manager.
+ */
+size_t vmw_cmdbuf_res_man_size(void)
+{
+ static size_t res_man_size;
+
+ if (unlikely(res_man_size == 0))
+ res_man_size =
+ ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
+ ttm_round_pot(sizeof(struct hlist_head) <<
+ VMW_CMDBUF_RES_MAN_HT_ORDER);
+
+ return res_man_size;
+}
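
The manager added above is meant to be driven from the execbuf path: additions and removals are first staged on a caller-provided list, then either committed once the command stream has reached the device or reverted if submission fails. A hedged usage sketch built from the functions in this file (the surrounding helper and submission_failed() are hypothetical, the declarations from vmwgfx_drv.h are assumed in scope, and the cmdbuf mutex that protects the manager is omitted):

	static int stage_and_commit(struct vmw_cmdbuf_res_manager *man,
				    struct vmw_resource *res, u32 user_key)
	{
		struct list_head staged;
		int ret;

		INIT_LIST_HEAD(&staged);

		ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
					 user_key, res, &staged);
		if (ret)
			return ret;

		/* ... build and submit the command stream here ... */

		if (submission_failed()) {		/* hypothetical condition */
			vmw_cmdbuf_res_revert(&staged);	/* undo the staged add */
			return -EINVAL;
		}

		vmw_cmdbuf_res_commit(&staged);		/* entry becomes visible via lookup */
		return 0;
	}
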
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 8bb26dcd9eae..5ac92874404d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -33,6 +33,7 @@ struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
struct vmw_ctx_binding_state cbs;
+ struct vmw_cmdbuf_res_manager *man;
};
@@ -103,7 +104,8 @@ static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
-
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
@@ -113,9 +115,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
if (res->func->destroy == vmw_gb_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
+ vmw_cmdbuf_res_man_destroy(uctx->man);
mutex_lock(&dev_priv->binding_mutex);
- (void) vmw_context_binding_state_kill
- (&container_of(res, struct vmw_user_context, res)->cbs);
+ (void) vmw_context_binding_state_kill(&uctx->cbs);
(void) vmw_gb_context_destroy(res);
mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
@@ -152,13 +154,16 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
ret = vmw_resource_init(dev_priv, res, true,
res_free, &vmw_gb_context_func);
res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+ if (unlikely(ret != 0))
+ goto out_err;
- if (unlikely(ret != 0)) {
- if (res_free)
- res_free(res);
- else
- kfree(res);
- return ret;
+ if (dev_priv->has_mob) {
+ uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
+ if (unlikely(IS_ERR(uctx->man))) {
+ ret = PTR_ERR(uctx->man);
+ uctx->man = NULL;
+ goto out_err;
+ }
}
memset(&uctx->cbs, 0, sizeof(uctx->cbs));
@@ -166,6 +171,13 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
+
+out_err:
+ if (res_free)
+ res_free(res);
+ else
+ kfree(res);
+ return ret;
}
static int vmw_context_init(struct vmw_private *dev_priv,
@@ -471,7 +483,8 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
*/
if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+ vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
+ ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
@@ -901,3 +914,8 @@ struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}
+
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
+{
+ return container_of(ctx, struct vmw_user_context, res)->man;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 246a62bab378..f31a75494e07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -946,7 +946,6 @@ static void vmw_postclose(struct drm_device *dev,
drm_master_put(&vmw_fp->locked_master);
}
- vmw_compat_shader_man_destroy(vmw_fp->shman);
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
}
@@ -966,16 +965,10 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
- vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
- if (IS_ERR(vmw_fp->shman))
- goto out_no_shman;
-
file_priv->driver_priv = vmw_fp;
return 0;
-out_no_shman:
- ttm_object_file_release(&vmw_fp->tfile);
out_no_tfile:
kfree(vmw_fp);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6b252a887ae2..c1811750cc8d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,10 +40,10 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20140325"
+#define VMWGFX_DRIVER_DATE "20140704"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 6
-#define VMWGFX_DRIVER_PATCHLEVEL 0
+#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -75,14 +75,11 @@
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
-struct vmw_compat_shader_manager;
-
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
bool gb_aware;
- struct vmw_compat_shader_manager *shman;
};
struct vmw_dma_buffer {
@@ -124,6 +121,10 @@ struct vmw_resource {
void (*hw_destroy) (struct vmw_resource *res);
};
+
+/*
+ * Resources that are managed using ioctls.
+ */
enum vmw_res_type {
vmw_res_context,
vmw_res_surface,
@@ -132,6 +133,15 @@ enum vmw_res_type {
vmw_res_max
};
+/*
+ * Resources that are managed using command streams.
+ */
+enum vmw_cmdbuf_res_type {
+ vmw_cmdbuf_res_compat_shader
+};
+
+struct vmw_cmdbuf_res_manager;
+
struct vmw_cursor_snooper {
struct drm_crtc *crtc;
size_t age;
@@ -341,7 +351,7 @@ struct vmw_sw_context{
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
struct vmw_ctx_binding_state staged_bindings;
- struct list_head staged_shaders;
+ struct list_head staged_cmd_res;
};
struct vmw_legacy_display;
@@ -974,7 +984,8 @@ extern void vmw_context_binding_res_list_kill(struct list_head *head);
extern void vmw_context_binding_res_list_scrub(struct list_head *head);
extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
-
+extern struct vmw_cmdbuf_res_manager *
+vmw_context_res_man(struct vmw_resource *ctx);
/*
* Surface management - vmwgfx_surface.c
*/
@@ -1008,27 +1019,42 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
- SVGA3dShaderType shader_type,
- u32 *user_key);
-extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
- struct list_head *list);
-extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
- struct list_head *list);
-extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
- u32 user_key,
- SVGA3dShaderType shader_type,
- struct list_head *list);
-extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
+ struct vmw_cmdbuf_res_manager *man,
u32 user_key, const void *bytecode,
SVGA3dShaderType shader_type,
size_t size,
- struct ttm_object_file *tfile,
struct list_head *list);
-extern struct vmw_compat_shader_manager *
-vmw_compat_shader_man_create(struct vmw_private *dev_priv);
-extern void
-vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern struct vmw_resource *
+vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type);
+
+/*
+ * Command buffer managed resources - vmwgfx_cmdbuf_res.c
+ */
+
+extern struct vmw_cmdbuf_res_manager *
+vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
+extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
+extern size_t vmw_cmdbuf_res_man_size(void);
+extern struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key);
+extern void vmw_cmdbuf_res_revert(struct list_head *list);
+extern void vmw_cmdbuf_res_commit(struct list_head *list);
+extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key,
+ struct vmw_resource *res,
+ struct list_head *list);
+extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key,
+ struct list_head *list);
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 87df0b3674fd..7bfdaa163a33 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -422,28 +422,91 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
return 0;
}
+
+/**
+ * vmw_cmd_res_reloc_add - Add a resource to a software context's
+ * relocation- and validation lists.
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @id_loc: Pointer to where the id that needs translation is located.
+ * @res: Valid pointer to a struct vmw_resource.
+ * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
+ * used for this resource is returned here.
+ */
+static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ uint32_t *id_loc,
+ struct vmw_resource *res,
+ struct vmw_resource_val_node **p_val)
+{
+ int ret;
+ struct vmw_resource_val_node *node;
+
+ *p_val = NULL;
+ ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+ res,
+ id_loc - sw_context->buf_start);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ ret = vmw_resource_val_add(sw_context, res, &node);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ if (res_type == vmw_res_context && dev_priv->has_mob &&
+ node->first_usage) {
+
+ /*
+ * Put contexts first on the list to be able to exit
+ * list traversal for contexts early.
+ */
+ list_del(&node->head);
+ list_add(&node->head, &sw_context->resource_list);
+
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+ if (unlikely(ret != 0))
+ goto out_err;
+ node->staged_bindings =
+ kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+ if (node->staged_bindings == NULL) {
+ DRM_ERROR("Failed to allocate context binding "
+ "information.\n");
+ goto out_err;
+ }
+ INIT_LIST_HEAD(&node->staged_bindings->list);
+ }
+
+ if (p_val)
+ *p_val = node;
+
+out_err:
+ return ret;
+}
+
+
/**
- * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visisble type specific information.
- * @id: user-space resource id handle.
* @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
* @p_val: Pointer to pointer to resource validalidation node. Populated
* on exit.
*/
static int
-vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t id,
- uint32_t *id_loc,
- struct vmw_resource_val_node **p_val)
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
@@ -451,7 +514,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
struct vmw_resource_val_node *node;
int ret;
- if (id == SVGA3D_INVALID_ID) {
+ if (*id_loc == SVGA3D_INVALID_ID) {
if (p_val)
*p_val = NULL;
if (res_type == vmw_res_context) {
@@ -466,7 +529,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
* resource
*/
- if (likely(rcache->valid && id == rcache->handle)) {
+ if (likely(rcache->valid && *id_loc == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false;
@@ -480,49 +543,28 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
ret = vmw_user_resource_lookup_handle(dev_priv,
sw_context->fp->tfile,
- id,
+ *id_loc,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned) id);
+ (unsigned) *id_loc);
dump_stack();
return ret;
}
rcache->valid = true;
rcache->res = res;
- rcache->handle = id;
-
- ret = vmw_resource_relocation_add(&sw_context->res_relocations,
- res,
- id_loc - sw_context->buf_start);
- if (unlikely(ret != 0))
- goto out_no_reloc;
+ rcache->handle = *id_loc;
- ret = vmw_resource_val_add(sw_context, res, &node);
+ ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+ res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
rcache->node = node;
if (p_val)
*p_val = node;
-
- if (dev_priv->has_mob && node->first_usage &&
- res_type == vmw_res_context) {
- ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
- if (unlikely(ret != 0))
- goto out_no_reloc;
- node->staged_bindings =
- kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
- if (node->staged_bindings == NULL) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
- goto out_no_reloc;
- }
- INIT_LIST_HEAD(&node->staged_bindings->list);
- }
-
vmw_resource_unreference(&res);
return 0;
@@ -534,31 +576,6 @@ out_no_reloc:
}
/**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
- * on the resource validate list unless it's already there.
- *
- * @dev_priv: Pointer to a device private structure.
- * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
- * @converter: User-space visisble type specific information.
- * @id_loc: Pointer to the location in the command buffer currently being
- * parsed from where the user-space resource id handle is located.
- * @p_val: Pointer to pointer to resource validalidation node. Populated
- * on exit.
- */
-static int
-vmw_cmd_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t *id_loc,
- struct vmw_resource_val_node **p_val)
-{
- return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
- converter, *id_loc, id_loc, p_val);
-}
-
-/**
* vmw_rebind_contexts - Rebind all resources previously bound to
* referenced contexts.
*
@@ -572,8 +589,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
int ret;
list_for_each_entry(val, &sw_context->resource_list, head) {
- if (likely(!val->staged_bindings))
- continue;
+ if (unlikely(!val->staged_bindings))
+ break;
ret = vmw_context_rebind_all(val->res);
if (unlikely(ret != 0)) {
@@ -1626,13 +1643,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
} *cmd;
int ret;
size_t size;
+ struct vmw_resource_val_node *val;
cmd = container_of(header, struct vmw_shader_define_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- NULL);
+ &val);
if (unlikely(ret != 0))
return ret;
@@ -1640,11 +1658,11 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return 0;
size = cmd->header.size - sizeof(cmd->body);
- ret = vmw_compat_shader_add(sw_context->fp->shman,
+ ret = vmw_compat_shader_add(dev_priv,
+ vmw_context_res_man(val->res),
cmd->body.shid, cmd + 1,
cmd->body.type, size,
- sw_context->fp->tfile,
- &sw_context->staged_shaders);
+ &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
@@ -1672,23 +1690,24 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
SVGA3dCmdDestroyShader body;
} *cmd;
int ret;
+ struct vmw_resource_val_node *val;
cmd = container_of(header, struct vmw_shader_destroy_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- NULL);
+ &val);
if (unlikely(ret != 0))
return ret;
if (unlikely(!dev_priv->has_mob))
return 0;
- ret = vmw_compat_shader_remove(sw_context->fp->shman,
+ ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
cmd->body.shid,
cmd->body.type,
- &sw_context->staged_shaders);
+ &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
@@ -1715,7 +1734,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
- struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource_val_node *ctx_node, *res_node = NULL;
+ struct vmw_ctx_bindinfo bi;
+ struct vmw_resource *res = NULL;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -1727,32 +1748,40 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- if (dev_priv->has_mob) {
- struct vmw_ctx_bindinfo bi;
- struct vmw_resource_val_node *res_node;
- u32 shid = cmd->body.shid;
-
- if (shid != SVGA3D_INVALID_ID)
- (void) vmw_compat_shader_lookup(sw_context->fp->shman,
- cmd->body.type,
- &shid);
-
- ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
- vmw_res_shader,
- user_shader_converter,
- shid,
- &cmd->body.shid, &res_node);
+ if (!dev_priv->has_mob)
+ return 0;
+
+ if (cmd->body.shid != SVGA3D_INVALID_ID) {
+ res = vmw_compat_shader_lookup
+ (vmw_context_res_man(ctx_node->res),
+ cmd->body.shid,
+ cmd->body.type);
+
+ if (!IS_ERR(res)) {
+ ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
+ vmw_res_shader,
+ &cmd->body.shid, res,
+ &res_node);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ }
+
+ if (!res_node) {
+ ret = vmw_cmd_res_check(dev_priv, sw_context,
+ vmw_res_shader,
+ user_shader_converter,
+ &cmd->body.shid, &res_node);
if (unlikely(ret != 0))
return ret;
-
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_shader;
- bi.i1.shader_type = cmd->body.type;
- return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}
- return 0;
+ bi.ctx = ctx_node->res;
+ bi.res = res_node ? res_node->res : NULL;
+ bi.bt = vmw_ctx_binding_shader;
+ bi.i1.shader_type = cmd->body.type;
+ return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}
/**
@@ -2394,6 +2423,8 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
}
}
+
+
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
@@ -2453,7 +2484,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
sw_context->res_ht_initialized = true;
}
- INIT_LIST_HEAD(&sw_context->staged_shaders);
+ INIT_LIST_HEAD(&sw_context->staged_cmd_res);
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
@@ -2548,8 +2579,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
list_splice_init(&sw_context->resource_list, &resource_list);
- vmw_compat_shaders_commit(sw_context->fp->shman,
- &sw_context->staged_shaders);
+ vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
@@ -2576,8 +2606,7 @@ out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
- vmw_compat_shaders_revert(sw_context->fp->shman,
- &sw_context->staged_shaders);
+ vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a89ad938eacf..b031b48dbb3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info)
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
- vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index c1559eeaffe9..8719fb3cccc9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,8 +29,6 @@
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
-#define VMW_COMPAT_SHADER_HT_ORDER 12
-
struct vmw_shader {
struct vmw_resource res;
SVGA3dShaderType type;
@@ -42,49 +40,8 @@ struct vmw_user_shader {
struct vmw_shader shader;
};
-/**
- * enum vmw_compat_shader_state - Staging state for compat shaders
- */
-enum vmw_compat_shader_state {
- VMW_COMPAT_COMMITED,
- VMW_COMPAT_ADD,
- VMW_COMPAT_DEL
-};
-
-/**
- * struct vmw_compat_shader - Metadata for compat shaders.
- *
- * @handle: The TTM handle of the guest backed shader.
- * @tfile: The struct ttm_object_file the guest backed shader is registered
- * with.
- * @hash: Hash item for lookup.
- * @head: List head for staging lists or the compat shader manager list.
- * @state: Staging state.
- *
- * The structure is protected by the cmdbuf lock.
- */
-struct vmw_compat_shader {
- u32 handle;
- struct ttm_object_file *tfile;
- struct drm_hash_item hash;
- struct list_head head;
- enum vmw_compat_shader_state state;
-};
-
-/**
- * struct vmw_compat_shader_manager - Compat shader manager.
- *
- * @shaders: Hash table containing staged and commited compat shaders
- * @list: List of commited shaders.
- * @dev_priv: Pointer to a device private structure.
- *
- * @shaders and @list are protected by the cmdbuf mutex for now.
- */
-struct vmw_compat_shader_manager {
- struct drm_open_hash shaders;
- struct list_head list;
- struct vmw_private *dev_priv;
-};
+static uint64_t vmw_user_shader_size;
+static uint64_t vmw_shader_size;
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
@@ -98,8 +55,6 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);
-static uint64_t vmw_user_shader_size;
-
static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER,
.base_obj_to_res = vmw_user_shader_base_to_res,
@@ -347,6 +302,16 @@ static void vmw_user_shader_free(struct vmw_resource *res)
vmw_user_shader_size);
}
+static void vmw_shader_free(struct vmw_resource *res)
+{
+ struct vmw_shader *shader = vmw_res_to_shader(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ kfree(shader);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_shader_size);
+}
+
/**
* This function is called when user space has no more references on the
* base object. It releases the base-object's reference on the resource object.
@@ -371,13 +336,13 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE);
}
-static int vmw_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buffer,
- size_t shader_size,
- size_t offset,
- SVGA3dShaderType shader_type,
- struct ttm_object_file *tfile,
- u32 *handle)
+static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type,
+ struct ttm_object_file *tfile,
+ u32 *handle)
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
@@ -442,6 +407,56 @@ out:
}
+struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type)
+{
+ struct vmw_shader *shader;
+ struct vmw_resource *res;
+ int ret;
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by the maximum number of shaders anyway.
+ */
+ if (unlikely(vmw_shader_size == 0))
+ vmw_shader_size =
+ ttm_round_pot(sizeof(struct vmw_shader)) + 128;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_shader_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for shader "
+ "creation.\n");
+ goto out_err;
+ }
+
+ shader = kzalloc(sizeof(*shader), GFP_KERNEL);
+ if (unlikely(shader == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_shader_size);
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ res = &shader->res;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+ ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+ offset, shader_type, buffer,
+ vmw_shader_free);
+
+out_err:
+ return ret ? ERR_PTR(ret) : res;
+}
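/*
 * A minimal usage sketch (not taken verbatim from the patch; the IS_ERR()
 * handling is a suggested pattern): vmw_shader_alloc() hands back either a
 * refcounted struct vmw_resource or an ERR_PTR() value, and the caller is
 * expected to drop its reference once the resource has been staged:
 *
 *	res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
 *				 vmw_compat_shader_key(user_key, shader_type),
 *				 res, list);
 *	vmw_resource_unreference(&res);
 */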
+
+
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -490,8 +505,8 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_bad_arg;
- ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
- shader_type, tfile, &arg->shader_handle);
+ ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+ shader_type, tfile, &arg->shader_handle);
ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
@@ -500,202 +515,83 @@ out_bad_arg:
}
/**
- * vmw_compat_shader_lookup - Look up a compat shader
- *
- * @man: Pointer to the compat shader manager.
- * @shader_type: The shader type, that combined with the user_key identifies
- * the shader.
- * @user_key: On entry, this should be a pointer to the user_key.
- * On successful exit, it will contain the guest-backed shader's TTM handle.
+ * vmw_compat_shader_id_ok - Check whether a compat shader user key and
+ * shader type are within valid bounds.
*
- * Returns 0 on success. Non-zero on failure, in which case the value pointed
- * to by @user_key is unmodified.
- */
-int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
- SVGA3dShaderType shader_type,
- u32 *user_key)
-{
- struct drm_hash_item *hash;
- int ret;
- unsigned long key = *user_key | (shader_type << 24);
-
- ret = drm_ht_find_item(&man->shaders, key, &hash);
- if (unlikely(ret != 0))
- return ret;
-
- *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
- hash)->handle;
-
- return 0;
-}
-
-/**
- * vmw_compat_shader_free - Free a compat shader.
- *
- * @man: Pointer to the compat shader manager.
- * @entry: Pointer to a struct vmw_compat_shader.
- *
- * Frees a struct vmw_compat_shder entry and drops its reference to the
- * guest backed shader.
- */
-static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
- struct vmw_compat_shader *entry)
-{
- list_del(&entry->head);
- WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
- WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
- TTM_REF_USAGE));
- kfree(entry);
-}
-
-/**
- * vmw_compat_shaders_commit - Commit a list of compat shader actions.
- *
- * @man: Pointer to the compat shader manager.
- * @list: Caller's list of compat shader actions.
+ * @user_key: User space id of the shader.
+ * @shader_type: Shader type.
*
- * This function commits a list of compat shader additions or removals.
- * It is typically called when the execbuf ioctl call triggering these
- * actions has commited the fifo contents to the device.
+ * Returns true if valid, false if not.
*/
-void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
- struct list_head *list)
+static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
- struct vmw_compat_shader *entry, *next;
-
- list_for_each_entry_safe(entry, next, list, head) {
- list_del(&entry->head);
- switch (entry->state) {
- case VMW_COMPAT_ADD:
- entry->state = VMW_COMPAT_COMMITED;
- list_add_tail(&entry->head, &man->list);
- break;
- case VMW_COMPAT_DEL:
- ttm_ref_object_base_unref(entry->tfile, entry->handle,
- TTM_REF_USAGE);
- kfree(entry);
- break;
- default:
- BUG();
- break;
- }
- }
+ return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}
/**
- * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
*
- * @man: Pointer to the compat shader manager.
- * @list: Caller's list of compat shader actions.
+ * @user_key: User space id of the shader.
+ * @shader_type: Shader type.
*
- * This function reverts a list of compat shader additions or removals.
- * It is typically called when the execbuf ioctl call triggering these
- * actions failed for some reason, and the command stream was never
- * submitted.
+ * Returns a hash key suitable for a command buffer managed resource
+ * manager hash table.
*/
-void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
- struct list_head *list)
+static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
- struct vmw_compat_shader *entry, *next;
- int ret;
-
- list_for_each_entry_safe(entry, next, list, head) {
- switch (entry->state) {
- case VMW_COMPAT_ADD:
- vmw_compat_shader_free(man, entry);
- break;
- case VMW_COMPAT_DEL:
- ret = drm_ht_insert_item(&man->shaders, &entry->hash);
- list_del(&entry->head);
- list_add_tail(&entry->head, &man->list);
- entry->state = VMW_COMPAT_COMMITED;
- break;
- default:
- BUG();
- break;
- }
- }
+ return user_key | (shader_type << 20);
}
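/*
 * Illustrative example (SVGA3D_SHADERTYPE_VS/PS merely stand in for any two
 * distinct shader types): since vmw_compat_shader_id_ok() limits user keys
 * to 20 bits and shader types to values below 16, placing the type in bits
 * 20 and up can never make two different shaders share a key:
 *
 *	u32 a = vmw_compat_shader_key(5, SVGA3D_SHADERTYPE_VS);
 *	u32 b = vmw_compat_shader_key(5, SVGA3D_SHADERTYPE_PS);
 *
 * a and b differ even though both use user key 5.
 */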
/**
* vmw_compat_shader_remove - Stage a compat shader for removal.
*
- * @man: Pointer to the compat shader manager
+ * @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @shader_type: Shader type.
- * @list: Caller's list of staged shader actions.
- *
- * This function stages a compat shader for removal and removes the key from
- * the shader manager's hash table. If the shader was previously only staged
- * for addition it is completely removed (But the execbuf code may keep a
- * reference if it was bound to a context between addition and removal). If
- * it was previously commited to the manager, it is staged for removal.
+ * @list: Caller's list of staged command buffer resource actions.
*/
-int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
u32 user_key, SVGA3dShaderType shader_type,
struct list_head *list)
{
- struct vmw_compat_shader *entry;
- struct drm_hash_item *hash;
- int ret;
-
- ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
- &hash);
- if (likely(ret != 0))
+ if (!vmw_compat_shader_id_ok(user_key, shader_type))
return -EINVAL;
- entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
-
- switch (entry->state) {
- case VMW_COMPAT_ADD:
- vmw_compat_shader_free(man, entry);
- break;
- case VMW_COMPAT_COMMITED:
- (void) drm_ht_remove_item(&man->shaders, &entry->hash);
- list_del(&entry->head);
- entry->state = VMW_COMPAT_DEL;
- list_add_tail(&entry->head, list);
- break;
- default:
- BUG();
- break;
- }
-
- return 0;
+ return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
+ vmw_compat_shader_key(user_key,
+ shader_type),
+ list);
}
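/*
 * Sketch of the staged-removal flow as driven from the execbuf path earlier
 * in this patch (vmw_cmd_shader_destroy() and vmw_execbuf_process()): the
 * removal only takes effect once the caller commits the staging list, and
 * is undone if the caller reverts it:
 *
 *	ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
 *				       cmd->body.shid, cmd->body.type,
 *				       &sw_context->staged_cmd_res);
 *	...
 *	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);	(on success)
 *	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);	(on error)
 */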
/**
- * vmw_compat_shader_add - Create a compat shader and add the
- * key to the manager
+ * vmw_compat_shader_add - Create a compat shader and stage it for addition
+ * as a command buffer managed resource.
*
- * @man: Pointer to the compat shader manager
+ * @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @bytecode: Pointer to the bytecode of the shader.
* @shader_type: Shader type.
- * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
- * to be created with.
- * @list: Caller's list of staged shader actions.
+ * @list: Caller's list of staged command buffer resource actions.
*
- * Note that only the key is added to the shader manager's hash table.
- * The shader is not yet added to the shader manager's list of shaders.
*/
-int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+int vmw_compat_shader_add(struct vmw_private *dev_priv,
+ struct vmw_cmdbuf_res_manager *man,
u32 user_key, const void *bytecode,
SVGA3dShaderType shader_type,
size_t size,
- struct ttm_object_file *tfile,
struct list_head *list)
{
struct vmw_dma_buffer *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
- struct vmw_compat_shader *compat;
- u32 handle;
int ret;
+ struct vmw_resource *res;
- if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+ if (!vmw_compat_shader_id_ok(user_key, shader_type))
return -EINVAL;
/* Allocate and pin a DMA buffer */
@@ -703,7 +599,7 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
if (unlikely(buf == NULL))
return -ENOMEM;
- ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+ ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
true, vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto out;
@@ -728,84 +624,40 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
WARN_ON(ret != 0);
ttm_bo_unreserve(&buf->base);
- /* Create a guest-backed shader container backed by the dma buffer */
- ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
- tfile, &handle);
- vmw_dmabuf_unreference(&buf);
+ res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
- if (unlikely(ret != 0))
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
goto no_reserve;
+ }
- /*
- * Create a compat shader structure and stage it for insertion
- * in the manager
- */
- compat = kzalloc(sizeof(*compat), GFP_KERNEL);
- if (compat == NULL)
- goto no_compat;
-
- compat->hash.key = user_key | (shader_type << 24);
- ret = drm_ht_insert_item(&man->shaders, &compat->hash);
- if (unlikely(ret != 0))
- goto out_invalid_key;
-
- compat->state = VMW_COMPAT_ADD;
- compat->handle = handle;
- compat->tfile = tfile;
- list_add_tail(&compat->head, list);
-
- return 0;
-out_invalid_key:
- kfree(compat);
-no_compat:
- ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+ ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
+ vmw_compat_shader_key(user_key, shader_type),
+ res, list);
+ vmw_resource_unreference(&res);
no_reserve:
+ vmw_dmabuf_unreference(&buf);
out:
return ret;
}
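/*
 * Example call site, taken from the vmw_cmd_shader_define() hunk earlier in
 * this patch: the compat shader is created from the bytecode that follows
 * the command header and staged on the sw_context's list:
 *
 *	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(val->res),
 *				    cmd->body.shid, cmd + 1,
 *				    cmd->body.type, size,
 *				    &sw_context->staged_cmd_res);
 */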
/**
- * vmw_compat_shader_man_create - Create a compat shader manager
- *
- * @dev_priv: Pointer to a device private structure.
- *
- * Typically done at file open time. If successful returns a pointer to a
- * compat shader manager. Otherwise returns an error pointer.
- */
-struct vmw_compat_shader_manager *
-vmw_compat_shader_man_create(struct vmw_private *dev_priv)
-{
- struct vmw_compat_shader_manager *man;
- int ret;
-
- man = kzalloc(sizeof(*man), GFP_KERNEL);
- if (man == NULL)
- return ERR_PTR(-ENOMEM);
-
- man->dev_priv = dev_priv;
- INIT_LIST_HEAD(&man->list);
- ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
- if (ret == 0)
- return man;
-
- kfree(man);
- return ERR_PTR(ret);
-}
-
-/**
- * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ * vmw_compat_shader_lookup - Look up a compat shader
*
- * @man: Pointer to the shader manager to destroy.
+ * @man: Pointer to the command buffer managed resource manager identifying
+ * the shader namespace.
+ * @user_key: The user space id of the shader.
+ * @shader_type: The shader type.
*
- * Typically done at file close time.
+ * Returns a refcounted pointer to a struct vmw_resource if the shader was
+ * found. An error pointer otherwise.
*/
-void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+struct vmw_resource *
+vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key,
+ SVGA3dShaderType shader_type)
{
- struct vmw_compat_shader *entry, *next;
-
- mutex_lock(&man->dev_priv->cmdbuf_mutex);
- list_for_each_entry_safe(entry, next, &man->list, head)
- vmw_compat_shader_free(man, entry);
+ if (!vmw_compat_shader_id_ok(user_key, shader_type))
+ return ERR_PTR(-EINVAL);
- mutex_unlock(&man->dev_priv->cmdbuf_mutex);
- kfree(man);
+ return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
+ vmw_compat_shader_key(user_key,
+ shader_type));
}
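/*
 * Sketch of the lookup path used by vmw_cmd_set_shader() earlier in this
 * patch: the returned resource is refcounted, so the caller registers its
 * relocation and then drops the reference:
 *
 *	res = vmw_compat_shader_lookup(vmw_context_res_man(ctx_node->res),
 *				       cmd->body.shid, cmd->body.type);
 *	if (!IS_ERR(res)) {
 *		ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
 *					    vmw_res_shader,
 *					    &cmd->body.shid, res, &res_node);
 *		vmw_resource_unreference(&res);
 *	}
 */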
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 719788ce7d9f..04e7b2eafbdd 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -730,6 +730,12 @@ int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
+bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
+{
+ return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
+}
+EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
+
int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
{
struct ipu_soc *ipu = channel->ipu;
@@ -747,6 +753,22 @@ int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
}
EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
+int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(ms);
+ ipu_cm_write(ipu, BIT(irq % 32), IPU_INT_STAT(irq / 32));
+ while (!(ipu_cm_read(ipu, IPU_INT_STAT(irq / 32)) & BIT(irq % 32))) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cpu_relax();
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_wait_interrupt);
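/*
 * Illustrative pairing, as used by ipu_dp_disable_channel() later in this
 * patch: ipu_idmac_channel_busy() reports whether a channel still has work
 * outstanding, and ipu_wait_interrupt() bounds the wait for the matching
 * end-of-frame interrupt:
 *
 *	if (ipu_idmac_channel_busy(priv->ipu, IPUV3_CHANNEL_MEM_BG_SYNC))
 *		ipu_wait_interrupt(priv->ipu, IPU_IRQ_DP_SF_END, 50);
 */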
+
int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
{
struct ipu_soc *ipu = channel->ipu;
@@ -942,7 +964,8 @@ static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
for_each_set_bit(bit, &status, 32) {
- irq = irq_linear_revmap(ipu->domain, regs[i] * 32 + bit);
+ irq = irq_linear_revmap(ipu->domain,
+ regs[i] * 32 + bit);
if (irq)
generic_handle_irq(irq);
}
@@ -975,15 +998,22 @@ static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
- enum ipu_channel_irq irq_type)
+int ipu_map_irq(struct ipu_soc *ipu, int irq)
{
- int irq = irq_linear_revmap(ipu->domain, irq_type + channel->num);
+ int virq;
- if (!irq)
- irq = irq_create_mapping(ipu->domain, irq_type + channel->num);
+ virq = irq_linear_revmap(ipu->domain, irq);
+ if (!virq)
+ virq = irq_create_mapping(ipu->domain, irq);
- return irq;
+ return virq;
+}
+EXPORT_SYMBOL_GPL(ipu_map_irq);
+
+int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
+ enum ipu_channel_irq irq_type)
+{
+ return ipu_map_irq(ipu, irq_type + channel->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
@@ -1123,7 +1153,8 @@ static int ipu_irq_init(struct ipu_soc *ipu)
}
ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
- handle_level_irq, 0, IRQF_VALID, 0);
+ handle_level_irq, 0,
+ IRQF_VALID, 0);
if (ret < 0) {
dev_err(ipu->dev, "failed to alloc generic irq chips\n");
irq_domain_remove(ipu->domain);
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 9f1e5efa3acf..2326c752d89b 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <video/imx-ipu-v3.h>
@@ -90,6 +91,7 @@ enum ipu_dc_map {
IPU_DC_MAP_RGB565,
IPU_DC_MAP_GBR24, /* TVEv2 */
IPU_DC_MAP_BGR666,
+ IPU_DC_MAP_LVDS666,
IPU_DC_MAP_BGR24,
};
@@ -109,6 +111,9 @@ struct ipu_dc_priv {
struct device *dev;
struct ipu_dc channels[IPU_DC_NUM_CHANNELS];
struct mutex mutex;
+ struct completion comp;
+ int dc_irq;
+ int dp_irq;
};
static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority)
@@ -152,6 +157,8 @@ static int ipu_pixfmt_to_map(u32 fmt)
return IPU_DC_MAP_GBR24;
case V4L2_PIX_FMT_BGR666:
return IPU_DC_MAP_BGR666;
+ case v4l2_fourcc('L', 'V', 'D', '6'):
+ return IPU_DC_MAP_LVDS666;
case V4L2_PIX_FMT_BGR24:
return IPU_DC_MAP_BGR24;
default:
@@ -219,12 +226,16 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
writel(0x0, dc->base + DC_WR_CH_ADDR);
writel(width, priv->dc_reg + DC_DISP_CONF2(dc->di));
- ipu_module_enable(priv->ipu, IPU_CONF_DC_EN);
-
return 0;
}
EXPORT_SYMBOL_GPL(ipu_dc_init_sync);
+void ipu_dc_enable(struct ipu_soc *ipu)
+{
+ ipu_module_enable(ipu, IPU_CONF_DC_EN);
+}
+EXPORT_SYMBOL_GPL(ipu_dc_enable);
+
void ipu_dc_enable_channel(struct ipu_dc *dc)
{
int di;
@@ -238,41 +249,55 @@ void ipu_dc_enable_channel(struct ipu_dc *dc)
}
EXPORT_SYMBOL_GPL(ipu_dc_enable_channel);
+static irqreturn_t dc_irq_handler(int irq, void *dev_id)
+{
+ struct ipu_dc *dc = dev_id;
+ u32 reg;
+
+ reg = readl(dc->base + DC_WR_CH_CONF);
+ reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
+ writel(reg, dc->base + DC_WR_CH_CONF);
+
+ /* The Freescale BSP kernel clears DIx_COUNTER_RELEASE here */
+
+ complete(&dc->priv->comp);
+ return IRQ_HANDLED;
+}
+
void ipu_dc_disable_channel(struct ipu_dc *dc)
{
struct ipu_dc_priv *priv = dc->priv;
+ int irq, ret;
u32 val;
- int irq = 0, timeout = 50;
+ /* TODO: Handle MEM_FG_SYNC differently from MEM_BG_SYNC */
if (dc->chno == 1)
- irq = IPU_IRQ_DC_FC_1;
+ irq = priv->dc_irq;
else if (dc->chno == 5)
- irq = IPU_IRQ_DP_SF_END;
+ irq = priv->dp_irq;
else
return;
- /* should wait for the interrupt here */
- mdelay(50);
+ init_completion(&priv->comp);
+ enable_irq(irq);
+ ret = wait_for_completion_timeout(&priv->comp, msecs_to_jiffies(50));
+ disable_irq(irq);
+ if (ret <= 0) {
+ dev_warn(priv->dev, "DC stop timeout after 50 ms\n");
- if (dc->di == 0)
- val = 0x00000002;
- else
- val = 0x00000020;
-
- /* Wait for DC triple buffer to empty */
- while ((readl(priv->dc_reg + DC_STAT) & val) != val) {
- usleep_range(2000, 20000);
- timeout -= 2;
- if (timeout <= 0)
- break;
+ val = readl(dc->base + DC_WR_CH_CONF);
+ val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
+ writel(val, dc->base + DC_WR_CH_CONF);
}
-
- val = readl(dc->base + DC_WR_CH_CONF);
- val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
- writel(val, dc->base + DC_WR_CH_CONF);
}
EXPORT_SYMBOL_GPL(ipu_dc_disable_channel);
+void ipu_dc_disable(struct ipu_soc *ipu)
+{
+ ipu_module_disable(ipu, IPU_CONF_DC_EN);
+}
+EXPORT_SYMBOL_GPL(ipu_dc_disable);
+
static void ipu_dc_map_config(struct ipu_dc_priv *priv, enum ipu_dc_map map,
int byte_num, int offset, int mask)
{
@@ -339,7 +364,7 @@ int ipu_dc_init(struct ipu_soc *ipu, struct device *dev,
struct ipu_dc_priv *priv;
static int channel_offsets[] = { 0, 0x1c, 0x38, 0x54, 0x58, 0x5c,
0x78, 0, 0x94, 0xb4};
- int i;
+ int i, ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -360,13 +385,31 @@ int ipu_dc_init(struct ipu_soc *ipu, struct device *dev,
priv->channels[i].base = priv->dc_reg + channel_offsets[i];
}
+ priv->dc_irq = ipu_map_irq(ipu, IPU_IRQ_DC_FC_1);
+ if (!priv->dc_irq)
+ return -EINVAL;
+ ret = devm_request_irq(dev, priv->dc_irq, dc_irq_handler, 0, NULL,
+ &priv->channels[1]);
+ if (ret < 0)
+ return ret;
+ disable_irq(priv->dc_irq);
+ priv->dp_irq = ipu_map_irq(ipu, IPU_IRQ_DP_SF_END);
+ if (!priv->dp_irq)
+ return -EINVAL;
+ ret = devm_request_irq(dev, priv->dp_irq, dc_irq_handler, 0, NULL,
+ &priv->channels[5]);
+ if (ret < 0)
+ return ret;
+ disable_irq(priv->dp_irq);
+
writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(1) |
DC_WR_CH_CONF_PROG_DI_ID,
priv->channels[1].base + DC_WR_CH_CONF);
writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(0),
priv->channels[5].base + DC_WR_CH_CONF);
- writel(DC_GEN_SYNC_1_6_SYNC | DC_GEN_SYNC_PRIORITY_1, priv->dc_reg + DC_GEN);
+ writel(DC_GEN_SYNC_1_6_SYNC | DC_GEN_SYNC_PRIORITY_1,
+ priv->dc_reg + DC_GEN);
ipu->dc_priv = priv;
@@ -397,6 +440,12 @@ int ipu_dc_init(struct ipu_soc *ipu, struct device *dev,
ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 1, 11, 0xfc); /* green */
ipu_dc_map_config(priv, IPU_DC_MAP_BGR666, 2, 17, 0xfc); /* red */
+ /* lvds666 */
+ ipu_dc_map_clear(priv, IPU_DC_MAP_LVDS666);
+ ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 0, 5, 0xfc); /* blue */
+ ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 1, 13, 0xfc); /* green */
+ ipu_dc_map_config(priv, IPU_DC_MAP_LVDS666, 2, 21, 0xfc); /* red */
+
/* bgr24 */
ipu_dc_map_clear(priv, IPU_DC_MAP_BGR24);
ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 2, 7, 0xff); /* red */
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index 42e60b447ae7..c490ba4384fc 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -595,7 +595,7 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
}
}
- if (!sig->clk_pol)
+ if (sig->clk_pol)
di_gen |= DI_GEN_POLARITY_DISP_CLK;
ipu_di_write(di, di_gen, DI_GENERAL);
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index e1493ab36ca2..042c3958e2a0 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -28,7 +28,12 @@
#define DMFC_GENERAL1 0x0014
#define DMFC_GENERAL2 0x0018
#define DMFC_IC_CTRL 0x001c
-#define DMFC_STAT 0x0020
+#define DMFC_WR_CHAN_ALT 0x0020
+#define DMFC_WR_CHAN_DEF_ALT 0x0024
+#define DMFC_DP_CHAN_ALT 0x0028
+#define DMFC_DP_CHAN_DEF_ALT 0x002c
+#define DMFC_GENERAL1_ALT 0x0030
+#define DMFC_STAT 0x0034
#define DMFC_WR_CHAN_1_28 0
#define DMFC_WR_CHAN_2_41 8
@@ -133,6 +138,20 @@ int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc)
}
EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel);
+static void ipu_dmfc_wait_fifos(struct ipu_dmfc_priv *priv)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ while ((readl(priv->base + DMFC_STAT) & 0x02fff000) != 0x02fff000) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(priv->dev,
+ "Timeout waiting for DMFC FIFOs to clear\n");
+ break;
+ }
+ cpu_relax();
+ }
+}
+
void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
{
struct ipu_dmfc_priv *priv = dmfc->priv;
@@ -141,8 +160,10 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
priv->use_count--;
- if (!priv->use_count)
+ if (!priv->use_count) {
+ ipu_dmfc_wait_fifos(priv);
ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN);
+ }
if (priv->use_count < 0)
priv->use_count = 0;
diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c
index e17fa3f7c4b6..98686edbcdbb 100644
--- a/drivers/gpu/ipu-v3/ipu-dp.c
+++ b/drivers/gpu/ipu-v3/ipu-dp.c
@@ -215,10 +215,9 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
}
EXPORT_SYMBOL_GPL(ipu_dp_setup_channel);
-int ipu_dp_enable_channel(struct ipu_dp *dp)
+int ipu_dp_enable(struct ipu_soc *ipu)
{
- struct ipu_flow *flow = to_flow(dp);
- struct ipu_dp_priv *priv = flow->priv;
+ struct ipu_dp_priv *priv = ipu->dp_priv;
mutex_lock(&priv->mutex);
@@ -227,15 +226,28 @@ int ipu_dp_enable_channel(struct ipu_dp *dp)
priv->use_count++;
- if (dp->foreground) {
- u32 reg;
+ mutex_unlock(&priv->mutex);
- reg = readl(flow->base + DP_COM_CONF);
- reg |= DP_COM_CONF_FG_EN;
- writel(reg, flow->base + DP_COM_CONF);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_dp_enable);
- ipu_srm_dp_sync_update(priv->ipu);
- }
+int ipu_dp_enable_channel(struct ipu_dp *dp)
+{
+ struct ipu_flow *flow = to_flow(dp);
+ struct ipu_dp_priv *priv = flow->priv;
+ u32 reg;
+
+ if (!dp->foreground)
+ return 0;
+
+ mutex_lock(&priv->mutex);
+
+ reg = readl(flow->base + DP_COM_CONF);
+ reg |= DP_COM_CONF_FG_EN;
+ writel(reg, flow->base + DP_COM_CONF);
+
+ ipu_srm_dp_sync_update(priv->ipu);
mutex_unlock(&priv->mutex);
@@ -247,25 +259,38 @@ void ipu_dp_disable_channel(struct ipu_dp *dp)
{
struct ipu_flow *flow = to_flow(dp);
struct ipu_dp_priv *priv = flow->priv;
+ u32 reg, csc;
+
+ if (!dp->foreground)
+ return;
mutex_lock(&priv->mutex);
- priv->use_count--;
+ reg = readl(flow->base + DP_COM_CONF);
+ csc = reg & DP_COM_CONF_CSC_DEF_MASK;
+ if (csc == DP_COM_CONF_CSC_DEF_FG)
+ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
- if (dp->foreground) {
- u32 reg, csc;
+ reg &= ~DP_COM_CONF_FG_EN;
+ writel(reg, flow->base + DP_COM_CONF);
- reg = readl(flow->base + DP_COM_CONF);
- csc = reg & DP_COM_CONF_CSC_DEF_MASK;
- if (csc == DP_COM_CONF_CSC_DEF_FG)
- reg &= ~DP_COM_CONF_CSC_DEF_MASK;
+ writel(0, flow->base + DP_FG_POS);
+ ipu_srm_dp_sync_update(priv->ipu);
- reg &= ~DP_COM_CONF_FG_EN;
- writel(reg, flow->base + DP_COM_CONF);
+ if (ipu_idmac_channel_busy(priv->ipu, IPUV3_CHANNEL_MEM_BG_SYNC))
+ ipu_wait_interrupt(priv->ipu, IPU_IRQ_DP_SF_END, 50);
- writel(0, flow->base + DP_FG_POS);
- ipu_srm_dp_sync_update(priv->ipu);
- }
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_dp_disable_channel);
+
+void ipu_dp_disable(struct ipu_soc *ipu)
+{
+ struct ipu_dp_priv *priv = ipu->dp_priv;
+
+ mutex_lock(&priv->mutex);
+
+ priv->use_count--;
if (!priv->use_count)
ipu_module_disable(priv->ipu, IPU_CONF_DP_EN);
@@ -275,7 +300,7 @@ void ipu_dp_disable_channel(struct ipu_dp *dp)
mutex_unlock(&priv->mutex);
}
-EXPORT_SYMBOL_GPL(ipu_dp_disable_channel);
+EXPORT_SYMBOL_GPL(ipu_dp_disable);
struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow)
{
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index acf181183f0b..c93f50ec04f7 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -188,6 +188,9 @@ void ipu_srm_dp_sync_update(struct ipu_soc *ipu);
int ipu_module_enable(struct ipu_soc *ipu, u32 mask);
int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
+bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno);
+int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms);
+
int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id,
unsigned long base, u32 module, struct clk *ipu_clk);
void ipu_di_exit(struct ipu_soc *ipu, int id);