Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 44
-rw-r--r--  drivers/gpu/drm/drm_info.c | 4
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 33
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 90
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 86
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 98
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo_regs.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/gpuobj.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/mm.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/mm.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/object.h | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/clock.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/pll.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/fan.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_tv.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 60
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 64
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 386
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 228
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 1
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 12
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 24
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 4
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 12
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 2
88 files changed, 1257 insertions, 659 deletions
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 09e11a5d921a..fd9d0af4d536 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -206,7 +206,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
size_t size;
int ret;
- DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
sizes->surface_bpp);
@@ -220,7 +220,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
obj = drm_gem_cma_create(dev, size);
- if (!obj)
+ if (IS_ERR(obj))
return -ENOMEM;
fbi = framebuffer_alloc(0, dev->dev);
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7ef1b673e1be..133b4132983e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -121,6 +121,8 @@ int drm_open(struct inode *inode, struct file *filp)
int minor_id = iminor(inode);
struct drm_minor *minor;
int retcode = 0;
+ int need_setup = 0;
+ struct address_space *old_mapping;
minor = idr_find(&drm_minors_idr, minor_id);
if (!minor)
@@ -132,23 +134,37 @@ int drm_open(struct inode *inode, struct file *filp)
if (drm_device_is_unplugged(dev))
return -ENODEV;
+ if (!dev->open_count++)
+ need_setup = 1;
+ mutex_lock(&dev->struct_mutex);
+ old_mapping = dev->dev_mapping;
+ if (old_mapping == NULL)
+ dev->dev_mapping = &inode->i_data;
+ /* ihold ensures nobody can remove inode with our i_data */
+ ihold(container_of(dev->dev_mapping, struct inode, i_data));
+ inode->i_mapping = dev->dev_mapping;
+ filp->f_mapping = dev->dev_mapping;
+ mutex_unlock(&dev->struct_mutex);
+
retcode = drm_open_helper(inode, filp, dev);
- if (!retcode) {
- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
- if (!dev->open_count++)
- retcode = drm_setup(dev);
- }
- if (!retcode) {
- mutex_lock(&dev->struct_mutex);
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = &inode->i_data;
- /* ihold ensures nobody can remove inode with our i_data */
- ihold(container_of(dev->dev_mapping, struct inode, i_data));
- inode->i_mapping = dev->dev_mapping;
- filp->f_mapping = dev->dev_mapping;
- mutex_unlock(&dev->struct_mutex);
+ if (retcode)
+ goto err_undo;
+ atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+ if (need_setup) {
+ retcode = drm_setup(dev);
+ if (retcode)
+ goto err_undo;
}
+ return 0;
+err_undo:
+ mutex_lock(&dev->struct_mutex);
+ filp->f_mapping = old_mapping;
+ inode->i_mapping = old_mapping;
+ iput(container_of(dev->dev_mapping, struct inode, i_data));
+ dev->dev_mapping = old_mapping;
+ mutex_unlock(&dev->struct_mutex);
+ dev->open_count--;
return retcode;
}
EXPORT_SYMBOL(drm_open);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index cdf8b1e7602d..d4b20ceda3fb 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -205,8 +205,6 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct seq_file *m = data;
- seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
-
seq_printf(m, "%6d %8zd %7d %8d\n",
obj->name, obj->size,
atomic_read(&obj->handle_count),
@@ -239,7 +237,7 @@ int drm_vma_info(struct seq_file *m, void *data)
mutex_lock(&dev->struct_mutex);
seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
atomic_read(&dev->vma_count),
- high_memory, (void *)virt_to_phys(high_memory));
+ high_memory, (void *)(unsigned long)virt_to_phys(high_memory));
list_for_each_entry(pt, &dev->vmalist, head) {
vma = pt->vma;
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index aaeb6f8d69ce..b8a282ea8751 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -64,7 +64,6 @@ int drm_get_platform_dev(struct platform_device *platdev,
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- dev_set_drvdata(&platdev->dev, dev);
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
goto err_g1;
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 59a26e577b57..fc345d4ebb03 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,6 +1,6 @@
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
- depends on DRM && PLAT_SAMSUNG
+ depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 18c271862ca8..0f68a2872673 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -374,6 +374,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
exynos_connector->encoder_id = encoder->base.id;
exynos_connector->manager = manager;
exynos_connector->dpms = DRM_MODE_DPMS_OFF;
+ connector->dpms = DRM_MODE_DPMS_OFF;
connector->encoder = encoder;
err = drm_mode_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index e51503fbaf2b..241ad1eeec64 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -43,12 +43,14 @@
* @manager: specific encoder has its own manager to control a hardware
* appropriately and we can access a hardware drawing on this manager.
* @dpms: store the encoder dpms value.
+ * @updated: indicate whether overlay data updating is needed or not.
*/
struct exynos_drm_encoder {
struct drm_crtc *old_crtc;
struct drm_encoder drm_encoder;
struct exynos_drm_manager *manager;
- int dpms;
+ int dpms;
+ bool updated;
};
static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode)
@@ -85,7 +87,9 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
if (manager_ops && manager_ops->apply)
- manager_ops->apply(manager->dev);
+ if (!exynos_encoder->updated)
+ manager_ops->apply(manager->dev);
+
exynos_drm_connector_power(encoder, mode);
exynos_encoder->dpms = mode;
break;
@@ -94,6 +98,7 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_OFF:
exynos_drm_connector_power(encoder, mode);
exynos_encoder->dpms = mode;
+ exynos_encoder->updated = false;
break;
default:
DRM_ERROR("unspecified mode %d\n", mode);
@@ -205,13 +210,22 @@ static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
{
- struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_manager *manager = exynos_encoder->manager;
struct exynos_drm_manager_ops *manager_ops = manager->ops;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (manager_ops && manager_ops->commit)
manager_ops->commit(manager->dev);
+
+ /*
+ * this will avoid one issue that overlay data is updated to
+ * real hardware two times.
+ * And this variable will be used to check if the data was
+ * already updated or not by exynos_drm_encoder_dpms function.
+ */
+ exynos_encoder->updated = true;
}
static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
@@ -401,19 +415,6 @@ void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
manager_ops->dpms(manager->dev, mode);
/*
- * set current mode to new one so that data aren't updated into
- * registers by drm_helper_connector_dpms two times.
- *
- * in case that drm_crtc_helper_set_mode() is called,
- * overlay_ops->commit() and manager_ops->commit() callbacks
- * can be called two times, first at drm_crtc_helper_set_mode()
- * and second at drm_helper_connector_dpms().
- * so with this setting, when drm_helper_connector_dpms() is called
- * encoder->funcs->dpms() will be ignored.
- */
- exynos_encoder->dpms = mode;
-
- /*
* if this condition is ok then it means that the crtc is already
* detached from encoder and last function for detaching is properly
* done, so clear pipe from manager to prevent repeated call.
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 614b2e9ac462..e7fbb823fd8e 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1142,7 +1142,7 @@ static int __devinit mixer_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(of_match_ptr(mixer_match_types),
pdev->dev.of_node);
- drv = match->data;
+ drv = (struct mixer_drv_data *)match->data;
} else {
drv = (struct mixer_drv_data *)
platform_get_device_id(pdev)->driver_data;
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 38f3a6cb8c7d..3edd981e0770 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -303,10 +303,10 @@ static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
ch7xxx_readb(dvo, CH7xxx_PM, &val);
- if (val & CH7xxx_PM_FPD)
- return false;
- else
+ if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
return true;
+ else
+ return false;
}
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c9bfd83dde64..61ae104dca8c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1505,7 +1505,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_gmch;
}
- i915_kick_out_firmware_fb(dev_priv);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ i915_kick_out_firmware_fb(dev_priv);
pci_set_master(dev->pdev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index aac4e5e1a5b9..6770ee6084b4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -118,6 +118,13 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
+unsigned int i915_preliminary_hw_support __read_mostly = 0;
+module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
+MODULE_PARM_DESC(preliminary_hw_support,
+ "Enable preliminary hardware support. "
+ "Enable Haswell and ValleyView Support. "
+ "(default: false)");
+
static struct drm_driver driver;
extern int intel_agp_enabled;
@@ -826,6 +833,12 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
+ if (intel_info->is_haswell || intel_info->is_valleyview)
+ if(!i915_preliminary_hw_support) {
+ DRM_ERROR("Preliminary hardware support disabled\n");
+ return -ENODEV;
+ }
+
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4f2831aa5fed..f511fa2f4168 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1217,6 +1217,7 @@ extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
+extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -1341,9 +1342,14 @@ int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
struct scatterlist *sg = obj->pages->sgl;
- while (n >= SG_MAX_SINGLE_ALLOC) {
+ int nents = obj->pages->nents;
+ while (nents > SG_MAX_SINGLE_ALLOC) {
+ if (n < SG_MAX_SINGLE_ALLOC - 1)
+ break;
+
sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
n -= SG_MAX_SINGLE_ALLOC - 1;
+ nents -= SG_MAX_SINGLE_ALLOC - 1;
}
return sg_page(sg+n);
}
@@ -1427,7 +1433,7 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
- struct drm_i915_gem_request *request);
+ u32 *seqno);
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 19dbdd7dd564..107f09befe92 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1407,8 +1407,10 @@ out:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
+ case -ENOSPC:
+ return VM_FAULT_SIGBUS;
default:
- WARN_ON_ONCE(ret);
+ WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
return VM_FAULT_SIGBUS;
}
}
@@ -1822,10 +1824,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
sg_set_page(sg, page, PAGE_SIZE, 0);
}
+ obj->pages = st;
+
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj);
- obj->pages = st;
return 0;
err_pages:
@@ -1955,11 +1958,12 @@ i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
int
i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
- struct drm_i915_gem_request *request)
+ u32 *out_seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
- uint32_t seqno;
+ struct drm_i915_gem_request *request;
u32 request_ring_position;
+ u32 seqno;
int was_empty;
int ret;
@@ -1974,11 +1978,9 @@ i915_add_request(struct intel_ring_buffer *ring,
if (ret)
return ret;
- if (request == NULL) {
- request = kmalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
- }
+ request = kmalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
seqno = i915_gem_next_request_seqno(ring);
@@ -2030,6 +2032,8 @@ i915_add_request(struct intel_ring_buffer *ring,
}
}
+ if (out_seqno)
+ *out_seqno = seqno;
return 0;
}
@@ -3959,6 +3963,9 @@ i915_gem_init_hw(struct drm_device *dev)
if (!intel_enable_gtt())
return -EIO;
+ if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
+ I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+
i915_gem_l3_remap(dev);
i915_gem_init_swizzling(dev);
@@ -4098,7 +4105,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 64c1be0a9cfd..a4162ddff6c5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -521,7 +521,7 @@
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
-#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5)
+#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 893f30164b7e..6345878ae1e7 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -143,7 +143,7 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
int old_dpms;
/* PCH platforms and VLV only support on/off. */
- if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON)
+ if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)
@@ -219,20 +219,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
- int dpll_md_reg;
- u32 adpa, dpll_md;
-
- dpll_md_reg = DPLL_MD(intel_crtc->pipe);
-
- /*
- * Disable separate mode multiplier used when cloning SDVO to CRT
- * XXX this needs to be adjusted when we really are cloning
- */
- if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- dpll_md = I915_READ(dpll_md_reg);
- I915_WRITE(dpll_md_reg,
- dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
- }
+ u32 adpa;
adpa = ADPA_HOTPLUG_BITS;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -742,7 +729,7 @@ void intel_crt_init(struct drm_device *dev)
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = true;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2b6ce9b2674a..4154bcd7a070 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3253,6 +3253,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (HAS_PCH_CPT(dev))
intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+
+ /*
+ * There seems to be a race in PCH platform hw (at least on some
+ * outputs) where an enabled pipe still completes any pageflip right
+ * away (as if the pipe is off) instead of waiting for vblank. As soon
+ * as the first vblank happend, everything works as expected. Hence just
+ * wait for one vblank before returning to avoid strange things
+ * happening.
+ */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
}
static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -3831,6 +3841,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
}
}
+ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+ /* Use VBT settings if we have an eDP panel */
+ unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+
+ if (edp_bpc < display_bpc) {
+ DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+ display_bpc = edp_bpc;
+ }
+ continue;
+ }
+
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
* through, clamp it down. (Note: >12bpc will be caught below.)
@@ -7882,6 +7903,34 @@ struct intel_quirk {
void (*hook)(struct drm_device *dev);
};
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+ void (*hook)(struct drm_device *dev);
+ const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+ DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+ return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+ {
+ .dmi_id_list = &(const struct dmi_system_id[]) {
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "NCR Corporation",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
+ },
+ },
+ { } /* terminating entry */
+ },
+ .hook = quirk_invert_brightness,
+ },
+};
+
static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
@@ -7892,8 +7941,7 @@ static struct intel_quirk intel_quirks[] = {
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
- /* 855 & before need to leave pipe A & dpll A up */
- { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+ /* 830/845 need to leave pipe A & dpll A up */
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
@@ -7922,6 +7970,10 @@ static void intel_init_quirks(struct drm_device *dev)
q->subsystem_device == PCI_ANY_ID))
q->hook(dev);
}
+ for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+ if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+ intel_dmi_quirks[i].hook(dev);
+ }
}
/* Disable the VGA plane that we never use */
@@ -8049,29 +8101,42 @@ static void intel_enable_pipe_a(struct drm_device *dev)
}
+static bool
+intel_check_plane_mapping(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ u32 reg, val;
+
+ if (dev_priv->num_pipe == 1)
+ return true;
+
+ reg = DSPCNTR(!crtc->plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) &&
+ (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
+ return false;
+
+ return true;
+}
+
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg, val;
+ u32 reg;
/* Clear any frame start delays used for debugging left by the BIOS */
reg = PIPECONF(crtc->pipe);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* We need to sanitize the plane -> pipe mapping first because this will
- * disable the crtc (and hence change the state) if it is wrong. */
- if (!HAS_PCH_SPLIT(dev)) {
+ * disable the crtc (and hence change the state) if it is wrong. Note
+ * that gen4+ has a fixed plane -> pipe mapping. */
+ if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
struct intel_connector *connector;
bool plane;
- reg = DSPCNTR(crtc->plane);
- val = I915_READ(reg);
-
- if ((val & DISPLAY_PLANE_ENABLE) == 0 &&
- (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
- goto ok;
-
DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
crtc->base.base.id);
@@ -8095,7 +8160,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
WARN_ON(crtc->active);
crtc->base.enabled = false;
}
-ok:
if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
crtc->pipe == PIPE_A && !crtc->active) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d1e8ddb2d6c0..368ed8ef1600 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1797,7 +1797,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count && voltage_tries == 5) {
- if (++loop_tries == 5) {
+ ++loop_tries;
+ if (loop_tries == 5) {
DRM_DEBUG_KMS("too many full retries, give up\n");
break;
}
@@ -1807,11 +1808,15 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
}
/* Check to see if we've tried the same voltage 5 times */
- if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
- voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- voltage_tries = 0;
- } else
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++voltage_tries;
+ if (voltage_tries == 5) {
+ DRM_DEBUG_KMS("too many voltage retries, give up\n");
+ break;
+ }
+ } else
+ voltage_tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
@@ -2369,8 +2374,9 @@ static void
intel_dp_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
- if (intel_dpd_is_edp(dev))
+ if (is_edp(intel_dp))
intel_panel_destroy_backlight(dev);
drm_sysfs_connector_remove(connector);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e3166df55daa..edba93b3474b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -777,6 +777,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Supermicro X7SPA-H",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index ebff850a9ab6..d7bc817f51a0 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -209,7 +209,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
}
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
- struct drm_i915_gem_request *request,
void (*tail)(struct intel_overlay *))
{
struct drm_device *dev = overlay->dev;
@@ -218,12 +217,10 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
- ret = i915_add_request(ring, NULL, request);
- if (ret) {
- kfree(request);
- return ret;
- }
- overlay->last_flip_req = request->seqno;
+ ret = i915_add_request(ring, NULL, &overlay->last_flip_req);
+ if (ret)
+ return ret;
+
overlay->flip_tail = tail;
ret = i915_wait_seqno(ring, overlay->last_flip_req);
if (ret)
@@ -240,7 +237,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
- struct drm_i915_gem_request *request;
int ret;
BUG_ON(overlay->active);
@@ -248,17 +244,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
ret = intel_ring_begin(ring, 4);
- if (ret) {
- kfree(request);
- goto out;
- }
+ if (ret)
+ return ret;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
@@ -266,9 +254,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- ret = intel_overlay_do_wait_request(overlay, request, NULL);
-out:
- return ret;
+ return intel_overlay_do_wait_request(overlay, NULL);
}
/* overlay needs to be enabled in OCMD reg */
@@ -278,17 +264,12 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
- struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
int ret;
BUG_ON(!overlay->active);
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
if (load_polyphase_filter)
flip_addr |= OFC_UPDATE;
@@ -298,22 +279,14 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
ret = intel_ring_begin(ring, 2);
- if (ret) {
- kfree(request);
+ if (ret)
return ret;
- }
+
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
- ret = i915_add_request(ring, NULL, request);
- if (ret) {
- kfree(request);
- return ret;
- }
-
- overlay->last_flip_req = request->seqno;
- return 0;
+ return i915_add_request(ring, NULL, &overlay->last_flip_req);
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
@@ -349,15 +322,10 @@ static int intel_overlay_off(struct intel_overlay *overlay)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
u32 flip_addr = overlay->flip_addr;
- struct drm_i915_gem_request *request;
int ret;
BUG_ON(!overlay->active);
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
* this applies to the disabling of the overlay or to the switching off
@@ -365,22 +333,28 @@ static int intel_overlay_off(struct intel_overlay *overlay)
flip_addr |= OFC_UPDATE;
ret = intel_ring_begin(ring, 6);
- if (ret) {
- kfree(request);
+ if (ret)
return ret;
- }
+
/* wait for overlay to go idle */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ if (IS_I830(dev)) {
+ /* Workaround: Don't disable the overlay fully, since otherwise
+ * it dies on the next OVERLAY_ON cmd. */
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_NOOP);
+ } else {
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ }
intel_ring_advance(ring);
- return intel_overlay_do_wait_request(overlay, request,
- intel_overlay_off_tail);
+ return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
@@ -425,24 +399,16 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
return 0;
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
- struct drm_i915_gem_request *request;
-
/* synchronous slowpath */
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
ret = intel_ring_begin(ring, 2);
- if (ret) {
- kfree(request);
+ if (ret)
return ret;
- }
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
- ret = intel_overlay_do_wait_request(overlay, request,
+ ret = intel_overlay_do_wait_request(overlay,
intel_overlay_release_old_vid_tail);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e019b2369861..e2aacd329545 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -435,7 +435,7 @@ int intel_panel_setup_backlight(struct drm_device *dev)
props.type = BACKLIGHT_RAW;
props.max_brightness = _intel_panel_get_max_backlight(dev);
if (props.max_brightness == 0) {
- DRM_ERROR("Failed to get maximum backlight value\n");
+ DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
return -ENODEV;
}
dev_priv->backlight =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b3b4b6cea8b0..72f41aaa71ff 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3442,8 +3442,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
/* Bspec says we need to always set all mask bits. */
- I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
- _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
+ I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
+ _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
/*
* According to the spec the following bits should be
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 0007a4d9bf6e..c600fb06e25e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -139,6 +139,11 @@ struct intel_sdvo {
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
+
+ /*
+ * the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
+ */
+ uint8_t dtd_sdvo_flags;
};
struct intel_sdvo_connector {
@@ -889,6 +894,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
}
#endif
+static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
+ unsigned if_index, uint8_t tx_rate,
+ uint8_t *data, unsigned length)
+{
+ uint8_t set_buf_index[2] = { if_index, 0 };
+ uint8_t hbuf_size, tmp[8];
+ int i;
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ &hbuf_size, 1))
+ return false;
+
+ /* Buffer size is 0 based, hooray! */
+ hbuf_size++;
+
+ DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
+
+ for (i = 0; i < hbuf_size; i += 8) {
+ memset(tmp, 0, 8);
+ if (i < length)
+ memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+ tmp, 8))
+ return false;
+ }
+
+ return intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
+}
+
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
{
struct dip_infoframe avi_if = {
@@ -896,11 +940,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
.ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
- uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
- uint8_t set_buf_index[2] = { 1, 0 };
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
- uint64_t *data = (uint64_t *)sdvo_data;
- unsigned i;
intel_dip_infoframe_csum(&avi_if);
@@ -910,22 +950,9 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
sdvo_data[3] = avi_if.checksum;
memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
- if (!intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2))
- return false;
-
- for (i = 0; i < sizeof(sdvo_data); i += 8) {
- if (!intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_HBUF_DATA,
- data, 8))
- return false;
- data++;
- }
-
- return intel_sdvo_set_value(intel_sdvo,
- SDVO_CMD_SET_HBUF_TXRATE,
- &tx_rate, 1);
+ return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+ SDVO_HBUF_TX_VSYNC,
+ sdvo_data, sizeof(sdvo_data));
}
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
@@ -984,6 +1011,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
return false;
intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
+ intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
return true;
}
@@ -1092,6 +1120,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
+ input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
@@ -2277,10 +2307,8 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
- /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling,
- * as opposed to native LVDS, where we upscale with the panel-fitter
- * (and hence only the native LVDS resolution could be cloned). */
- intel_sdvo->base.cloneable = true;
+ /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
+ intel_sdvo->base.cloneable = false;
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
@@ -2354,6 +2382,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
return true;
}
+static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
+ struct drm_connector *connector, *tmp;
+
+ list_for_each_entry_safe(connector, tmp,
+ &dev->mode_config.connector_list, head) {
+ if (intel_attached_encoder(connector) == &intel_sdvo->base)
+ intel_sdvo_destroy(connector);
+ }
+}
+
static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
int type)
@@ -2677,7 +2717,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
SDVO_NAME(intel_sdvo));
- goto err;
+ /* Output_setup can leave behind connectors! */
+ goto err_output;
}
/* Only enable the hotplug irq if we need it, to work around noisy
@@ -2690,12 +2731,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
- goto err;
+ goto err_output;
if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
&intel_sdvo->pixel_clock_min,
&intel_sdvo->pixel_clock_max))
- goto err;
+ goto err_output;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
@@ -2715,6 +2756,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
return true;
+err_output:
+ intel_sdvo_output_cleanup(intel_sdvo);
+
err:
drm_encoder_cleanup(&intel_encoder->base);
i2c_del_adapter(&intel_sdvo->ddc);
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 9d030142ee43..770bdd6ecd9f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -708,6 +708,8 @@ struct intel_sdvo_enhancements_arg {
#define SDVO_CMD_SET_AUDIO_STAT 0x91
#define SDVO_CMD_GET_AUDIO_STAT 0x92
#define SDVO_CMD_SET_HBUF_INDEX 0x93
+ #define SDVO_HBUF_INDEX_ELD 0
+ #define SDVO_HBUF_INDEX_AVI_IF 1
#define SDVO_CMD_GET_HBUF_INDEX 0x94
#define SDVO_CMD_GET_HBUF_INFO 0x95
#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 1f34549aff18..70586fde69cf 100644
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -39,6 +39,11 @@ nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj)
nv_wo32(gpuobj, i, 0x00000000);
}
+ if (gpuobj->node) {
+ nouveau_mm_free(&nv_gpuobj(gpuobj->parent)->heap,
+ &gpuobj->node);
+ }
+
if (gpuobj->heap.block_size)
nouveau_mm_fini(&gpuobj->heap);
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index bfddf87926dd..a6d3cd6490f7 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -218,13 +218,16 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
- node->offset = roundup(offset, mm->block_size);
- node->length = rounddown(offset + length, mm->block_size) - node->offset;
+
+ if (length) {
+ node->offset = roundup(offset, mm->block_size);
+ node->length = rounddown(offset + length, mm->block_size);
+ node->length -= node->offset;
+ }
list_add_tail(&node->nl_entry, &mm->nodes);
list_add_tail(&node->fl_entry, &mm->free);
mm->heap_nodes++;
- mm->heap_size += length;
return 0;
}
@@ -236,7 +239,7 @@ nouveau_mm_fini(struct nouveau_mm *mm)
int nodes = 0;
list_for_each_entry(node, &mm->nodes, nl_entry) {
- if (nodes++ == mm->heap_nodes)
+ if (WARN_ON(nodes++ == mm->heap_nodes))
return -EBUSY;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 16a9afb1060b..15b182c84ce8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -22,6 +22,8 @@
* Authors: Ben Skeggs
*/
+#include <subdev/bar.h>
+
#include <engine/software.h>
#include <engine/disp.h>
@@ -37,6 +39,7 @@ nv50_disp_sclass[] = {
static void
nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
{
+ struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_disp *disp = &priv->base;
struct nouveau_software_chan *chan, *temp;
unsigned long flags;
@@ -46,19 +49,25 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
if (chan->vblank.crtc != crtc)
continue;
- nv_wr32(priv, 0x001704, chan->vblank.channel);
- nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
-
- if (nv_device(priv)->chipset == 0x50) {
- nv_wr32(priv, 0x001570, chan->vblank.offset);
- nv_wr32(priv, 0x001574, chan->vblank.value);
+ if (nv_device(priv)->chipset >= 0xc0) {
+ nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
+ bar->flush(bar);
+ nv_wr32(priv, 0x06000c,
+ upper_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060010,
+ lower_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060014, chan->vblank.value);
} else {
- if (nv_device(priv)->chipset >= 0xc0) {
- nv_wr32(priv, 0x06000c,
- upper_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x001704, chan->vblank.channel);
+ nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+ bar->flush(bar);
+ if (nv_device(priv)->chipset == 0x50) {
+ nv_wr32(priv, 0x001570, chan->vblank.offset);
+ nv_wr32(priv, 0x001574, chan->vblank.value);
+ } else {
+ nv_wr32(priv, 0x060010, chan->vblank.offset);
+ nv_wr32(priv, 0x060014, chan->vblank.value);
}
- nv_wr32(priv, 0x060010, chan->vblank.offset);
- nv_wr32(priv, 0x060014, chan->vblank.value);
}
list_del(&chan->vblank.head);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
index e45035efb8ca..7bbb1e1b7a8d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
@@ -669,21 +669,27 @@ nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
});
}
-void
+int
nv40_grctx_init(struct nouveau_device *device, u32 *size)
{
- u32 ctxprog[256], i;
+ u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i;
struct nouveau_grctx ctx = {
.device = device,
.mode = NOUVEAU_GRCTX_PROG,
.data = ctxprog,
- .ctxprog_max = ARRAY_SIZE(ctxprog)
+ .ctxprog_max = 256,
};
+ if (!ctxprog)
+ return -ENOMEM;
+
nv40_grctx_generate(&ctx);
nv_wr32(device, 0x400324, 0);
for (i = 0; i < ctx.ctxprog_len; i++)
nv_wr32(device, 0x400328, ctxprog[i]);
*size = ctx.ctxvals_pos * 4;
+
+ kfree(ctxprog);
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 8d0021049ec0..cc6574eeb80e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -156,8 +156,8 @@ nv40_graph_context_ctor(struct nouveau_object *parent,
static int
nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
{
- struct nv04_graph_priv *priv = (void *)object->engine;
- struct nv04_graph_chan *chan = (void *)object;
+ struct nv40_graph_priv *priv = (void *)object->engine;
+ struct nv40_graph_chan *chan = (void *)object;
u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
int ret = 0;
@@ -346,7 +346,9 @@ nv40_graph_init(struct nouveau_object *object)
return ret;
/* generate and upload context program */
- nv40_grctx_init(nv_device(priv), &priv->size);
+ ret = nv40_grctx_init(nv_device(priv), &priv->size);
+ if (ret)
+ return ret;
/* No context present currently */
nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
index d2ac975afc2e..7da35a4e7970 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
@@ -15,7 +15,7 @@ nv44_graph_class(void *priv)
return !(0x0baf & (1 << (device->chipset & 0x0f)));
}
-void nv40_grctx_init(struct nouveau_device *, u32 *size);
+int nv40_grctx_init(struct nouveau_device *, u32 *size);
void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index 12418574efea..f7c581ad1991 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -38,7 +38,7 @@ struct nv40_mpeg_priv {
};
struct nv40_mpeg_chan {
- struct nouveau_mpeg base;
+ struct nouveau_mpeg_chan base;
};
/*******************************************************************************
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
index 9ee9bf4028ca..975137ba34a6 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -19,7 +19,6 @@ struct nouveau_mm {
u32 block_size;
int heap_nodes;
- u32 heap_size;
};
int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 818feabbf4a0..486f1a9217fd 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -175,14 +175,18 @@ nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
return temp;
}
-static inline bool
-nv_strncmp(void *obj, u32 addr, u32 len, const char *str)
+static inline int
+nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
{
+ unsigned char c1, c2;
+
while (len--) {
- if (nv_ro08(obj, addr++) != *(str++))
- return false;
+ c1 = nv_ro08(obj, addr++);
+ c2 = *(str++);
+ if (c1 != c2)
+ return c1 - c2;
}
- return true;
+ return 0;
}
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index 39e73b91d360..41b7a6a76f19 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -54,6 +54,7 @@ int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
int clk, struct nouveau_pll_vals *);
int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
struct nouveau_pll_vals *);
-
+int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
+ int clk, struct nouveau_pll_vals *);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index dcb5c2befc92..70ca7d5a1aa1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -72,7 +72,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios)
}
data = of_get_property(dn, "NVDA,BMP", &size);
- if (data) {
+ if (data && size) {
bios->size = size;
bios->data = kmalloc(bios->size, GFP_KERNEL);
if (bios->data)
@@ -104,6 +104,9 @@ nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
goto out;
bios->size = nv_rd08(bios, 0x700002) * 512;
+ if (!bios->size)
+ goto out;
+
bios->data = kmalloc(bios->size, GFP_KERNEL);
if (bios->data) {
for (i = 0; i < bios->size; i++)
@@ -155,6 +158,9 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
/* read entire bios image to system memory */
bios->size = nv_rd08(bios, 0x300002) * 512;
+ if (!bios->size)
+ goto out;
+
bios->data = kmalloc(bios->size, GFP_KERNEL);
if (bios->data) {
for (i = 0; i < bios->size; i++)
@@ -186,14 +192,22 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
{
struct pci_dev *pdev = nv_device(bios)->pdev;
int ret, cnt, i;
- u8 data[3];
- if (!nouveau_acpi_rom_supported(pdev))
+ if (!nouveau_acpi_rom_supported(pdev)) {
+ bios->data = NULL;
return;
+ }
bios->size = 0;
- if (nouveau_acpi_get_bios_chunk(data, 0, 3) == 3)
- bios->size = data[2] * 512;
+ bios->data = kmalloc(4096, GFP_KERNEL);
+ if (bios->data) {
+ if (nouveau_acpi_get_bios_chunk(bios->data, 0, 4096) == 4096)
+ bios->size = bios->data[2] * 512;
+ kfree(bios->data);
+ }
+
+ if (!bios->size)
+ return;
bios->data = kmalloc(bios->size, GFP_KERNEL);
for (i = 0; bios->data && i < bios->size; i += cnt) {
@@ -229,12 +243,14 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios)
static int
nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
{
- if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) {
+ if (bios->size < 3 || !bios->data || bios->data[0] != 0x55 ||
+ bios->data[1] != 0xAA) {
nv_info(bios, "... signature not found\n");
return 0;
}
- if (nvbios_checksum(bios->data, bios->data[2] * 512)) {
+ if (nvbios_checksum(bios->data,
+ min_t(u32, bios->data[2] * 512, bios->size))) {
nv_info(bios, "... checksum invalid\n");
/* if a ro image is somewhat bad, it's probably all rubbish */
return writeable ? 2 : 1;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 9ed6e728a94c..c51197157749 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -43,7 +43,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
*ver = nv_ro08(bios, dcb);
if (*ver >= 0x41) {
- nv_warn(bios, "DCB *ver 0x%02x unknown\n", *ver);
+ nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
return 0x0000;
} else
if (*ver >= 0x30) {
@@ -64,7 +64,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
} else
if (*ver >= 0x15) {
- if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) {
+ if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
u16 i2c = nv_ro16(bios, dcb + 2);
*hdr = 4;
*cnt = (i2c - dcb) / 10;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
index 5e5f4cddae3c..f835501203e5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
@@ -157,11 +157,10 @@ pll_map_reg(struct nouveau_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
while (map->reg) {
if (map->reg == reg && *ver >= 0x20) {
u16 addr = (data += hdr);
+ *type = map->type;
while (cnt--) {
- if (nv_ro32(bios, data) == map->reg) {
- *type = map->type;
+ if (nv_ro32(bios, data) == map->reg)
return data;
- }
data += *len;
}
return addr;
@@ -200,11 +199,10 @@ pll_map_type(struct nouveau_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
while (map->reg) {
if (map->type == type && *ver >= 0x20) {
u16 addr = (data += hdr);
+ *reg = map->reg;
while (cnt--) {
- if (nv_ro32(bios, data) == map->reg) {
- *reg = map->reg;
+ if (nv_ro32(bios, data) == map->reg)
return data;
- }
data += *len;
}
return addr;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index cc8d7d162d7c..9068c98b96f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -66,6 +66,24 @@ nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
return ret;
}
+int
+nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
+ int clk, struct nouveau_pll_vals *pv)
+{
+ int ret, N, M, P;
+
+ ret = nva3_pll_calc(clock, info, clk, &N, NULL, &M, &P);
+
+ if (ret > 0) {
+ pv->refclk = info->refclk;
+ pv->N1 = N;
+ pv->M1 = M;
+ pv->log2P = P;
+ }
+ return ret;
+}
+
+
static int
nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -80,6 +98,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.pll_set = nva3_clock_pll_set;
+ priv->base.pll_calc = nva3_clock_pll_calc;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index 5ccce0b17bf3..f6962c9b6c36 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -79,6 +79,7 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.pll_set = nvc0_clock_pll_set;
+ priv->base.pll_calc = nva3_clock_pll_calc;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 436e9efe7ef5..5f570806143a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -219,13 +219,11 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
((priv->base.ram.size & 0x000000ff) << 32);
tags = nv_rd32(priv, 0x100320);
- if (tags) {
- ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
- if (ret)
- return ret;
+ ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
+ if (ret)
+ return ret;
- nv_debug(priv, "%d compression tags\n", tags);
- }
+ nv_debug(priv, "%d compression tags\n", tags);
size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
switch (device->chipset) {
@@ -237,6 +235,7 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
+ priv->base.ram.type = NV_MEM_TYPE_STOLEN;
break;
default:
ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
@@ -277,7 +276,6 @@ nv50_fb_dtor(struct nouveau_object *object)
__free_page(priv->r100c08_page);
}
- nouveau_mm_fini(&priv->base.vram);
nouveau_fb_destroy(&priv->base);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 3d2c88310f98..dbfc2abf0cfe 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -292,7 +292,7 @@ nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
case DCB_I2C_NVIO_BIT:
port->drive = info.drive & 0x0f;
if (device->card_type < NV_D0) {
- if (info.drive >= ARRAY_SIZE(nv50_i2c_port))
+ if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
break;
port->drive = nv50_i2c_port[port->drive];
port->sense = port->drive;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index b29237970fa0..523178685180 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -134,7 +134,7 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
end = ptimer->read(ptimer);
if (cycles == 5) {
- tach = (u64)60000000000;
+ tach = (u64)60000000000ULL;
do_div(tach, (end - start));
return tach;
} else
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
index 0203e1e12caa..9474cfca6e4c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -67,7 +67,7 @@ nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
static void
nv41_vm_flush(struct nouveau_vm *vm)
{
- struct nv04_vm_priv *priv = (void *)vm->vmm;
+ struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
mutex_lock(&nv_subdev(priv)->mutex);
nv_wr32(priv, 0x100810, 0x00000022);
@@ -92,7 +92,8 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_vmmgr_priv *priv;
int ret;
- if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
+ if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
+ !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
data, size, pobject);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
index 0ac18d05a146..aa8131436e3d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -163,7 +163,8 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_vmmgr_priv *priv;
int ret;
- if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
+ if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
+ !nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
data, size, pobject);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index cc79c796afee..cbf1fc60a386 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -241,6 +241,10 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (unlikely(!abi16))
return -ENOMEM;
+
+ if (!drm->channel)
+ return nouveau_abi16_put(abi16, -ENODEV);
+
client = nv_client(abi16->client);
if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 259e5f1adf47..35ac57f0aab6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -456,6 +456,7 @@ static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
uint32_t page_flags, struct page *dummy_read)
{
+#if __OS_HAS_AGP
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct drm_device *dev = drm->dev;
@@ -463,6 +464,7 @@ nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
page_flags, dummy_read);
}
+#endif
return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 9a6e2cb282dc..d3595b23434a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -355,7 +355,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
* valid - it's not (rh#613284)
*/
if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
- if (!(nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
+ if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
status = connector_status_connected;
goto out;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8f98e5a8c488..86124b131f4f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -290,6 +290,7 @@ nouveau_display_create(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_disp *pdisp = nouveau_disp(drm->device);
struct nouveau_display *disp;
+ u32 pclass = dev->pdev->class >> 8;
int ret, gen;
disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
@@ -360,23 +361,27 @@ nouveau_display_create(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
- if (nv_device(drm->device)->card_type < NV_50)
- ret = nv04_display_create(dev);
- else
- if (nv_device(drm->device)->card_type < NV_D0)
- ret = nv50_display_create(dev);
- else
- ret = nvd0_display_create(dev);
- if (ret)
- goto disp_create_err;
-
- if (dev->mode_config.num_crtc) {
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (nouveau_modeset == 1 ||
+ (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) {
+ if (nv_device(drm->device)->card_type < NV_50)
+ ret = nv04_display_create(dev);
+ else
+ if (nv_device(drm->device)->card_type < NV_D0)
+ ret = nv50_display_create(dev);
+ else
+ ret = nvd0_display_create(dev);
if (ret)
- goto vblank_err;
+ goto disp_create_err;
+
+ if (dev->mode_config.num_crtc) {
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret)
+ goto vblank_err;
+ }
+
+ nouveau_backlight_init(dev);
}
- nouveau_backlight_init(dev);
return 0;
vblank_err:
@@ -395,7 +400,8 @@ nouveau_display_destroy(struct drm_device *dev)
nouveau_backlight_exit(dev);
drm_vblank_cleanup(dev);
- disp->dtor(dev);
+ if (disp->dtor)
+ disp->dtor(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
@@ -530,9 +536,11 @@ nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
if (ret)
goto fail;
- ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
- if (ret)
- goto fail_unreserve;
+ if (likely(old_bo != new_bo)) {
+ ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
+ if (ret)
+ goto fail_unreserve;
+ }
return 0;
@@ -551,8 +559,10 @@ nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
nouveau_bo_fence(new_bo, fence);
ttm_bo_unreserve(&new_bo->bo);
- nouveau_bo_fence(old_bo, fence);
- ttm_bo_unreserve(&old_bo->bo);
+ if (likely(old_bo != new_bo)) {
+ nouveau_bo_fence(old_bo, fence);
+ ttm_bo_unreserve(&old_bo->bo);
+ }
nouveau_bo_unpin(old_bo);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ccae8c26ae2b..8503b2ea570a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -63,8 +63,9 @@ MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
static int nouveau_noaccel = 0;
module_param_named(noaccel, nouveau_noaccel, int, 0400);
-MODULE_PARM_DESC(modeset, "enable driver");
-static int nouveau_modeset = -1;
+MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
+ "0 = disabled, 1 = enabled, 2 = headless)");
+int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);
static struct drm_driver driver;
@@ -128,7 +129,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
/* initialise synchronisation routines */
if (device->card_type < NV_10) ret = nv04_fence_create(drm);
- else if (device->chipset < 0x84) ret = nv10_fence_create(drm);
+ else if (device->card_type < NV_50) ret = nv10_fence_create(drm);
+ else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
else ret = nvc0_fence_create(drm);
if (ret) {
@@ -363,7 +365,8 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_pm_fini(dev);
- nouveau_display_fini(dev);
+ if (dev->mode_config.num_crtc)
+ nouveau_display_fini(dev);
nouveau_display_destroy(dev);
nouveau_irq_fini(dev);
@@ -403,13 +406,15 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pm_state.event == PM_EVENT_PRETHAW)
return 0;
- NV_INFO(drm, "suspending fbcon...\n");
- nouveau_fbcon_set_suspend(dev, 1);
+ if (dev->mode_config.num_crtc) {
+ NV_INFO(drm, "suspending fbcon...\n");
+ nouveau_fbcon_set_suspend(dev, 1);
- NV_INFO(drm, "suspending display...\n");
- ret = nouveau_display_suspend(dev);
- if (ret)
- return ret;
+ NV_INFO(drm, "suspending display...\n");
+ ret = nouveau_display_suspend(dev);
+ if (ret)
+ return ret;
+ }
NV_INFO(drm, "evicting buffers...\n");
ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
@@ -445,8 +450,10 @@ fail_client:
nouveau_client_init(&cli->base);
}
- NV_INFO(drm, "resuming display...\n");
- nouveau_display_resume(dev);
+ if (dev->mode_config.num_crtc) {
+ NV_INFO(drm, "resuming display...\n");
+ nouveau_display_resume(dev);
+ }
return ret;
}
@@ -486,8 +493,10 @@ nouveau_drm_resume(struct pci_dev *pdev)
nouveau_irq_postinstall(dev);
nouveau_pm_resume(dev);
- NV_INFO(drm, "resuming display...\n");
- nouveau_display_resume(dev);
+ if (dev->mode_config.num_crtc) {
+ NV_INFO(drm, "resuming display...\n");
+ nouveau_display_resume(dev);
+ }
return 0;
}
@@ -662,9 +671,7 @@ nouveau_drm_init(void)
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force())
nouveau_modeset = 0;
- else
#endif
- nouveau_modeset = 1;
}
if (!nouveau_modeset)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 819471217546..a10169927086 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -141,4 +141,6 @@ int nouveau_drm_resume(struct pci_dev *);
nv_info((cli), fmt, ##args); \
} while (0)
+extern int nouveau_modeset;
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 9ca8afdb5549..1d8cb506a28a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -61,13 +61,15 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
nv_subdev(pmc)->intr(nv_subdev(pmc));
- if (device->card_type >= NV_D0) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nvd0_display_intr(dev);
- } else
- if (device->card_type >= NV_50) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nv50_display_intr(dev);
+ if (dev->mode_config.num_crtc) {
+ if (device->card_type >= NV_D0) {
+ if (nv_rd32(device, 0x000100) & 0x04000000)
+ nvd0_display_intr(dev);
+ } else
+ if (device->card_type >= NV_50) {
+ if (nv_rd32(device, 0x000100) & 0x04000000)
+ nv50_display_intr(dev);
+ }
}
return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 0bf64c90aa20..5566172774df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -52,7 +52,7 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_pm *pm = nouveau_pm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm);
+ struct nouveau_therm *therm = nouveau_therm(drm->device);
int ret;
/*XXX: not on all boards, we should control based on temperature
@@ -64,7 +64,6 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
ret = therm->fan_set(therm, perflvl->fanspeed);
if (ret && ret != -ENODEV) {
NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
- return ret;
}
}
@@ -706,8 +705,7 @@ nouveau_hwmon_init(struct drm_device *dev)
struct device *hwmon_dev;
int ret = 0;
- if (!therm || !therm->temp_get || !therm->attr_get ||
- !therm->attr_set || therm->temp_get(therm) < 0)
+ if (!therm || !therm->temp_get || !therm->attr_get || !therm->attr_set)
return -ENODEV;
hwmon_dev = hwmon_device_register(&dev->pdev->dev);
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 347a3bd78d04..64f7020fb605 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -220,7 +220,7 @@ out:
NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
if (blue == 0x18) {
- NV_INFO(drm, "Load detected on head A\n");
+ NV_DEBUG(drm, "Load detected on head A\n");
return connector_status_connected;
}
@@ -338,8 +338,8 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
if (nv17_dac_sample_load(encoder) &
NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
- NV_INFO(drm, "Load detected on output %c\n",
- '@' + ffs(dcb->or));
+ NV_DEBUG(drm, "Load detected on output %c\n",
+ '@' + ffs(dcb->or));
return connector_status_connected;
} else {
return connector_status_disconnected;
@@ -413,9 +413,9 @@ static void nv04_dac_commit(struct drm_encoder *encoder)
helper->dpms(encoder, DRM_MODE_DPMS_ON);
- NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
- nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
@@ -461,8 +461,8 @@ static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
return;
nv_encoder->last_dpms = mode;
- NV_INFO(drm, "Setting dpms mode %d on vga encoder (output %d)\n",
- mode, nv_encoder->dcb->index);
+ NV_DEBUG(drm, "Setting dpms mode %d on vga encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index da55d7642c8c..184cdf806761 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -476,9 +476,9 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
helper->dpms(encoder, DRM_MODE_DPMS_ON);
- NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
- nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
@@ -520,8 +520,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
return;
nv_encoder->last_dpms = mode;
- NV_INFO(drm, "Setting dpms mode %d on lvds encoder (output %d)\n",
- mode, nv_encoder->dcb->index);
+ NV_DEBUG(drm, "Setting dpms mode %d on lvds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
if (was_powersaving && is_powersaving_dpms(mode))
return;
@@ -565,8 +565,8 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
return;
nv_encoder->last_dpms = mode;
- NV_INFO(drm, "Setting dpms mode %d on tmds encoder (output %d)\n",
- mode, nv_encoder->dcb->index);
+ NV_DEBUG(drm, "Setting dpms mode %d on tmds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 099fbeda6e2e..62e826a139b3 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -75,8 +75,8 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
uint8_t crtc1A;
- NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
- mode, nv_encoder->dcb->index);
+ NV_DEBUG(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
@@ -167,9 +167,8 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
helper->dpms(encoder, DRM_MODE_DPMS_ON);
- NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
- '@' + ffs(nv_encoder->dcb->or));
+ NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
static void nv04_tv_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 96184d02c8d9..3bce0299f64a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1690,41 +1690,49 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
}
/* all other cases */
pll_in_use = radeon_get_pll_use_mask(crtc);
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
- } else {
- if (ASIC_IS_AVIVO(rdev)) {
- /* in DP mode, the DP ref clock can come from either PPLL
- * depending on the asic:
- * DCE3: PPLL1 or PPLL2
- */
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
- /* use the same PPLL for all DP monitors */
- pll = radeon_get_shared_dp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- } else {
- /* use the same PPLL for all monitors with the same clock */
- pll = radeon_get_shared_nondp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
- /* all other cases */
- pll_in_use = radeon_get_pll_use_mask(crtc);
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ /* in DP mode, the DP ref clock can come from either PPLL
+ * depending on the asic:
+ * DCE3: PPLL1 or PPLL2
+ */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+ /* use the same PPLL for all DP monitors */
+ pll = radeon_get_shared_dp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ } else {
+ /* use the same PPLL for all monitors with the same clock */
+ pll = radeon_get_shared_nondp_ppll(crtc);
+ if (pll != ATOM_PPLL_INVALID)
+ return pll;
+ }
+ /* all other cases */
+ pll_in_use = radeon_get_pll_use_mask(crtc);
+ /* the order shouldn't matter here, but we probably
+ * need this until we have atomic modeset
+ */
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ } else {
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- } else {
- /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
- return radeon_crtc->crtc_id;
}
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
+ } else {
+ /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
+ return radeon_crtc->crtc_id;
}
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 49cbb3795a10..010bae19554a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -184,6 +184,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
struct radeon_backlight_privdata *pdata;
struct radeon_encoder_atom_dig *dig;
u8 backlight_level;
+ char bl_name[16];
if (!radeon_encoder->enc_priv)
return;
@@ -203,7 +204,9 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
memset(&props, 0, sizeof(props));
props.max_brightness = RADEON_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
- bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+ snprintf(bl_name, sizeof(bl_name),
+ "radeon_bl%d", dev->primary->index);
+ bd = backlight_device_register(bl_name, &drm_connector->kdev,
pdata, &radeon_atom_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
@@ -1622,7 +1625,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
/* some early dce3.2 boards have a bug in their transmitter control table */
- if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a1f49c5fd74b..219942c660d7 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1330,6 +1330,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
+ } else {
+ save->crtc_enabled[i] = false;
}
}
@@ -1372,7 +1374,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
for (i = 0; i < rdev->num_crtc; i++) {
- if (save->crtc_enabled) {
+ if (save->crtc_enabled[i]) {
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
@@ -3431,9 +3433,14 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
if (!(mask & DRM_PCIE_SPEED_50))
return;
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ if (speed_cntl & LC_CURRENT_DATA_RATE) {
+ DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ return;
+ }
+
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
- speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 573ed1bc6cf7..c042e497e450 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -264,7 +264,7 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
/* macro tile width & height */
palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
- mtileb = (palign / 8) * (halign / 8) * tileb;;
+ mtileb = (palign / 8) * (halign / 8) * tileb;
mtile_pr = surf->nbx / palign;
mtile_ps = (mtile_pr * surf->nby) / halign;
surf->layer_size = mtile_ps * mtileb * slice_pt;
@@ -2725,6 +2725,9 @@ static bool evergreen_vm_reg_valid(u32 reg)
/* check config regs */
switch (reg) {
case GRBM_GFX_INDEX:
+ case CP_STRMOUT_CNTL:
+ case CP_COHER_CNTL:
+ case CP_COHER_SIZE:
case VGT_VTX_VECT_EJECT_REG:
case VGT_CACHE_INVALIDATION:
case VGT_GS_VERTEX_REUSE:
@@ -2829,6 +2832,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
return true;
default:
+ DRM_ERROR("Invalid register 0x%x in CS\n", reg);
return false;
}
}
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index df542f1a5dfb..2bc0f6a1b428 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -91,6 +91,10 @@
#define FB_READ_EN (1 << 0)
#define FB_WRITE_EN (1 << 1)
+#define CP_STRMOUT_CNTL 0x84FC
+
+#define CP_COHER_CNTL 0x85F0
+#define CP_COHER_SIZE 0x85F4
#define CP_COHER_BASE 0x85F8
#define CP_STALLED_STAT1 0x8674
#define CP_STALLED_STAT2 0x8678
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8bcb554ea0c5..81e6a568c29d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -770,9 +770,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
WREG32(0x15DC, 0);
/* empty context1-7 */
+ /* Assign the pt base to something valid for now; the pts used for
+ * the VMs are determined by the application and setup and assigned
+ * on the fly in the vm part of radeon_gart.c
+ */
for (i = 1; i < 8; i++) {
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
rdev->gart.table_addr >> 12);
}
@@ -1534,26 +1538,31 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
- int i;
- radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
- for (i = 0; i < count; ++i) {
- uint64_t value = 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- addr += incr;
-
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- addr += incr;
- }
+ while (count) {
+ unsigned ndw = 1 + count * 2;
+ if (ndw > 0x3FFF)
+ ndw = 0x3FFF;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+ uint64_t value = 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ addr += incr;
+
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ addr += incr;
+ }
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
}
}
@@ -1572,12 +1581,6 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
if (vm == NULL)
return;
- radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0));
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0));
- radeon_ring_write(ring, vm->last_pfn);
-
radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
@@ -1588,4 +1591,8 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
radeon_ring_write(ring, 1 << vm->id);
+
+ /* sync PFP to ME, otherwise we might get invalid PFP reads */
+ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ radeon_ring_write(ring, 0x0);
}
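
The reworked cayman_vm_set_page() above caps each write packet at 0x3FFF dwords and loops until every page entry has been emitted, instead of assuming a single packet can cover the whole range. A minimal sketch of that chunking pattern, with a hypothetical emit_packet() standing in for the actual ring writes:

#include <stdint.h>
#include <stdio.h>

#define MAX_PACKET_DW 0x3FFF	/* per-packet dword limit, as in the hunk above */

/* Hypothetical stand-in for the PACKET3_ME_WRITE emission; just logs it. */
static void emit_packet(uint64_t pe, unsigned entries)
{
	printf("packet: pe=0x%llx, %u entries (%u dwords)\n",
	       (unsigned long long)pe, entries, 1 + entries * 2);
}

/* Split 'count' 8-byte page entries into packets of at most MAX_PACKET_DW
 * dwords; each entry costs two dwords plus a small per-packet header. */
static void write_entries(uint64_t pe, unsigned count)
{
	while (count) {
		unsigned ndw = 1 + count * 2;
		unsigned entries;

		if (ndw > MAX_PACKET_DW)
			ndw = MAX_PACKET_DW;
		entries = (ndw - 1) / 2;

		emit_packet(pe, entries);
		pe += (uint64_t)entries * 8;
		count -= entries;
	}
}

int main(void)
{
	write_entries(0x100000, 20000);	/* large enough to force several packets */
	return 0;
}
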
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2423d1b5d385..cbef6815907a 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -502,6 +502,7 @@
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 70c800ff6190..cda280d157da 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3703,6 +3703,12 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if (!(mask & DRM_PCIE_SPEED_50))
return;
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+ if (speed_cntl & LC_CURRENT_DATA_RATE) {
+ DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ return;
+ }
+
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
/* 55 nm r6xx asics */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b04c06444d8b..8c42d54c2e26 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -663,9 +663,14 @@ struct radeon_vm {
struct list_head list;
struct list_head va;
unsigned id;
- unsigned last_pfn;
- u64 pd_gpu_addr;
- struct radeon_sa_bo *sa_bo;
+
+ /* contains the page directory */
+ struct radeon_sa_bo *page_directory;
+ uint64_t pd_gpu_addr;
+
+ /* array of page tables, one for each page directory entry */
+ struct radeon_sa_bo **page_tables;
+
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@@ -1843,9 +1848,10 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring);
void radeon_vm_fence(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index b0a5688c67f8..196d28d99570 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -201,7 +201,7 @@ static int radeon_atif_verify_interface(acpi_handle handle,
size = *(u16 *) info->buffer.pointer;
if (size < 12) {
- DRM_INFO("ATIF buffer is too small: %lu\n", size);
+ DRM_INFO("ATIF buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
@@ -370,6 +370,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
radeon_set_backlight_level(rdev, enc, req.backlight_level);
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *dig = enc->enc_priv;
backlight_force_update(dig->bl_dev,
@@ -379,6 +380,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
backlight_force_update(dig->bl_dev,
BACKLIGHT_UPDATE_HOTKEY);
}
+#endif
}
}
/* TODO: check other events */
@@ -485,7 +487,7 @@ static int radeon_atcs_verify_interface(acpi_handle handle,
size = *(u16 *) info->buffer.pointer;
if (size < 8) {
- DRM_INFO("ATCS buffer is too small: %lu\n", size);
+ DRM_INFO("ATCS buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 10ea17a6b2a6..42433344cb1b 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -69,9 +69,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
/* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
{ PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
PCI_VENDOR_ID_DELL, 0x00e3, 2},
- /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
+ /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
PCI_VENDOR_ID_DELL, 0x0149, 1},
+ /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
+ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+ PCI_VENDOR_ID_IBM, 0x0531, 1},
/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
0x1025, 0x0061, 1},
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 582e99449c12..15f5ded65e0c 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -87,7 +87,7 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
atpx_arg_elements[1].integer.value = 0;
}
- status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer);
+ status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
/* Fail only if calling the method fails and ATPX is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -148,7 +148,7 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
size = *(u16 *) info->buffer.pointer;
if (size < 8) {
- printk("ATPX buffer is too small: %lu\n", size);
+ printk("ATPX buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
@@ -352,9 +352,9 @@ static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
}
/**
- * radeon_atpx_switchto - switch to the requested GPU
+ * radeon_atpx_power_state - power down/up the requested GPU
*
- * @id: GPU to switch to
+ * @id: GPU to power down/up
* @state: requested power state (0 = off, 1 = on)
*
* Execute the necessary ATPX function to power down/up the discrete GPU
@@ -373,11 +373,11 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
}
/**
- * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles
+ * radeon_atpx_pci_probe_handle - look up the ATPX handle
*
* @pdev: pci device
*
- * Look up the ATPX and ATRM handles (all asics).
+ * Look up the ATPX handles (all asics).
* Returns true if the handles are found, false if not.
*/
static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 67cfc1795ecd..b884c362a8c2 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -941,7 +941,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
struct drm_mode_object *obj;
int i;
enum drm_connector_status ret = connector_status_disconnected;
- bool dret = false;
+ bool dret = false, broken_edid = false;
if (!force && radeon_check_hpd_status_unchanged(connector))
return connector->status;
@@ -965,6 +965,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
ret = connector_status_disconnected;
DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
radeon_connector->ddc_bus = NULL;
+ } else {
+ ret = connector_status_connected;
+ broken_edid = true; /* defer use_digital to later */
}
} else {
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
@@ -1047,13 +1050,24 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
encoder_funcs = encoder->helper_private;
if (encoder_funcs->detect) {
- if (ret != connector_status_connected) {
- ret = encoder_funcs->detect(encoder, connector);
- if (ret == connector_status_connected) {
- radeon_connector->use_digital = false;
+ if (!broken_edid) {
+ if (ret != connector_status_connected) {
+ /* deal with analog monitors without DDC */
+ ret = encoder_funcs->detect(encoder, connector);
+ if (ret == connector_status_connected) {
+ radeon_connector->use_digital = false;
+ }
+ if (ret != connector_status_disconnected)
+ radeon_connector->detected_by_load = true;
}
- if (ret != connector_status_disconnected)
- radeon_connector->detected_by_load = true;
+ } else {
+ enum drm_connector_status lret;
+ /* assume digital unless load detected otherwise */
+ radeon_connector->use_digital = true;
+ lret = encoder_funcs->detect(encoder, connector);
+ DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret);
+ if (lret == connector_status_connected)
+ radeon_connector->use_digital = false;
}
break;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index cb7b7c062fef..41672cc563fb 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -478,6 +478,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
}
out:
+ radeon_vm_add_to_lru(rdev, vm);
mutex_unlock(&vm->mutex);
mutex_unlock(&rdev->vm_manager.lock);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 64a42647f08a..e2f5f888c374 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -355,6 +355,8 @@ int radeon_wb_init(struct radeon_device *rdev)
*/
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
+ uint64_t limit = (uint64_t)radeon_vram_limit << 20;
+
mc->vram_start = base;
if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
@@ -368,8 +370,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
- mc->real_vram_size = radeon_vram_limit;
+ if (limit && limit < mc->real_vram_size)
+ mc->real_vram_size = limit;
dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
@@ -835,6 +837,19 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
}
/**
+ * radeon_check_pot_argument - check that argument is a power of two
+ *
+ * @arg: value to check
+ *
+ * Validates that a certain argument is a power of two (all asics).
+ * Returns true if argument is valid.
+ */
+static bool radeon_check_pot_argument(int arg)
+{
+ return (arg & (arg - 1)) == 0;
+}
+
+/**
* radeon_check_arguments - validate module params
*
* @rdev: radeon_device pointer
@@ -845,52 +860,25 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
static void radeon_check_arguments(struct radeon_device *rdev)
{
/* vramlimit must be a power of two */
- switch (radeon_vram_limit) {
- case 0:
- case 4:
- case 8:
- case 16:
- case 32:
- case 64:
- case 128:
- case 256:
- case 512:
- case 1024:
- case 2048:
- case 4096:
- break;
- default:
+ if (!radeon_check_pot_argument(radeon_vram_limit)) {
dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
radeon_vram_limit);
radeon_vram_limit = 0;
- break;
}
- radeon_vram_limit = radeon_vram_limit << 20;
+
/* gtt size must be power of two and greater or equal to 32M */
- switch (radeon_gart_size) {
- case 4:
- case 8:
- case 16:
+ if (radeon_gart_size < 32) {
dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
radeon_gart_size);
radeon_gart_size = 512;
- break;
- case 32:
- case 64:
- case 128:
- case 256:
- case 512:
- case 1024:
- case 2048:
- case 4096:
- break;
- default:
+
+ } else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
radeon_gart_size = 512;
- break;
}
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
+
/* AGP mode can only be -1, 1, 2, 4, 8 */
switch (radeon_agpmode) {
case -1:
@@ -1018,6 +1006,10 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
/* initialize vm here */
mutex_init(&rdev->vm_manager.lock);
+ /* Adjust VM size here.
+ * Currently set to 4GB ((1 << 20) 4k pages).
+ * Max GPUVM size for cayman and SI is 40 bits.
+ */
rdev->vm_manager.max_pfn = 1 << 20;
INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
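
Two of the radeon_device.c changes above guard against 32-bit overflow: both the vram limit and the gart size are widened to 64 bits before being shifted into bytes, and the power-of-two validation is factored into radeon_check_pot_argument(). A small standalone illustration (demo values only) of the check and of why a plain megabyte-to-byte multiply breaks down at 4096 MB:

#include <stdint.h>
#include <stdio.h>

/* Same power-of-two test as radeon_check_pot_argument() above. */
static int is_pot(int arg)
{
	return (arg & (arg - 1)) == 0;
}

int main(void)
{
	int gart_size_mb = 4096;	/* module parameter value, in MB */

	/* 32-bit arithmetic wraps to 0 at 4096 MB (the unpatched code did the
	 * equivalent multiply in plain int). */
	uint32_t old_bytes = (uint32_t)gart_size_mb * 1024u * 1024u;

	/* Widening first, as the patched code does, keeps the full value. */
	uint64_t new_bytes = (uint64_t)gart_size_mb << 20;

	printf("pot=%d old=%u new=%llu\n", is_pot(gart_size_mb),
	       old_bytes, (unsigned long long)new_bytes);
	return 0;
}
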
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index f0c06d196b75..4debd60e5aa6 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -355,14 +355,13 @@ int radeon_gart_init(struct radeon_device *rdev)
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */
- rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
- GFP_KERNEL);
+ rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
if (rdev->gart.pages == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
- rdev->gart.num_cpu_pages, GFP_KERNEL);
+ rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
+ rdev->gart.num_cpu_pages);
if (rdev->gart.pages_addr == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
@@ -388,8 +387,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
- kfree(rdev->gart.pages);
- kfree(rdev->gart.pages_addr);
+ vfree(rdev->gart.pages);
+ vfree(rdev->gart.pages_addr);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
@@ -423,6 +422,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
*/
/**
+ * radeon_vm_num_pdes - return the number of page directory entries
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
+ */
+static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
+{
+ return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
+}
+
+/**
* radeon_vm_directory_size - returns the size of the page directory in bytes
*
* @rdev: radeon_device pointer
@@ -431,7 +442,7 @@ void radeon_gart_fini(struct radeon_device *rdev)
*/
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
- return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
+ return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
/**
@@ -451,11 +462,11 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
if (!rdev->vm_manager.enabled) {
/* allocate enough for 2 full VM pts */
- size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
- size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8);
+ size = radeon_vm_directory_size(rdev);
+ size += rdev->vm_manager.max_pfn * 8;
size *= 2;
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
- size,
+ RADEON_GPU_PAGE_ALIGN(size),
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -476,7 +487,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
/* restore page table */
list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
- if (vm->sa_bo == NULL)
+ if (vm->page_directory == NULL)
continue;
list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -500,16 +511,25 @@ static void radeon_vm_free_pt(struct radeon_device *rdev,
struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va;
+ int i;
- if (!vm->sa_bo)
+ if (!vm->page_directory)
return;
list_del_init(&vm->list);
- radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
+ radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
list_for_each_entry(bo_va, &vm->va, vm_list) {
bo_va->valid = false;
}
+
+ if (vm->page_tables == NULL)
+ return;
+
+ for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+ radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
+
+ kfree(vm->page_tables);
}
/**
@@ -546,63 +566,106 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
}
/**
+ * radeon_vm_evict - evict page table to make room for new one
+ *
+ * @rdev: radeon_device pointer
+ * @vm: VM we want to allocate something for
+ *
+ * Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
+ * Returns 0 for success, -ENOMEM for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ struct radeon_vm *vm_evict;
+
+ if (list_empty(&rdev->vm_manager.lru_vm))
+ return -ENOMEM;
+
+ vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
+ struct radeon_vm, list);
+ if (vm_evict == vm)
+ return -ENOMEM;
+
+ mutex_lock(&vm_evict->mutex);
+ radeon_vm_free_pt(rdev, vm_evict);
+ mutex_unlock(&vm_evict->mutex);
+ return 0;
+}
+
+/**
* radeon_vm_alloc_pt - allocates a page table for a VM
*
* @rdev: radeon_device pointer
* @vm: vm to bind
*
* Allocate a page table for the requested vm (cayman+).
- * Also starts to populate the page table.
* Returns 0 for success, error for failure.
*
* Global and local mutex must be locked!
*/
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
- struct radeon_vm *vm_evict;
- int r;
+ unsigned pd_size, pts_size;
u64 *pd_addr;
- int tables_size;
+ int r;
if (vm == NULL) {
return -EINVAL;
}
- /* allocate enough to cover the current VM size */
- tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
- tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
-
- if (vm->sa_bo != NULL) {
- /* update lru */
- list_del_init(&vm->list);
- list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+ if (vm->page_directory != NULL) {
return 0;
}
retry:
- r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
- tables_size, RADEON_GPU_PAGE_SIZE, false);
+ pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+ r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+ &vm->page_directory, pd_size,
+ RADEON_GPU_PAGE_SIZE, false);
if (r == -ENOMEM) {
- if (list_empty(&rdev->vm_manager.lru_vm)) {
+ r = radeon_vm_evict(rdev, vm);
+ if (r)
return r;
- }
- vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
- mutex_lock(&vm_evict->mutex);
- radeon_vm_free_pt(rdev, vm_evict);
- mutex_unlock(&vm_evict->mutex);
goto retry;
} else if (r) {
return r;
}
- pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
- vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
- memset(pd_addr, 0, tables_size);
+ vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
+
+ /* Initially clear the page directory */
+ pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
+ memset(pd_addr, 0, pd_size);
+
+ pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
+ vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+
+ if (vm->page_tables == NULL) {
+ DRM_ERROR("Cannot allocate memory for page table array\n");
+ radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+/**
+ * radeon_vm_add_to_lru - add VMs page table to LRU list
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to add to LRU
+ *
+ * Add the allocated page table to the LRU list (cayman+).
+ *
+ * Global mutex must be locked!
+ */
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ list_del_init(&vm->list);
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
- return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
- &rdev->ring_tmp_bo.bo->tbo.mem);
}
/**
@@ -793,20 +856,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}
mutex_lock(&vm->mutex);
- if (last_pfn > vm->last_pfn) {
- /* release mutex and lock in right order */
- mutex_unlock(&vm->mutex);
- mutex_lock(&rdev->vm_manager.lock);
- mutex_lock(&vm->mutex);
- /* and check again */
- if (last_pfn > vm->last_pfn) {
- /* grow va space 32M by 32M */
- unsigned align = ((32 << 20) >> 12) - 1;
- radeon_vm_free_pt(rdev, vm);
- vm->last_pfn = (last_pfn + align) & ~align;
- }
- mutex_unlock(&rdev->vm_manager.lock);
- }
head = &vm->va;
last_offset = 0;
list_for_each_entry(tmp, &vm->va, vm_list) {
@@ -865,6 +914,154 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
}
/**
+ * radeon_vm_update_pdes - make sure that page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_update_pdes(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint64_t start, uint64_t end)
+{
+ static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+
+ uint64_t last_pde = ~0, last_pt = ~0;
+ unsigned count = 0;
+ uint64_t pt_idx;
+ int r;
+
+ start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+ end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+
+ /* walk over the address space and update the page directory */
+ for (pt_idx = start; pt_idx <= end; ++pt_idx) {
+ uint64_t pde, pt;
+
+ if (vm->page_tables[pt_idx])
+ continue;
+
+retry:
+ r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+ &vm->page_tables[pt_idx],
+ RADEON_VM_PTE_COUNT * 8,
+ RADEON_GPU_PAGE_SIZE, false);
+
+ if (r == -ENOMEM) {
+ r = radeon_vm_evict(rdev, vm);
+ if (r)
+ return r;
+ goto retry;
+ } else if (r) {
+ return r;
+ }
+
+ pde = vm->pd_gpu_addr + pt_idx * 8;
+
+ pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+
+ if (((last_pde + 8 * count) != pde) ||
+ ((last_pt + incr * count) != pt)) {
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, last_pde,
+ last_pt, count, incr,
+ RADEON_VM_PAGE_VALID);
+ }
+
+ count = 1;
+ last_pde = pde;
+ last_pt = pt;
+ } else {
+ ++count;
+ }
+ }
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+ incr, RADEON_VM_PAGE_VALID);
+
+ }
+
+ return 0;
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_update_ptes(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint32_t flags)
+{
+ static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+
+ uint64_t last_pte = ~0, last_dst = ~0;
+ unsigned count = 0;
+ uint64_t addr;
+
+ start = start / RADEON_GPU_PAGE_SIZE;
+ end = end / RADEON_GPU_PAGE_SIZE;
+
+ /* walk over the address space and update the page tables */
+ for (addr = start; addr < end; ) {
+ uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+ unsigned nptes;
+ uint64_t pte;
+
+ if ((addr & ~mask) == (end & ~mask))
+ nptes = end - addr;
+ else
+ nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+ pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+ pte += (addr & mask) * 8;
+
+ if ((last_pte + 8 * count) != pte) {
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, last_pte,
+ last_dst, count,
+ RADEON_GPU_PAGE_SIZE,
+ flags);
+ }
+
+ count = nptes;
+ last_pte = pte;
+ last_dst = dst;
+ } else {
+ count += nptes;
+ }
+
+ addr += nptes;
+ dst += nptes * RADEON_GPU_PAGE_SIZE;
+ }
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
+ RADEON_GPU_PAGE_SIZE, flags);
+ }
+}
+
+/**
* radeon_vm_bo_update_pte - map a bo into the vm page table
*
* @rdev: radeon_device pointer
@@ -887,12 +1084,11 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_semaphore *sem = NULL;
struct radeon_bo_va *bo_va;
unsigned nptes, npdes, ndw;
- uint64_t pe, addr;
- uint64_t pfn;
+ uint64_t addr;
int r;
/* nothing to do if vm isn't bound */
- if (vm->sa_bo == NULL)
+ if (vm->page_directory == NULL)
return 0;
bo_va = radeon_vm_bo_find(vm, bo);
@@ -939,25 +1135,29 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
}
}
- /* estimate number of dw needed */
- /* reserve space for 32-bit padding */
- ndw = 32;
-
nptes = radeon_bo_ngpu_pages(bo);
- pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE);
+ /* assume two extra pdes in case the mapping overlaps the borders */
+ npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
- /* handle cases where a bo spans several pdes */
- npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
- (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
+ /* estimate number of dw needed */
+ /* semaphore, fence and padding */
+ ndw = 32;
+
+ if (RADEON_VM_BLOCK_SIZE > 11)
+ /* reserve space for one header for every 2k dwords */
+ ndw += (nptes >> 11) * 4;
+ else
+ /* reserve space for one header for
+ every (1 << BLOCK_SIZE) entries */
+ ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
- /* reserve space for one header for every 2k dwords */
- ndw += (nptes >> 11) * 3;
/* reserve space for pte addresses */
ndw += nptes * 2;
/* reserve space for one header for every 2k dwords */
- ndw += (npdes >> 11) * 3;
+ ndw += (npdes >> 11) * 4;
+
/* reserve space for pde addresses */
ndw += npdes * 2;
@@ -971,22 +1171,14 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
radeon_fence_note_sync(vm->fence, ridx);
}
- /* update page table entries */
- pe = vm->pd_gpu_addr;
- pe += radeon_vm_directory_size(rdev);
- pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
-
- radeon_asic_vm_set_page(rdev, pe, addr, nptes,
- RADEON_GPU_PAGE_SIZE, bo_va->flags);
-
- /* update page directory entries */
- addr = pe;
-
- pe = vm->pd_gpu_addr;
- pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
+ r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
- radeon_asic_vm_set_page(rdev, pe, addr, npdes,
- RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID);
+ radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
+ addr, bo_va->flags);
radeon_fence_unref(&vm->fence);
r = radeon_fence_emit(rdev, &vm->fence, ridx);
@@ -997,6 +1189,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
radeon_ring_unlock_commit(rdev, ring);
radeon_semaphore_free(rdev, &sem, vm->fence);
radeon_fence_unref(&vm->last_flush);
+
return 0;
}
@@ -1056,31 +1249,15 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
* @rdev: radeon_device pointer
* @vm: requested vm
*
- * Init @vm (cayman+).
- * Map the IB pool and any other shared objects into the VM
- * by default as it's used by all VMs.
- * Returns 0 for success, error for failure.
+ * Init @vm fields (cayman+).
*/
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
- struct radeon_bo_va *bo_va;
- int r;
-
vm->id = 0;
vm->fence = NULL;
- vm->last_pfn = 0;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
-
- /* map the ib pool buffer at 0 in virtual address space, set
- * read only
- */
- bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
- r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
- RADEON_VM_PAGE_READABLE |
- RADEON_VM_PAGE_SNOOPED);
- return r;
}
/**
@@ -1102,17 +1279,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_vm_free_pt(rdev, vm);
mutex_unlock(&rdev->vm_manager.lock);
- /* remove all bo at this point non are busy any more because unbind
- * waited for the last vm fence to signal
- */
- r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
- if (!r) {
- bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
- list_del_init(&bo_va->bo_list);
- list_del_init(&bo_va->vm_list);
- radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
- kfree(bo_va);
- }
if (!list_empty(&vm->va)) {
dev_err(rdev->dev, "still active bo inside vm\n");
}
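
With page tables now allocated per page-directory entry, the sizing done by radeon_vm_num_pdes() and radeon_vm_directory_size() above is easy to check by hand. A worked example, assuming the usual RADEON_VM_BLOCK_SIZE of 9 (512 PTEs per table; the real value lives in radeon.h) and the max_pfn of 1 << 20 set in radeon_device.c above:

#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE	4096
#define RADEON_VM_BLOCK_SIZE	9	/* assumption for this example */
#define RADEON_VM_PTE_COUNT	(1 << RADEON_VM_BLOCK_SIZE)

int main(void)
{
	unsigned max_pfn = 1 << 20;				/* 4GB of GPU VA */
	unsigned num_pdes = max_pfn >> RADEON_VM_BLOCK_SIZE;	/* 2048 PDEs */
	unsigned pd_bytes = num_pdes * 8;			/* 16 KiB directory */
	unsigned long long pt_covers =
		(unsigned long long)RADEON_VM_PTE_COUNT * RADEON_GPU_PAGE_SIZE;

	printf("%u pdes, %u byte page directory, each page table maps %llu MiB\n",
	       num_pdes, pd_bytes, pt_covers >> 20);
	return 0;
}
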
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f38fbcc46935..fe5c1f6b7957 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -53,6 +53,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
struct drm_gem_object **obj)
{
struct radeon_bo *robj;
+ unsigned long max_size;
int r;
*obj = NULL;
@@ -60,11 +61,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
+
+ /* maximum bo size is the minimum of visible vram and gtt size */
+ max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+ if (size > max_size) {
+ printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
+ __func__, __LINE__, size >> 20, max_size >> 20);
+ return -ENOMEM;
+ }
+
+retry:
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
if (r) {
- if (r != -ERESTARTSYS)
+ if (r != -ERESTARTSYS) {
+ if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
+ initial_domain |= RADEON_GEM_DOMAIN_GTT;
+ goto retry;
+ }
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
+ }
return r;
}
*obj = &robj->gem_base;
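
radeon_gem_object_create() above now rejects requests larger than min(visible VRAM, GTT) and, when a VRAM allocation still fails, widens the domain to include GTT and retries once. A compact sketch of that fallback pattern with placeholder domains and a fake allocator:

#include <errno.h>
#include <stdio.h>

enum { DOMAIN_VRAM = 0x1, DOMAIN_GTT = 0x2 };

/* Fake allocator: pretend VRAM is exhausted so the fallback path runs. */
static int try_alloc(unsigned domain, unsigned long size)
{
	(void)size;
	return (domain & DOMAIN_GTT) ? 0 : -ENOMEM;
}

static int create_object(unsigned long size, unsigned long visible_vram,
			 unsigned long gtt_size)
{
	unsigned long max_size = visible_vram < gtt_size ? visible_vram : gtt_size;
	unsigned domain = DOMAIN_VRAM;
	int r;

	if (size > max_size)		/* reject oversized requests up front */
		return -ENOMEM;

retry:
	r = try_alloc(domain, size);
	if (r) {
		if (domain == DOMAIN_VRAM) {
			domain |= DOMAIN_GTT;	/* widen the domain, retry once */
			goto retry;
		}
		return r;
	}
	return 0;
}

int main(void)
{
	printf("result: %d\n", create_object(64UL << 20, 256UL << 20, 512UL << 20));
	return 0;
}
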
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 83b8d8aa71c0..dc781c49b96b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -419,6 +419,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
+ struct radeon_bo_va *bo_va;
int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -426,7 +427,15 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
- r = radeon_vm_init(rdev, &fpriv->vm);
+ radeon_vm_init(rdev, &fpriv->vm);
+
+ /* map the ib pool buffer read only into
+ * virtual address space */
+ bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+ rdev->ring_tmp_bo.bo);
+ r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+ RADEON_VM_PAGE_READABLE |
+ RADEON_VM_PAGE_SNOOPED);
if (r) {
radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
@@ -454,6 +463,17 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
struct radeon_fpriv *fpriv = file_priv->driver_priv;
+ struct radeon_bo_va *bo_va;
+ int r;
+
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (!r) {
+ bo_va = radeon_vm_bo_find(&fpriv->vm,
+ rdev->ring_tmp_bo.bo);
+ if (bo_va)
+ radeon_vm_bo_rmv(rdev, bo_va);
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+ }
radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 5677a424b585..6857cb4efb76 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -295,6 +295,7 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ uint32_t crtc_ext_cntl = 0;
uint32_t mask;
if (radeon_crtc->crtc_id)
@@ -307,6 +308,16 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
RADEON_CRTC_VSYNC_DIS |
RADEON_CRTC_HSYNC_DIS);
+ /*
+ * On all dual CRTC GPUs this bit controls the CRTC of the primary DAC.
+	 * Therefore it is set in the DAC DPMS function.
+	 * This is different for GPUs with a single CRTC but a primary and a
+ * TV DAC: here it controls the single CRTC no matter where it is
+ * routed. Therefore we set it here.
+ */
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ crtc_ext_cntl = RADEON_CRTC_CRT_ON;
+
switch (mode) {
case DRM_MODE_DPMS_ON:
radeon_crtc->enabled = true;
@@ -317,7 +328,7 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
- WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
+ WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
}
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
@@ -331,7 +342,7 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
else {
WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
RADEON_CRTC_DISP_REQ_EN_B));
- WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask);
+ WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~(mask | crtc_ext_cntl));
}
radeon_crtc->enabled = false;
/* adjust pm to dpms changes AFTER disabling crtcs */
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 92487e614778..f5ba2241dacc 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -269,27 +269,6 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
.disable = radeon_legacy_encoder_disable,
};
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
-static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
-{
- struct radeon_backlight_privdata *pdata = bl_get_data(bd);
- uint8_t level;
-
- /* Convert brightness to hardware level */
- if (bd->props.brightness < 0)
- level = 0;
- else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
- level = RADEON_MAX_BL_LEVEL;
- else
- level = bd->props.brightness;
-
- if (pdata->negative)
- level = RADEON_MAX_BL_LEVEL - level;
-
- return level;
-}
-
u8
radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
{
@@ -331,6 +310,27 @@ radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 leve
radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
}
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
+{
+ struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+ uint8_t level;
+
+ /* Convert brightness to hardware level */
+ if (bd->props.brightness < 0)
+ level = 0;
+ else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+ level = RADEON_MAX_BL_LEVEL;
+ else
+ level = bd->props.brightness;
+
+ if (pdata->negative)
+ level = RADEON_MAX_BL_LEVEL - level;
+
+ return level;
+}
+
static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
@@ -370,6 +370,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
uint8_t backlight_level;
+ char bl_name[16];
if (!radeon_encoder->enc_priv)
return;
@@ -389,7 +390,9 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
memset(&props, 0, sizeof(props));
props.max_brightness = RADEON_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
- bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+ snprintf(bl_name, sizeof(bl_name),
+ "radeon_bl%d", dev->primary->index);
+ bd = backlight_device_register(bl_name, &drm_connector->kdev,
pdata, &radeon_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
@@ -534,7 +537,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
break;
}
- WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
+ /* handled in radeon_crtc_dpms() */
+ if (!(rdev->flags & RADEON_SINGLE_CRTC))
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
WREG32(RADEON_DAC_CNTL, dac_cntl);
WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
@@ -659,6 +664,8 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
if (ASIC_IS_R300(rdev))
tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
+ else if (ASIC_IS_RV100(rdev))
+ tmp |= (0x1ac << RADEON_DAC_FORCE_DATA_SHIFT);
else
tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
@@ -668,6 +675,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
WREG32(RADEON_DAC_CNTL, tmp);
+ tmp = dac_macro_cntl;
tmp &= ~(RADEON_DAC_PDWN_R |
RADEON_DAC_PDWN_G |
RADEON_DAC_PDWN_B);
@@ -991,11 +999,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
- if (tmds) {
- if (tmds->i2c_bus)
- radeon_i2c_destroy(tmds->i2c_bus);
- }
+	/* don't destroy the i2c bus record here; this will be done in radeon_i2c_fini */
kfree(radeon_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(radeon_encoder);
@@ -1093,7 +1097,8 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
} else {
if (is_tv)
WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
- else
+ /* handled in radeon_crtc_dpms() */
+ else if (!(rdev->flags & RADEON_SINGLE_CRTC))
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
}
@@ -1417,13 +1422,104 @@ static bool radeon_legacy_tv_detect(struct drm_encoder *encoder,
return found;
}
+static bool radeon_legacy_ext_dac_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ uint32_t gpio_monid, fp2_gen_cntl, disp_output_cntl, crtc2_gen_cntl;
+ uint32_t disp_lin_trans_grph_a, disp_lin_trans_grph_b, disp_lin_trans_grph_c;
+ uint32_t disp_lin_trans_grph_d, disp_lin_trans_grph_e, disp_lin_trans_grph_f;
+ uint32_t tmp, crtc2_h_total_disp, crtc2_v_total_disp;
+ uint32_t crtc2_h_sync_strt_wid, crtc2_v_sync_strt_wid;
+ bool found = false;
+ int i;
+
+ /* save the regs we need */
+ gpio_monid = RREG32(RADEON_GPIO_MONID);
+ fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ disp_lin_trans_grph_a = RREG32(RADEON_DISP_LIN_TRANS_GRPH_A);
+ disp_lin_trans_grph_b = RREG32(RADEON_DISP_LIN_TRANS_GRPH_B);
+ disp_lin_trans_grph_c = RREG32(RADEON_DISP_LIN_TRANS_GRPH_C);
+ disp_lin_trans_grph_d = RREG32(RADEON_DISP_LIN_TRANS_GRPH_D);
+ disp_lin_trans_grph_e = RREG32(RADEON_DISP_LIN_TRANS_GRPH_E);
+ disp_lin_trans_grph_f = RREG32(RADEON_DISP_LIN_TRANS_GRPH_F);
+ crtc2_h_total_disp = RREG32(RADEON_CRTC2_H_TOTAL_DISP);
+ crtc2_v_total_disp = RREG32(RADEON_CRTC2_V_TOTAL_DISP);
+ crtc2_h_sync_strt_wid = RREG32(RADEON_CRTC2_H_SYNC_STRT_WID);
+ crtc2_v_sync_strt_wid = RREG32(RADEON_CRTC2_V_SYNC_STRT_WID);
+
+ tmp = RREG32(RADEON_GPIO_MONID);
+ tmp &= ~RADEON_GPIO_A_0;
+ WREG32(RADEON_GPIO_MONID, tmp);
+
+ WREG32(RADEON_FP2_GEN_CNTL, (RADEON_FP2_ON |
+ RADEON_FP2_PANEL_FORMAT |
+ R200_FP2_SOURCE_SEL_TRANS_UNIT |
+ RADEON_FP2_DVO_EN |
+ R200_FP2_DVO_RATE_SEL_SDR));
+
+ WREG32(RADEON_DISP_OUTPUT_CNTL, (RADEON_DISP_DAC_SOURCE_RMX |
+ RADEON_DISP_TRANS_MATRIX_GRAPHICS));
+
+ WREG32(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_EN |
+ RADEON_CRTC2_DISP_REQ_EN_B));
+
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, 0x00000000);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, 0x000003f0);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, 0x00000000);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, 0x000003f0);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, 0x00000000);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, 0x000003f0);
+
+ WREG32(RADEON_CRTC2_H_TOTAL_DISP, 0x01000008);
+ WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, 0x00000800);
+ WREG32(RADEON_CRTC2_V_TOTAL_DISP, 0x00080001);
+ WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, 0x00000080);
+
+ for (i = 0; i < 200; i++) {
+ tmp = RREG32(RADEON_GPIO_MONID);
+ if (tmp & RADEON_GPIO_Y_0)
+ found = true;
+
+ if (found)
+ break;
+
+ if (!drm_can_sleep())
+ mdelay(1);
+ else
+ msleep(1);
+ }
+
+ /* restore the regs we used */
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, disp_lin_trans_grph_a);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, disp_lin_trans_grph_b);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, disp_lin_trans_grph_c);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, disp_lin_trans_grph_d);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, disp_lin_trans_grph_e);
+ WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, disp_lin_trans_grph_f);
+ WREG32(RADEON_CRTC2_H_TOTAL_DISP, crtc2_h_total_disp);
+ WREG32(RADEON_CRTC2_V_TOTAL_DISP, crtc2_v_total_disp);
+ WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, crtc2_h_sync_strt_wid);
+ WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, crtc2_v_sync_strt_wid);
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
+ WREG32(RADEON_GPIO_MONID, gpio_monid);
+
+ return found;
+}
+
static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
- uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp;
+ uint32_t crtc2_gen_cntl = 0, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
+ uint32_t gpiopad_a = 0, pixclks_cntl, tmp;
+ uint32_t disp_output_cntl = 0, disp_hw_debug = 0, crtc_ext_cntl = 0;
enum drm_connector_status found = connector_status_disconnected;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
@@ -1460,12 +1556,27 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
return connector_status_disconnected;
}
+	/* R200 uses an external DAC for the secondary DAC */
+ if (rdev->family == CHIP_R200) {
+ if (radeon_legacy_ext_dac_detect(encoder, connector))
+ found = connector_status_connected;
+ return found;
+ }
+
/* save the regs we need */
pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
- gpiopad_a = ASIC_IS_R300(rdev) ? RREG32(RADEON_GPIOPAD_A) : 0;
- disp_output_cntl = ASIC_IS_R300(rdev) ? RREG32(RADEON_DISP_OUTPUT_CNTL) : 0;
- disp_hw_debug = ASIC_IS_R300(rdev) ? 0 : RREG32(RADEON_DISP_HW_DEBUG);
- crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+ } else {
+ if (ASIC_IS_R300(rdev)) {
+ gpiopad_a = RREG32(RADEON_GPIOPAD_A);
+ disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
+ } else {
+ disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+ }
+ crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
+ }
tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
@@ -1474,22 +1585,24 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
| RADEON_PIX2CLK_DAC_ALWAYS_ONb);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
- if (ASIC_IS_R300(rdev))
- WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
-
- tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
- tmp |= RADEON_CRTC2_CRT2_ON |
- (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
-
- WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
-
- if (ASIC_IS_R300(rdev)) {
- tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
- tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
- WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
+ WREG32(RADEON_CRTC_EXT_CNTL, tmp);
} else {
- tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
- WREG32(RADEON_DISP_HW_DEBUG, tmp);
+ tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
+ tmp |= RADEON_CRTC2_CRT2_ON |
+ (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
+ WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+
+ if (ASIC_IS_R300(rdev)) {
+ WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
+ tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
+ tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
+ WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
+ } else {
+ tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
+ WREG32(RADEON_DISP_HW_DEBUG, tmp);
+ }
}
tmp = RADEON_TV_DAC_NBLANK |
@@ -1531,14 +1644,19 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
WREG32(RADEON_DAC_CNTL2, dac_cntl2);
WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
- WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
- if (ASIC_IS_R300(rdev)) {
- WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
- WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+ if (rdev->flags & RADEON_SINGLE_CRTC) {
+ WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
} else {
- WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+ WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
+ if (ASIC_IS_R300(rdev)) {
+ WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
+ WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
+ } else {
+ WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+ }
}
+
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
return found;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8b27dd6e3144..b91118ccef86 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -105,7 +105,6 @@ int radeon_bo_create(struct radeon_device *rdev,
struct radeon_bo *bo;
enum ttm_bo_type type;
unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
- unsigned long max_size = 0;
size_t acc_size;
int r;
@@ -121,18 +120,9 @@ int radeon_bo_create(struct radeon_device *rdev,
}
*bo_ptr = NULL;
- /* maximun bo size is the minimun btw visible vram and gtt size */
- max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
- if ((page_align << PAGE_SHIFT) >= max_size) {
- printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
- __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
- return -ENOMEM;
- }
-
acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
sizeof(struct radeon_bo));
-retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
@@ -154,15 +144,6 @@ retry:
acc_size, sg, &radeon_ttm_bo_destroy);
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS) {
- if (domain == RADEON_GEM_DOMAIN_VRAM) {
- domain |= RADEON_GEM_DOMAIN_GTT;
- goto retry;
- }
- dev_err(rdev->dev,
- "object_init failed for (%lu, 0x%08X)\n",
- size, domain);
- }
return r;
}
*bo_ptr = bo;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index bba66902c83b..47634f27f2e5 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -305,7 +305,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
if (ring->count_dw <= 0) {
- DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+ DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
}
#endif
ring->ring[ring->wptr++] = v;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index f79633a036c3..4422d630b33b 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2407,12 +2407,13 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
WREG32(0x15DC, 0);
/* empty context1-15 */
- /* FIXME start with 4G, once using 2 level pt switch to full
- * vm size space
- */
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
+ /* Assign the pt base to something valid for now; the pts used for
+	 * the VMs are determined by the application and are set up and assigned
+ * on the fly in the vm part of radeon_gart.c
+ */
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
@@ -2473,6 +2474,7 @@ static bool si_vm_reg_valid(u32 reg)
/* check config regs */
switch (reg) {
case GRBM_GFX_INDEX:
+ case CP_STRMOUT_CNTL:
case VGT_VTX_VECT_EJECT_REG:
case VGT_CACHE_INVALIDATION:
case VGT_ESGS_RING_SIZE:
@@ -2807,26 +2809,31 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
- int i;
- uint64_t value;
- radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(1)));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe));
- for (i = 0; i < count; ++i) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- addr += incr;
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
+ while (count) {
+ unsigned ndw = 2 + count * 2;
+ if (ndw > 0x3FFE)
+ ndw = 0x3FFE;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1)));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe));
+ for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+ uint64_t value;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
}
}
@@ -2867,6 +2874,10 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm->id);
+
+ /* sync PFP to ME, otherwise we might get invalid PFP reads */
+ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+ radeon_ring_write(ring, 0x0);
}
/*
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7d2a20e56577..a8871afc5b4e 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -424,6 +424,7 @@
# define RDERR_INT_ENABLE (1 << 0)
# define GUI_IDLE_INT_ENABLE (1 << 19)
+#define CP_STRMOUT_CNTL 0x84FC
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index c71d493fd0c5..1c350fc4e449 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -201,6 +201,8 @@ static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
goto done;
}
+ platform_set_drvdata(pdev, sdev);
+
done:
if (ret)
shmob_drm_unload(dev);
@@ -299,11 +301,9 @@ static struct drm_driver shmob_drm_driver = {
#if CONFIG_PM_SLEEP
static int shmob_drm_pm_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *ddev = platform_get_drvdata(pdev);
- struct shmob_drm_device *sdev = ddev->dev_private;
+ struct shmob_drm_device *sdev = dev_get_drvdata(dev);
- drm_kms_helper_poll_disable(ddev);
+ drm_kms_helper_poll_disable(sdev->ddev);
shmob_drm_crtc_suspend(&sdev->crtc);
return 0;
@@ -311,9 +311,7 @@ static int shmob_drm_pm_suspend(struct device *dev)
static int shmob_drm_pm_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *ddev = platform_get_drvdata(pdev);
- struct shmob_drm_device *sdev = ddev->dev_private;
+ struct shmob_drm_device *sdev = dev_get_drvdata(dev);
mutex_lock(&sdev->ddev->mode_config.mutex);
shmob_drm_crtc_resume(&sdev->crtc);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 402ab69f9f99..bf6e4b5a73b5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -580,6 +580,7 @@ retry:
if (unlikely(ret != 0))
return ret;
+retry_reserve:
spin_lock(&glob->lru_lock);
if (unlikely(list_empty(&bo->ddestroy))) {
@@ -587,14 +588,20 @@ retry:
return 0;
}
- ret = ttm_bo_reserve_locked(bo, interruptible,
- no_wait_reserve, false, 0);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- if (unlikely(ret != 0)) {
+ if (unlikely(ret == -EBUSY)) {
spin_unlock(&glob->lru_lock);
- return ret;
+ if (likely(!no_wait_reserve))
+ ret = ttm_bo_wait_unreserved(bo, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+
+ goto retry_reserve;
}
+ BUG_ON(ret != 0);
+
/**
* We can re-check for sync object without taking
* the bo::lock since setting the sync object requires
@@ -811,17 +818,14 @@ retry:
no_wait_reserve, no_wait_gpu);
kref_put(&bo->list_kref, ttm_bo_release_list);
- if (likely(ret == 0 || ret == -ERESTARTSYS))
- return ret;
-
- goto retry;
+ return ret;
}
- ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
if (unlikely(ret == -EBUSY)) {
spin_unlock(&glob->lru_lock);
- if (likely(!no_wait_gpu))
+ if (likely(!no_wait_reserve))
ret = ttm_bo_wait_unreserved(bo, interruptible);
kref_put(&bo->list_kref, ttm_bo_release_list);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 860dc4813e99..bd2a3b40cd12 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -749,7 +749,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
list_for_each_entry(p, &plist, lru) {
- clear_page(page_address(p));
+ if (PageHighMem(p))
+ clear_highpage(p);
+ else
+ clear_page(page_address(p));
}
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bf8260133ea9..7d759a430294 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -308,9 +308,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
if (unlikely(to_page == NULL))
goto out_err;
- preempt_disable();
copy_highpage(to_page, from_page);
- preempt_enable();
page_cache_release(from_page);
}
@@ -358,9 +356,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
ret = PTR_ERR(to_page);
goto out_err;
}
- preempt_disable();
copy_highpage(to_page, from_page);
- preempt_enable();
set_page_dirty(to_page);
mark_page_accessed(to_page);
page_cache_release(to_page);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index fccd361f7b50..87aa5f5d3c88 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -104,7 +104,7 @@ udl_fb_user_fb_create(struct drm_device *dev,
int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
- u32 byte_offset, u32 byte_width,
+ u32 byte_offset, u32 device_byte_offset, u32 byte_width,
int *ident_ptr, int *sent_ptr);
int udl_dumb_create(struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 69a2b16f42a6..d4ab3beaada0 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -114,9 +114,10 @@ static void udlfb_dpy_deferred_io(struct fb_info *info,
list_for_each_entry(cur, &fbdefio->pagelist, lru) {
if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
- &urb, (char *) info->fix.smem_start,
- &cmd, cur->index << PAGE_SHIFT,
- PAGE_SIZE, &bytes_identical, &bytes_sent))
+ &urb, (char *) info->fix.smem_start,
+ &cmd, cur->index << PAGE_SHIFT,
+ cur->index << PAGE_SHIFT,
+ PAGE_SIZE, &bytes_identical, &bytes_sent))
goto error;
bytes_rendered += PAGE_SIZE;
}
@@ -187,10 +188,11 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
for (i = y; i < y + height ; i++) {
const int line_offset = fb->base.pitches[0] * i;
const int byte_offset = line_offset + (x * bpp);
-
+ const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
if (udl_render_hline(dev, bpp, &urb,
(char *) fb->obj->vmapping,
- &cmd, byte_offset, width * bpp,
+ &cmd, byte_offset, dev_byte_offset,
+ width * bpp,
&bytes_identical, &bytes_sent))
goto error;
}
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index dc095526ffb7..142fee5f983f 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -213,11 +213,12 @@ static void udl_compress_hline16(
*/
int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
- u32 byte_offset, u32 byte_width,
+ u32 byte_offset, u32 device_byte_offset,
+ u32 byte_width,
int *ident_ptr, int *sent_ptr)
{
const u8 *line_start, *line_end, *next_pixel;
- u32 base16 = 0 + (byte_offset / bpp) * 2;
+ u32 base16 = 0 + (device_byte_offset / bpp) * 2;
struct urb *urb = *urb_ptr;
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 3ce68a2e312d..d1498bfd7873 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -306,7 +306,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
BUG_ON(!atomic_read(&bo->reserved));
BUG_ON(old_mem_type != TTM_PL_VRAM &&
- old_mem_type != VMW_PL_FLAG_GMR);
+ old_mem_type != VMW_PL_GMR);
pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
if (pin)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ed3c1e7ddde9..2dd185e42f21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1098,6 +1098,11 @@ static void vmw_pm_complete(struct device *kdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
+ mutex_lock(&dev_priv->hw_mutex);
+ vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
+ (void) vmw_read(dev_priv, SVGA_REG_ID);
+ mutex_unlock(&dev_priv->hw_mutex);
+
/**
* Reclaim 3d reference held by fbdev and potentially
* start fifo.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b07ca2e4d04b..7290811f89be 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -110,6 +110,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
ret = copy_to_user(buffer, bounce, size);
+ if (ret)
+ ret = -EFAULT;
vfree(bounce);
if (unlikely(ret != 0))