Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 18
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 26
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 1
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 720
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 44
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 159
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 165
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 173
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 133
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 159
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 84
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 77
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 55
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ramht.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_temp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_pm.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_calc.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 309
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 68
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 382
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 97
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c | 81
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 4
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 14
95 files changed, 2389 insertions, 1308 deletions
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6985cb1da72c..2baa6708e44c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -156,12 +156,12 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
{ DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
{ DRM_MODE_CONNECTOR_Component, "Component", 0 },
- { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 },
- { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 },
- { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
- { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+ { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
- { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
};
static struct drm_prop_enum_list drm_encoder_enum_list[] =
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index dcbeb98f195a..bede10a03407 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -241,7 +241,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (!drm_helper_encoder_in_use(encoder)) {
+ if (encoder->crtc && !drm_helper_encoder_in_use(encoder)) {
drm_encoder_disable(encoder);
/* disconnector encoder from any connector */
encoder->crtc = NULL;
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
struct drm_crtc *tmp;
int crtc_mask = 1;
- WARN(!crtc, "checking null crtc?");
+ WARN(!crtc, "checking null crtc?\n");
dev = crtc->dev;
@@ -471,6 +471,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret = 0;
+ int i;
DRM_DEBUG_KMS("\n");
@@ -666,6 +667,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (ret != 0)
goto fail;
}
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+ }
kfree(save_connectors);
kfree(save_encoders);
@@ -841,7 +848,7 @@ static void output_poll_execute(struct work_struct *work)
struct delayed_work *delayed_work = to_delayed_work(work);
struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
- enum drm_connector_status old_status, status;
+ enum drm_connector_status old_status;
bool repoll = false, changed = false;
if (!drm_kms_helper_poll)
@@ -866,8 +873,9 @@ static void output_poll_execute(struct work_struct *work)
!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
- status = connector->funcs->detect(connector, false);
- if (old_status != status)
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("connector status updated to %d\n", connector->status);
+ if (old_status != connector->status)
changed = true;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c1a26217a530..a245d17165ae 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = len,
- .buf = buf + start,
+ .buf = buf,
}
};
@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
- int i, j = 0;
+ int i, j = 0, valid_extensions = 0;
u8 *block, *new;
if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
for (j = 1; j <= block[0x7e]; j++) {
for (i = 0; i < 4; i++) {
- if (drm_do_probe_ddc_edid(adapter, block, j,
- EDID_LENGTH))
+ if (drm_do_probe_ddc_edid(adapter,
+ block + (valid_extensions + 1) * EDID_LENGTH,
+ j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block + j * EDID_LENGTH))
+ if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+ valid_extensions++;
break;
+ }
}
if (i == 4)
- goto carp;
+ dev_warn(connector->dev->dev,
+ "%s: Ignoring invalid EDID block %d.\n",
+ drm_get_connector_name(connector), j);
+ }
+
+ if (valid_extensions != block[0x7e]) {
+ block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+ block[0x7e] = valid_extensions;
+ new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!new)
+ goto out;
+ block = new;
}
return block;
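
The extension-block fix-up above keeps the base EDID block self-consistent: every 128-byte EDID block must sum to 0 mod 256, so when unreadable extension blocks are dropped, lowering the extension-count byte at offset 0x7e by some delta has to be balanced by raising the checksum byte at offset 0x7f by the same delta. A minimal userspace sketch of that arithmetic, with realloc() standing in for krealloc() and error handling elided:

#include <stdint.h>
#include <stdlib.h>

#define EDID_LENGTH 128

/* Re-advertise only the extension blocks that validated; byte 0x7e is
 * the extension count and byte 0x7f the checksum, and all 128 bytes of
 * the base block must sum to 0 (mod 256), so the two adjust together. */
static uint8_t *edid_trim_extensions(uint8_t *block, uint8_t valid_extensions)
{
	uint8_t delta = block[0x7e] - valid_extensions;

	block[EDID_LENGTH - 1] += delta;	/* re-balance the checksum */
	block[0x7e] = valid_extensions;		/* drop the bad blocks */
	return realloc(block, (valid_extensions + 1) * EDID_LENGTH);
}
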
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index b744dad5c237..a39794bac04b 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -37,7 +37,6 @@
#include "drmP.h"
#include <linux/poll.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
/* from BKL pushdown: note that nothing else serializes idr_find() */
DEFINE_MUTEX(drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 9d3a5030b6e1..16d5155edad1 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -585,10 +585,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
struct timeval now;
unsigned long flags;
unsigned int seq;
+ int ret;
e = kzalloc(sizeof *e, GFP_KERNEL);
- if (e == NULL)
- return -ENOMEM;
+ if (e == NULL) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
e->pipe = pipe;
e->base.pid = current->pid;
@@ -603,9 +606,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
- return -ENOMEM;
+ ret = -EBUSY;
+ goto err_unlock;
}
file_priv->event_space -= sizeof e->event;
@@ -626,7 +628,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
e->event.tv_sec = now.tv_sec;
e->event.tv_usec = now.tv_usec;
- drm_vblank_put(dev, e->pipe);
+ drm_vblank_put(dev, pipe);
list_add_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
trace_drm_vblank_event_delivered(current->pid, pipe,
@@ -638,6 +640,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+err_put:
+ drm_vblank_put(dev, pipe);
+ return ret;
}
/**
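
The drm_irq.c change above is a conversion to goto-based unwinding: the vblank reference taken before this function is now dropped exactly once on every failure path (the old code leaked it on the early returns), and running out of event space reports -EBUSY rather than -ENOMEM. The shape of the pattern, as a simplified sketch whose lock()/unlock()/resource_put() helpers are illustrative stand-ins rather than the DRM primitives:

#include <errno.h>
#include <stdlib.h>

static void lock(void) { }
static void unlock(void) { }
static void resource_put(void) { }	/* stands in for drm_vblank_put() */
static int have_event_space(void) { return 1; }

static int queue_event(void)
{
	int ret;
	void *e = malloc(64);

	if (e == NULL) {
		ret = -ENOMEM;
		goto err_put;		/* only the reference is held */
	}

	lock();
	if (!have_event_space()) {
		ret = -EBUSY;		/* queue full, not out of memory */
		goto err_unlock;
	}
	/* ... queue the event (which now owns e) under the lock ... */
	unlock();
	return 0;

err_unlock:
	unlock();
	free(e);
err_put:
	resource_put();			/* release in reverse order of acquisition */
	return ret;
}
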
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7a26f4dd21ae..e6800819bca8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -767,6 +767,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BLT:
value = HAS_BLT(dev);
break;
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd420760..f737960712e6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
@@ -150,7 +150,8 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1,
- .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .has_fbc = 0, /* disabled due to buggy hardware */
.has_bsd_ring = 1,
};
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b6285e..409826da3099 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1045,6 +1045,8 @@ void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
+int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+ bool interruptible);
int i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
@@ -1321,6 +1323,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b5..275ec6ed43ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,8 +38,7 @@
static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
- bool pipelined);
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -547,6 +546,19 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj_priv;
int ret = 0;
+ if (args->size == 0)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size))
+ return -EFAULT;
+
+ ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+ args->size);
+ if (ret)
+ return -EFAULT;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -564,23 +576,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
- if (args->size == 0)
- goto out;
-
- if (!access_ok(VERIFY_WRITE,
- (char __user *)(uintptr_t)args->data_ptr,
- args->size)) {
- ret = -EFAULT;
- goto out;
- }
-
- ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
- args->size);
- if (ret) {
- ret = -EFAULT;
- goto out;
- }
-
ret = i915_gem_object_get_pages_or_evict(obj);
if (ret)
goto out;
@@ -981,7 +976,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_pwrite *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- int ret = 0;
+ int ret;
+
+ if (args->size == 0)
+ return 0;
+
+ if (!access_ok(VERIFY_READ,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size))
+ return -EFAULT;
+
+ ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
+ args->size);
+ if (ret)
+ return -EFAULT;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -994,30 +1002,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
obj_priv = to_intel_bo(obj);
-
/* Bounds check destination. */
if (args->offset > obj->size || args->size > obj->size - args->offset) {
ret = -EINVAL;
goto out;
}
- if (args->size == 0)
- goto out;
-
- if (!access_ok(VERIFY_READ,
- (char __user *)(uintptr_t)args->data_ptr,
- args->size)) {
- ret = -EFAULT;
- goto out;
- }
-
- ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
- args->size);
- if (ret) {
- ret = -EFAULT;
- goto out;
- }
-
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
@@ -2172,7 +2162,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
static int i915_ring_idle(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- if (list_empty(&ring->gpu_write_list))
+ if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
return 0;
i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2180,7 @@ i915_gpu_idle(struct drm_device *dev)
int ret;
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- list_empty(&dev_priv->bsd_ring.active_list) &&
- list_empty(&dev_priv->blt_ring.active_list));
+ list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return 0;
@@ -2605,7 +2593,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
if (reg->gpu) {
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
@@ -2753,8 +2741,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
/** Flushes any GPU write domain for the object if it's dirty. */
static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
- bool pipelined)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
@@ -2773,10 +2760,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
obj->read_domains,
old_write_domain);
- if (pipelined)
- return 0;
-
- return i915_gem_object_wait_rendering(obj, true);
+ return 0;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2837,18 +2821,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret != 0)
return ret;
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
i915_gem_object_flush_cpu_write_domain(obj);
- if (write) {
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
- }
-
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
@@ -2886,7 +2867,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
@@ -2909,6 +2890,20 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
return 0;
}
+int
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+ bool interruptible)
+{
+ if (!obj->active)
+ return 0;
+
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
+ 0, obj->base.write_domain);
+
+ return i915_gem_object_wait_rendering(&obj->base, interruptible);
+}
+
/**
* Moves a single object to the CPU read, and possibly write domain.
*
@@ -2921,9 +2916,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret != 0)
return ret;
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
i915_gem_object_flush_gtt_write_domain(obj);
@@ -2932,12 +2930,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
- if (write) {
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
- }
-
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
@@ -3108,7 +3100,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
* write domain
*/
if (obj->write_domain &&
- obj->write_domain != obj->pending_read_domains) {
+ (obj->write_domain != obj->pending_read_domains ||
+ obj_priv->ring != ring)) {
flush_domains |= obj->write_domain;
invalidate_domains |=
obj->pending_read_domains & ~obj->write_domain;
@@ -3201,9 +3194,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
if (offset == 0 && size == obj->size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret != 0)
return ret;
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're already fully in the CPU read domain, we're done. */
@@ -3250,192 +3247,230 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
return 0;
}
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
static int
-i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry)
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_relocation_entry __user *user_relocs;
- struct drm_gem_object *target_obj = NULL;
- uint32_t target_handle = 0;
- int i, ret = 0;
+ struct drm_gem_object *target_obj;
+ uint32_t target_offset;
+ int ret = -EINVAL;
- user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry reloc;
- uint32_t target_offset;
+ target_obj = drm_gem_object_lookup(dev, file_priv,
+ reloc->target_handle);
+ if (target_obj == NULL)
+ return -ENOENT;
- if (__copy_from_user_inatomic(&reloc,
- user_relocs+i,
- sizeof(reloc))) {
- ret = -EFAULT;
- break;
- }
+ target_offset = to_intel_bo(target_obj)->gtt_offset;
- if (reloc.target_handle != target_handle) {
- drm_gem_object_unreference(target_obj);
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+#endif
- target_obj = drm_gem_object_lookup(dev, file_priv,
- reloc.target_handle);
- if (target_obj == NULL) {
- ret = -ENOENT;
- break;
- }
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (target_offset == 0) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc->target_handle);
+ goto err;
+ }
- target_handle = reloc.target_handle;
- }
- target_offset = to_intel_bo(target_obj)->gtt_offset;
+ /* Validate that the target is in a valid r/w GPU domain */
+ if (reloc->write_domain & (reloc->write_domain - 1)) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ goto err;
+ }
+ if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+ reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ goto err;
+ }
+ if (reloc->write_domain && target_obj->pending_write_domain &&
+ reloc->write_domain != target_obj->pending_write_domain) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->write_domain,
+ target_obj->pending_write_domain);
+ goto err;
+ }
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc.offset,
- (int) reloc.target_handle,
- (int) reloc.read_domains,
- (int) reloc.write_domain,
- (int) target_offset,
- (int) reloc.presumed_offset,
- reloc.delta);
-#endif
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (target_offset == 0) {
- DRM_ERROR("No GTT space found for object %d\n",
- reloc.target_handle);
- ret = -EINVAL;
- break;
- }
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_offset == reloc->presumed_offset)
+ goto out;
- /* Validate that the target is in a valid r/w GPU domain */
- if (reloc.write_domain & (reloc.write_domain - 1)) {
- DRM_ERROR("reloc with multiple write domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.read_domains,
- reloc.write_domain);
- ret = -EINVAL;
- break;
- }
- if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
- reloc.read_domains & I915_GEM_DOMAIN_CPU) {
- DRM_ERROR("reloc with read/write CPU domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.read_domains,
- reloc.write_domain);
- ret = -EINVAL;
- break;
- }
- if (reloc.write_domain && target_obj->pending_write_domain &&
- reloc.write_domain != target_obj->pending_write_domain) {
- DRM_ERROR("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.write_domain,
- target_obj->pending_write_domain);
- ret = -EINVAL;
- break;
- }
+ /* Check that the relocation address is valid... */
+ if (reloc->offset > obj->base.size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ (int) obj->base.size);
+ goto err;
+ }
+ if (reloc->offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ goto err;
+ }
- target_obj->pending_read_domains |= reloc.read_domains;
- target_obj->pending_write_domain |= reloc.write_domain;
+ /* and points to somewhere within the target object. */
+ if (reloc->delta >= target_obj->size) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->delta,
+ (int) target_obj->size);
+ goto err;
+ }
- /* If the relocation already has the right value in it, no
- * more work needs to be done.
- */
- if (target_offset == reloc.presumed_offset)
- continue;
+ reloc->delta += target_offset;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+ char *vaddr;
- /* Check that the relocation address is valid... */
- if (reloc.offset > obj->base.size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset, (int) obj->base.size);
- ret = -EINVAL;
- break;
- }
- if (reloc.offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset);
- ret = -EINVAL;
- break;
- }
+ vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+ kunmap_atomic(vaddr);
+ } else {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t __iomem *reloc_entry;
+ void __iomem *reloc_page;
- /* and points to somewhere within the target object. */
- if (reloc.delta >= target_obj->size) {
- DRM_ERROR("Relocation beyond target object bounds: "
- "obj %p target %d delta %d size %d.\n",
- obj, reloc.target_handle,
- (int) reloc.delta, (int) target_obj->size);
- ret = -EINVAL;
- break;
- }
+ ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+ if (ret)
+ goto err;
- reloc.delta += target_offset;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
- uint32_t page_offset = reloc.offset & ~PAGE_MASK;
- char *vaddr;
+ /* Map the page containing the relocation we're going to perform. */
+ reloc->offset += obj->gtt_offset;
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc->offset & PAGE_MASK);
+ reloc_entry = (uint32_t __iomem *)
+ (reloc_page + (reloc->offset & ~PAGE_MASK));
+ iowrite32(reloc->delta, reloc_entry);
+ io_mapping_unmap_atomic(reloc_page);
+ }
- vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
- *(uint32_t *)(vaddr + page_offset) = reloc.delta;
- kunmap_atomic(vaddr);
- } else {
- uint32_t __iomem *reloc_entry;
- void __iomem *reloc_page;
+ /* and update the user's relocation entry */
+ reloc->presumed_offset = target_offset;
- ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
- if (ret)
- break;
+out:
+ ret = 0;
+err:
+ drm_gem_object_unreference(target_obj);
+ return ret;
+}
- /* Map the page containing the relocation we're going to perform. */
- reloc.offset += obj->gtt_offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc.offset & PAGE_MASK);
- reloc_entry = (uint32_t __iomem *)
- (reloc_page + (reloc.offset & ~PAGE_MASK));
- iowrite32(reloc.delta, reloc_entry);
- io_mapping_unmap_atomic(reloc_page);
- }
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry)
+{
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ int i, ret;
+
+ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_i915_gem_relocation_entry reloc;
+
+ if (__copy_from_user_inatomic(&reloc,
+ user_relocs+i,
+ sizeof(reloc)))
+ return -EFAULT;
+
+ ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
+ if (ret)
+ return ret;
- /* and update the user's relocation entry */
- reloc.presumed_offset = target_offset;
if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
- &reloc.presumed_offset,
- sizeof(reloc.presumed_offset))) {
- ret = -EFAULT;
- break;
- }
+ &reloc.presumed_offset,
+ sizeof(reloc.presumed_offset)))
+ return -EFAULT;
}
- drm_gem_object_unreference(target_obj);
- return ret;
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ int i, ret;
+
+ for (i = 0; i < entry->relocation_count; i++) {
+ ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int
-i915_gem_execbuffer_pin(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object(obj, file,
+ &exec_list[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i, retry;
@@ -3497,6 +3532,133 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
return 0;
}
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
+{
+ struct drm_i915_gem_relocation_entry *reloc;
+ int i, total, ret;
+
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->in_execbuffer = false;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ total = 0;
+ for (i = 0; i < count; i++)
+ total += exec_list[i].relocation_count;
+
+ reloc = drm_malloc_ab(total, sizeof(*reloc));
+ if (reloc == NULL) {
+ mutex_lock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+ user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+ if (copy_from_user(reloc+total, user_relocs,
+ exec_list[i].relocation_count *
+ sizeof(*reloc))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ total += exec_list[i].relocation_count;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ ret = i915_gem_execbuffer_reserve(dev, file,
+ object_list, exec_list,
+ count);
+ if (ret)
+ goto err;
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+ &exec_list[i],
+ reloc + total);
+ if (ret)
+ goto err;
+
+ total += exec_list[i].relocation_count;
+ }
+
+ /* Leave the user relocations as are, this is the painfully slow path,
+ * and we want to avoid the complication of dropping the lock whilst
+ * having buffers reserved in the aperture and so causing spurious
+ * ENOSPC for random operations.
+ */
+
+err:
+ drm_free_large(reloc);
+ return ret;
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring,
+ struct drm_gem_object **objects,
+ int count)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret, i;
+
+ /* Zero the global flush/invalidate flags. These
+ * will be modified as new domains are computed
+ * for each object
+ */
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+ dev_priv->mm.flush_rings = 0;
+ for (i = 0; i < count; i++)
+ i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+ if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ dev->invalidate_domains,
+ dev->flush_domains);
+#endif
+ i915_gem_flush(dev, file,
+ dev->invalidate_domains,
+ dev->flush_domains,
+ dev_priv->mm.flush_rings);
+ }
+
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+ /* XXX replace with semaphores */
+ if (obj->ring && ring != obj->ring) {
+ ret = i915_gem_object_wait_rendering(&obj->base, true);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
@@ -3580,8 +3742,15 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
for (i = 0; i < count; i++) {
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
- size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
+ int length; /* limited by fault_in_pages_readable() */
+
+ /* First check for malicious input causing overflow */
+ if (exec[i].relocation_count >
+ INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+ return -EINVAL;
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
if (!access_ok(VERIFY_READ, ptr, length))
return -EFAULT;
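
The new check above closes an integer-overflow hole: relocation_count comes straight from userspace, so count * sizeof(entry) could wrap and make the following access_ok() validate a tiny region while the kernel later walks the full count. The guard is simply to bound the count against INT_MAX divided by the element size before multiplying. A standalone sketch, with struct reloc_entry as a stand-in for drm_i915_gem_relocation_entry:

#include <errno.h>
#include <limits.h>
#include <stdint.h>

struct reloc_entry { uint64_t v[4]; };	/* stand-in, 32 bytes each */

/* Refuse counts whose byte length would not fit in an int *before*
 * multiplying; a wrapped length would pass the later bounds check while
 * describing far less memory than the kernel will actually touch. */
static int reloc_bytes(uint32_t count, int *length)
{
	if (count > INT_MAX / sizeof(struct reloc_entry))
		return -EINVAL;
	*length = count * sizeof(struct reloc_entry);
	return 0;
}
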
@@ -3724,18 +3893,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Move the objects en-masse into the GTT, evicting if necessary. */
- ret = i915_gem_execbuffer_pin(dev, file,
- object_list, exec_list,
- args->buffer_count);
+ ret = i915_gem_execbuffer_reserve(dev, file,
+ object_list, exec_list,
+ args->buffer_count);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->base.pending_read_domains = 0;
- obj->base.pending_write_domain = 0;
- ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+ ret = i915_gem_execbuffer_relocate(dev, file,
+ object_list, exec_list,
+ args->buffer_count);
+ if (ret) {
+ if (ret == -EFAULT) {
+ ret = i915_gem_execbuffer_relocate_slow(dev, file,
+ object_list,
+ exec_list,
+ args->buffer_count);
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ }
if (ret)
goto err;
}
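
The -EFAULT fallback wired in above is the usual fast-path/slow-path split for user memory touched under a mutex: only non-faulting copies are safe while struct_mutex is held, so when the user's relocation pages are not resident the fast path fails, the lock is dropped, everything is copied with a faulting copy into a kernel-side buffer, and the relocations are replayed from that buffer once the lock is retaken. A heavily simplified sketch of the control flow; every helper here is an illustrative stand-in:

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>

static void lock(void) { }
static void unlock(void) { }
/* fast path: must not fault, fails with -EFAULT if pages are absent */
static int apply_relocs_nofault(const void *u, size_t n) { (void)u; (void)n; return 0; }
/* slow path: the faulting copy may sleep, so no lock may be held */
static int copy_from_user_sleep(void *d, const void *u, size_t n) { (void)d; (void)u; (void)n; return 0; }
static int apply_relocs(const void *buf, size_t n) { (void)buf; (void)n; return 0; }

static int relocate(const void *user_relocs, size_t n)
{
	void *buf;
	int ret;

	lock();
	ret = apply_relocs_nofault(user_relocs, n);	/* cheap, common case */
	unlock();
	if (ret != -EFAULT)
		return ret;

	buf = malloc(n);		/* drm_malloc_ab() in the patch */
	if (buf == NULL)
		return -ENOMEM;
	ret = copy_from_user_sleep(buf, user_relocs, n);
	if (ret == 0) {
		lock();
		ret = apply_relocs(buf, n);	/* replay from the kernel copy */
		unlock();
	}
	free(buf);
	return ret;
}
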
@@ -3757,33 +3932,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- /* Zero the global flush/invalidate flags. These
- * will be modified as new domains are computed
- * for each object
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
- dev_priv->mm.flush_rings = 0;
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
-
- /* Compute new gpu domains and update invalidate/flush */
- i915_gem_object_set_to_gpu_domain(obj, ring);
- }
-
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev, file,
- dev->invalidate_domains,
- dev->flush_domains,
- dev_priv->mm.flush_rings);
- }
+ ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+ object_list, args->buffer_count);
+ if (ret)
+ goto err;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4195,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
alignment = i915_gem_get_gtt_alignment(obj);
if (obj_priv->gtt_offset & (alignment - 1)) {
WARN(obj_priv->pin_count,
- "bo is already pinned with incorrect alignment:"
- " offset=%x, req.alignment=%x\n",
+ "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
obj_priv->gtt_offset, alignment);
ret = i915_gem_object_unbind(obj);
if (ret)
@@ -4223,10 +4374,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* use this buffer rather sooner than later, so issuing the required
* flush earlier is beneficial.
*/
- if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+ if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
i915_gem_flush_ring(dev, file_priv,
obj_priv->ring,
0, obj->write_domain);
+ } else if (obj_priv->ring->outstanding_lazy_request) {
+ /* This ring is not being cleared by active usage,
+ * so emit a request to do so.
+ */
+ u32 seqno = i915_add_request(dev,
+ NULL, NULL,
+ obj_priv->ring);
+ if (seqno == 0)
+ ret = -ENOMEM;
+ }
/* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs
@@ -4856,17 +5017,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- void *obj_addr;
- int ret;
- char __user *user_data;
+ void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+ char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+ DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
- DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
- ret = copy_from_user(obj_addr, user_data, args->size);
- if (ret)
- return -EFAULT;
+ if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+ unsigned long unwritten;
+
+ /* The physical object once assigned is fixed for the lifetime
+ * of the obj, so we can safely drop the lock and continue
+ * to access vaddr.
+ */
+ mutex_unlock(&dev->struct_mutex);
+ unwritten = copy_from_user(vaddr, user_data, args->size);
+ mutex_lock(&dev->struct_mutex);
+ if (unwritten)
+ return -EFAULT;
+ }
drm_agp_chipset_flush(dev);
return 0;
@@ -4900,9 +5068,7 @@ i915_gpu_is_active(struct drm_device *dev)
int lists_empty;
lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- list_empty(&dev_priv->bsd_ring.active_list) &&
- list_empty(&dev_priv->blt_ring.active_list);
+ list_empty(&dev_priv->mm.active_list);
return !lists_empty;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013f53fa..d8ae7d1d0cc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- list_empty(&dev_priv->bsd_ring.active_list) &&
- list_empty(&dev_priv->blt_ring.active_list));
+ list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return -ENOSPC;
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- list_empty(&dev_priv->bsd_ring.active_list) &&
- list_empty(&dev_priv->blt_ring.active_list));
+ list_empty(&dev_priv->mm.active_list));
BUG_ON(!lists_empty);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 25ed911a3112..878fc766a12c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3033,6 +3033,7 @@
#define TRANS_DP_10BPC (1<<9)
#define TRANS_DP_6BPC (2<<9)
#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_BPC_MASK (3<<9)
#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d2d959..42729d25da58 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -239,6 +239,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ /* Cursor state */
+ dev_priv->saveCURACNTR = I915_READ(CURACNTR);
+ dev_priv->saveCURAPOS = I915_READ(CURAPOS);
+ dev_priv->saveCURABASE = I915_READ(CURABASE);
+ dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+ dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+ dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+ if (IS_GEN2(dev))
+ dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
@@ -529,6 +539,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+ /* Cursor state */
+ I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
+ I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
+ I915_WRITE(CURABASE, dev_priv->saveCURABASE);
+ I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+ I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+ I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
return;
}
@@ -543,16 +563,6 @@ void i915_save_display(struct drm_device *dev)
/* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
- /* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(CURAPOS);
- dev_priv->saveCURABASE = I915_READ(CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(CURBBASE);
- if (IS_GEN2(dev))
- dev_priv->saveCURSIZE = I915_READ(CURSIZE);
-
/* CRT state */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
@@ -657,16 +667,6 @@ void i915_restore_display(struct drm_device *dev)
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
- /* Cursor state */
- I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-
/* CRT state */
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
/* Clock gating state */
intel_init_clock_gating(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
ironlake_enable_drps(dev);
+ intel_init_emon(dev);
+ }
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 65c88f9ba12c..2cb8e0b9f1ee 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -190,37 +190,6 @@ out:
kfree(output.pointer);
}
-static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
-{
- return 0;
-}
-
-static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
- enum vga_switcheroo_state state)
-{
- return 0;
-}
-
-static int intel_dsm_init(void)
-{
- return 0;
-}
-
-static int intel_dsm_get_client_id(struct pci_dev *pdev)
-{
- if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
- return VGA_SWITCHEROO_IGD;
- else
- return VGA_SWITCHEROO_DIS;
-}
-
-static struct vga_switcheroo_handler intel_dsm_handler = {
- .switchto = intel_dsm_switchto,
- .power_state = intel_dsm_power_state,
- .init = intel_dsm_init,
- .get_client_id = intel_dsm_get_client_id,
-};
-
static bool intel_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle, intel_handle;
@@ -276,11 +245,8 @@ void intel_register_dsm_handler(void)
{
if (!intel_dsm_detect())
return;
-
- vga_switcheroo_register_handler(&intel_dsm_handler);
}
void intel_unregister_dsm_handler(void)
{
- vga_switcheroo_unregister_handler();
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c55c77043357..8df574316063 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -34,6 +34,25 @@
#include "i915_drm.h"
#include "i915_drv.h"
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
+ ADPA_CRT_HOTPLUG_WARMUP_10MS | \
+ ADPA_CRT_HOTPLUG_SAMPLE_4S | \
+ ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
+ ADPA_CRT_HOTPLUG_VOLREF_325MV | \
+ ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+ struct intel_encoder base;
+ bool force_hotplug_required;
+};
+
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_crt, base);
+}
+
static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -129,7 +148,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
}
- adpa = 0;
+ adpa = ADPA_HOTPLUG_BITS;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -157,53 +176,44 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 adpa, temp;
+ u32 adpa;
bool ret;
- bool turn_off_dac = false;
- temp = adpa = I915_READ(PCH_ADPA);
+ /* The first time through, trigger an explicit detection cycle */
+ if (crt->force_hotplug_required) {
+ bool turn_off_dac = HAS_PCH_SPLIT(dev);
+ u32 save_adpa;
- if (HAS_PCH_SPLIT(dev))
- turn_off_dac = true;
-
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- if (turn_off_dac)
- adpa &= ~ADPA_DAC_ENABLE;
-
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
-
- adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
- ADPA_CRT_HOTPLUG_WARMUP_10MS |
- ADPA_CRT_HOTPLUG_SAMPLE_4S |
- ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */
- ADPA_CRT_HOTPLUG_VOLREF_325MV |
- ADPA_CRT_HOTPLUG_ENABLE |
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
-
- DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
- I915_WRITE(PCH_ADPA, adpa);
-
- if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
- 1000))
- DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
-
- if (turn_off_dac) {
- /* Make sure hotplug is enabled */
- I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
- (void)I915_READ(PCH_ADPA);
+ crt->force_hotplug_required = 0;
+
+ save_adpa = adpa = I915_READ(PCH_ADPA);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+ if (turn_off_dac)
+ adpa &= ~ADPA_DAC_ENABLE;
+
+ I915_WRITE(PCH_ADPA, adpa);
+
+ if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+ if (turn_off_dac) {
+ I915_WRITE(PCH_ADPA, save_adpa);
+ POSTING_READ(PCH_ADPA);
+ }
}
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(PCH_ADPA);
- adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
- if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) ||
- (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO))
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
+ DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
return ret;
}
@@ -277,13 +287,12 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
}
-static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
+static bool intel_crt_detect_ddc(struct intel_crt *crt)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
/* CRT should always be at 0, but check anyway */
- if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
+ if (crt->base.type != INTEL_OUTPUT_ANALOG)
return false;
if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
@@ -291,7 +300,7 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
return true;
}
- if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
+ if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
return true;
}
@@ -300,9 +309,9 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
}
static enum drm_connector_status
-intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
{
- struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_encoder *encoder = &crt->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -434,7 +443,7 @@ static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_crt *crt = intel_attached_crt(connector);
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
@@ -443,28 +452,31 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_crt_detect_hotplug(connector)) {
DRM_DEBUG_KMS("CRT detected via hotplug\n");
return connector_status_connected;
- } else
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via hotplug\n");
return connector_status_disconnected;
+ }
}
- if (intel_crt_detect_ddc(&encoder->base))
+ if (intel_crt_detect_ddc(crt))
return connector_status_connected;
if (!force)
return connector->status;
/* for pre-945g platforms use load detect */
- if (encoder->base.crtc && encoder->base.crtc->enabled) {
- status = intel_crt_load_detect(encoder->base.crtc, encoder);
+ crtc = crt->base.base.crtc;
+ if (crtc && crtc->enabled) {
+ status = intel_crt_load_detect(crtc, crt);
} else {
- crtc = intel_get_load_detect_pipe(encoder, connector,
+ crtc = intel_get_load_detect_pipe(&crt->base, connector,
NULL, &dpms_mode);
if (crtc) {
- if (intel_crt_detect_ddc(&encoder->base))
+ if (intel_crt_detect_ddc(crt))
status = connector_status_connected;
else
- status = intel_crt_load_detect(crtc, encoder);
- intel_release_load_detect_pipe(encoder,
+ status = intel_crt_load_detect(crtc, crt);
+ intel_release_load_detect_pipe(&crt->base,
connector, dpms_mode);
} else
status = connector_status_unknown;
@@ -536,17 +548,17 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
- struct intel_encoder *intel_encoder;
+ struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
- if (!intel_encoder)
+ crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
+ if (!crt)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(crt);
return;
}
@@ -554,20 +566,20 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
+ drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_connector_attach_encoder(intel_connector, &crt->base);
- intel_encoder->type = INTEL_OUTPUT_ANALOG;
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ crt->base.type = INTEL_OUTPUT_ANALOG;
+ crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
+ 1 << INTEL_ANALOG_CLONE_BIT |
+ 1 << INTEL_SDVO_LVDS_CLONE_BIT);
+ crt->base.crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
+ drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
@@ -577,5 +589,22 @@ void intel_crt_init(struct drm_device *dev)
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ /*
+ * Configure the automatic hotplug detection stuff
+ */
+ crt->force_hotplug_required = 0;
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 adpa;
+
+ adpa = I915_READ(PCH_ADPA);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ I915_WRITE(PCH_ADPA, adpa);
+ POSTING_READ(PCH_ADPA);
+
+ DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+ crt->force_hotplug_required = 1;
+ }
+
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065374b2..d9b7092439ef 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1611,6 +1611,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
wait_event(dev_priv->pending_flip_queue,
atomic_read(&obj_priv->pending_flip) == 0);
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+ * current scanout is retired before unpinning the old
+ * framebuffer.
+ */
+ ret = i915_gem_object_flush_gpu(obj_priv, false);
+ if (ret) {
+ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -1681,6 +1693,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
udelay(500);
}
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ POSTING_READ(reg);
+ udelay(1000);
+}
+
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
@@ -1767,27 +1810,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
DRM_DEBUG_KMS("FDI train done\n");
- /* enable normal train */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
- I915_WRITE(reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_NORMAL_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE;
- }
- I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
- /* wait one idle pattern time */
- POSTING_READ(reg);
- udelay(1000);
}
static const int snb_b_fdi_train_param [] = {
@@ -2090,15 +2112,19 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
+ intel_fdi_normal_train(crtc);
+
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
- TRANS_DP_SYNC_MASK);
+ TRANS_DP_SYNC_MASK |
+ TRANS_DP_BPC_MASK);
temp |= (TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING);
+ temp |= TRANS_DP_8BPC;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2200,9 +2226,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
- I915_WRITE(FDI_RX_CHICKEN(pipe),
- I915_READ(FDI_RX_CHICKEN(pipe) &
- ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+ if (HAS_PCH_IBX(dev))
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe) &
+ ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
/* still set train pattern 1 */
reg = FDI_TX_CTL(pipe);
@@ -2687,27 +2714,19 @@ fdi_reduce_ratio(u32 *num, u32 *den)
}
}
-#define DATA_N 0x800000
-#define LINK_N 0x80000
-
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
int link_clock, struct fdi_m_n *m_n)
{
- u64 temp;
-
m_n->tu = 64; /* default size */
- temp = (u64) DATA_N * pixel_clock;
- temp = div_u64(temp, link_clock);
- m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes);
- m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
- m_n->gmch_n = DATA_N;
+ /* BUG_ON(pixel_clock > INT_MAX / 36); */
+ m_n->gmch_m = bits_per_pixel * pixel_clock;
+ m_n->gmch_n = link_clock * nlanes * 8;
fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
- temp = (u64) LINK_N * pixel_clock;
- m_n->link_m = div_u64(temp, link_clock);
- m_n->link_n = LINK_N;
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
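
To see why the simplification is safe, work the new formula through a concrete case. A self-contained sketch with realistic numbers; note that the driver's fdi_reduce_ratio may reduce by shifting values into register range rather than by an exact gcd, but the ratio it preserves is the same one computed here:

    #include <stdio.h>

    static unsigned gcd(unsigned a, unsigned b)
    {
            while (b) {
                    unsigned t = a % b;
                    a = b;
                    b = t;
            }
            return a;
    }

    int main(void)
    {
            /* 1920x1080@60: 148500 kHz pixel clock, 24 bpp,
             * 4 FDI lanes at a 270000 kHz link clock. */
            unsigned bpp = 24, lanes = 4;
            unsigned pixel_clock = 148500, link_clock = 270000;

            unsigned gmch_m = bpp * pixel_clock;       /* 3564000 */
            unsigned gmch_n = link_clock * lanes * 8;  /* 8640000 */
            unsigned g = gcd(gmch_m, gmch_n);

            printf("gmch m/n = %u/%u\n", gmch_m / g, gmch_n / g); /* 33/80 */
            g = gcd(pixel_clock, link_clock);
            printf("link m/n = %u/%u\n",
                   pixel_clock / g, link_clock / g);             /* 11/20 */
            return 0;
    }
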
@@ -3691,6 +3710,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */
if (HAS_PCH_SPLIT(dev)) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
int lane = 0, link_bw, bpp;
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
@@ -3774,6 +3794,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc->fdi_lanes = lane;
+ if (pixel_multiplier > 1)
+ link_bw *= pixel_multiplier;
ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
@@ -5211,6 +5233,55 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
+static void intel_sanitize_modesetting(struct drm_device *dev,
+ int pipe, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg, val;
+
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+ /* Who knows what state these registers were left in by the BIOS or
+ * grub?
+ *
+ * If we leave the registers in a conflicting state (e.g. with the
+ * display plane reading from the other pipe than the one we intend
+ * to use) then when we attempt to teardown the active mode, we will
+ * not disable the pipes and planes in the correct order -- leaving
+ * a plane reading from a disabled pipe and possibly leading to
+ * undefined behaviour.
+ */
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) == 0)
+ return;
+ if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
+ return;
+
+ /* This display plane is active and attached to the other CPU pipe. */
+ pipe = !pipe;
+
+ /* Disable the plane and wait for it to stop reading from the pipe. */
+ I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, pipe);
+
+ if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ /* Switch off the pipe. */
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ if (val & PIPECONF_ENABLE) {
+ I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+ intel_wait_for_pipe_off(dev, pipe);
+ }
+}
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
@@ -5262,6 +5333,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
(unsigned long)intel_crtc);
+
+ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -5311,9 +5384,14 @@ static void intel_setup_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
+ bool has_lvds = false;
if (IS_MOBILE(dev) && !IS_I830(dev))
- intel_lvds_init(dev);
+ has_lvds = intel_lvds_init(dev);
+ if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
+ /* disable the panel fitter on everything but LVDS */
+ I915_WRITE(PFIT_CONTROL, 0);
+ }
if (HAS_PCH_SPLIT(dev)) {
dpd_is_edp = intel_dpd_is_edp(dev);
@@ -5581,20 +5659,19 @@ void ironlake_enable_drps(struct drm_device *dev)
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
- fstart = fmax;
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
- dev_priv->fmax = fstart; /* IPS callback will increase this */
+ dev_priv->fmax = fmax; /* IPS callback will increase this */
dev_priv->fstart = fstart;
- dev_priv->max_delay = fmax;
+ dev_priv->max_delay = fstart;
dev_priv->min_delay = fmin;
dev_priv->cur_delay = fstart;
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
- fstart);
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1d63b1..df648cb4c296 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -584,17 +584,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode->clock = dev_priv->panel_fixed_mode->clock;
}
- /* Just use VBT values for eDP */
- if (is_edp(intel_dp)) {
- intel_dp->lane_count = dev_priv->edp.lanes;
- intel_dp->link_bw = dev_priv->edp.rate;
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
- DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
- intel_dp->link_bw, intel_dp->lane_count,
- adjusted_mode->clock);
- return true;
- }
-
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -613,6 +602,19 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
+ if (is_edp(intel_dp)) {
+ /* okay, we failed; just pick the highest */
+ intel_dp->lane_count = max_lane_count;
+ intel_dp->link_bw = bws[max_clock];
+ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+ "count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+
+ return true;
+ }
+
return false;
}
@@ -1087,21 +1089,11 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
}
static uint32_t
-intel_dp_signal_levels(struct intel_dp *intel_dp)
+intel_dp_signal_levels(uint8_t train_set, int lane_count)
{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t signal_levels = 0;
- u8 train_set = intel_dp->train_set[0];
- u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
- u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
+ uint32_t signal_levels = 0;
- if (is_edp(intel_dp)) {
- vswing = dev_priv->edp.vswing;
- preemphasis = dev_priv->edp.preemphasis;
- }
-
- switch (vswing) {
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
default:
signal_levels |= DP_VOLTAGE_0_4;
@@ -1116,7 +1108,7 @@ intel_dp_signal_levels(struct intel_dp *intel_dp)
signal_levels |= DP_VOLTAGE_1_2;
break;
}
- switch (preemphasis) {
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
default:
signal_levels |= DP_PRE_EMPHASIS_0;
@@ -1203,18 +1195,6 @@ intel_channel_eq_ok(struct intel_dp *intel_dp)
}
static bool
-intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
- return false;
-
- return true;
-}
-
-static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
@@ -1226,9 +1206,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
- if (!intel_dp_aux_handshake_required(intel_dp))
- return true;
-
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
@@ -1261,11 +1238,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
intel_wait_for_vblank(dev, intel_crtc->pipe);
- if (intel_dp_aux_handshake_required(intel_dp))
- /* Write the link configuration data */
- intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
- intel_dp->link_configuration,
- DP_LINK_CONFIGURATION_SIZE);
+ /* Write the link configuration data */
+ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
@@ -1283,7 +1259,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@@ -1297,37 +1273,33 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
break;
/* Set training pattern 1 */
- udelay(500);
- if (intel_dp_aux_handshake_required(intel_dp)) {
+ udelay(100);
+ if (!intel_dp_get_link_status(intel_dp))
break;
- } else {
- if (!intel_dp_get_link_status(intel_dp))
- break;
- if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
- clock_recovery = true;
- break;
- }
+ if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ clock_recovery = true;
+ break;
+ }
- /* Check to see if we've tried the max voltage */
- for (i = 0; i < intel_dp->lane_count; i++)
- if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
- break;
- if (i == intel_dp->lane_count)
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < intel_dp->lane_count; i++)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
+ if (i == intel_dp->lane_count)
+ break;
- /* Check to see if we've tried the same voltage 5 times */
- if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
- ++tries;
- if (tries == 5)
- break;
- } else
- tries = 0;
- voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5)
+ break;
+ } else
+ tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- /* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp);
- }
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
}
intel_dp->DP = DP;
@@ -1354,7 +1326,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@@ -1368,28 +1340,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP_TRAINING_PATTERN_2))
break;
- udelay(500);
-
- if (!intel_dp_aux_handshake_required(intel_dp)) {
+ udelay(400);
+ if (!intel_dp_get_link_status(intel_dp))
break;
- } else {
- if (!intel_dp_get_link_status(intel_dp))
- break;
- if (intel_channel_eq_ok(intel_dp)) {
- channel_eq = true;
- break;
- }
+ if (intel_channel_eq_ok(intel_dp)) {
+ channel_eq = true;
+ break;
+ }
- /* Try 5 times */
- if (tries > 5)
- break;
+ /* Try 5 times */
+ if (tries > 5)
+ break;
- /* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp);
- ++tries;
- }
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
+ ++tries;
}
+
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
@@ -1408,6 +1376,9 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
+ if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ return;
+
DRM_DEBUG_KMS("\n");
if (is_edp(intel_dp)) {
@@ -1430,6 +1401,28 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (is_edp(intel_dp))
DP |= DP_LINK_TRAIN_OFF;
+
+ if (!HAS_PCH_CPT(dev) &&
+ I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ /* Hardware workaround: leaving our transcoder select
+ * set to transcoder B while it's off will prevent the
+ * corresponding HDMI output on transcoder A.
+ *
+ * Combine this with another hardware workaround:
+ * transcoder select bit can only be cleared while the
+ * port is enabled.
+ */
+ DP &= ~DP_PIPEB_SELECT;
+ I915_WRITE(intel_dp->output_reg, DP);
+
+ /* Changes to enable or select take place the vblank
+ * after being written.
+ */
+ intel_wait_for_vblank(intel_dp->base.base.dev,
+ intel_crtc->pipe);
+ }
+
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
}
@@ -1517,7 +1510,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
status = connector_status_connected;
}
- return bit;
+ return status;
}
/**
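
The eDP change above only falls back to the maximum link parameters after the normal search loop fails. The search itself is plain bandwidth arithmetic; a self-contained sketch of the same check (DP_LINK_BW_1_62 and DP_LINK_BW_2_7 encode as 0x06 and 0x0a, and 8b/10b channel coding gives the *8/10 factor):

    #include <stdio.h>

    static int link_clock_khz(int bw)   /* 0x06 -> 162 MHz, 0x0a -> 270 MHz */
    {
            return bw == 0x0a ? 270000 : 162000;
    }

    static int max_data_rate(int link_clock, int lanes)
    {
            return link_clock * lanes * 8 / 10; /* 8b/10b overhead */
    }

    int main(void)
    {
            int mode_clock = 148500;    /* kHz, 1080p60 */
            int required = mode_clock * 3; /* 24 bpp = 3 bytes per pixel */

            for (int lanes = 1; lanes <= 4; lanes <<= 1)
                    for (int bw = 0x06; bw <= 0x0a; bw += 4)
                            printf("bw %02x x%d lanes: %s\n", bw, lanes,
                                   max_data_rate(link_clock_khz(bw), lanes) >=
                                   required ? "fits" : "too slow");
            return 0;
    }

Only the 4-lane configurations fit this mode, which is exactly what the loop in intel_dp_mode_fixup() would discover before the new eDP fallback triggers.
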
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86a8765..e52c6125bb1f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -237,7 +237,7 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
-extern void intel_lvds_init(struct drm_device *dev);
+extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
extern void intel_init_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 2be4f728ed0c..3dba086e7eea 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -160,7 +160,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
};
struct intel_gpio *gpio;
- if (pin < 1 || pin > 7)
+ if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
return NULL;
gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
@@ -172,7 +172,8 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
gpio->reg += PCH_GPIOA - GPIOA;
gpio->dev_priv = dev_priv;
- snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]);
+ snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+ "i915 GPIO%c", "?BACDE?F"[pin]);
gpio->adapter.owner = THIS_MODULE;
gpio->adapter.algo_data = &gpio->algo;
gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
@@ -349,7 +350,7 @@ int intel_setup_gmbus(struct drm_device *dev)
"panel",
"dpc",
"dpb",
- "reserved"
+ "reserved",
"dpd",
};
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -366,8 +367,8 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->adapter.owner = THIS_MODULE;
bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
- I2C_NAME_SIZE,
- "gmbus %s",
+ sizeof(bus->adapter.name),
+ "i915 gmbus %s",
names[i]);
bus->adapter.dev.parent = &dev->pdev->dev;
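
Besides bounding the snprintf calls, the hunks above correct the pin-to-letter table: pin 6 is reserved on these parts, so 'F' moves to index 7, matching the corrected port list (ssc, vga, panel, dpc, dpb, reserved, dpd). A quick standalone demonstration of the fixed lookup:

    #include <stdio.h>

    int main(void)
    {
            const char names[] = "?BACDE?F"; /* indices 0 and 6 are invalid */

            for (int pin = 1; pin <= 7; pin++)
                    printf("pin %d -> i915 GPIO%c\n", pin, names[pin]);
            return 0;
    }
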
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a649990ea9..25bcedf386fd 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -68,7 +68,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
/**
* Sets the power state for the panel.
*/
-static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
+static void intel_lvds_enable(struct intel_lvds *intel_lvds)
{
struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -82,26 +82,61 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
lvds_reg = LVDS;
}
- if (on) {
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
- intel_panel_set_backlight(dev, dev_priv->backlight_level);
- } else {
- dev_priv->backlight_level = intel_panel_get_backlight(dev);
-
- intel_panel_set_backlight(dev, 0);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- if (intel_lvds->pfit_control) {
- if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
- I915_WRITE(PFIT_CONTROL, 0);
- intel_lvds->pfit_control = 0;
+ if (intel_lvds->pfit_dirty) {
+ /*
+ * Enable automatic panel scaling so that non-native modes
+ * fill the screen. The panel fitter should only be
+ * adjusted whilst the pipe is disabled, according to
+ * register description and PRM.
+ */
+ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+ intel_lvds->pfit_control,
+ intel_lvds->pfit_pgm_ratios);
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) {
+ DRM_ERROR("timed out waiting for panel to power off\n");
+ } else {
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
intel_lvds->pfit_dirty = false;
}
+ }
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+ POSTING_READ(lvds_reg);
+
+ intel_panel_set_backlight(dev, dev_priv->backlight_level);
+}
+
+static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+{
+ struct drm_device *dev = intel_lvds->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 ctl_reg, lvds_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ } else {
+ ctl_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ }
+
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+ intel_panel_set_backlight(dev, 0);
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+
+ if (intel_lvds->pfit_control) {
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
+
+ I915_WRITE(PFIT_CONTROL, 0);
+ intel_lvds->pfit_dirty = true;
}
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_reg);
}
@@ -110,9 +145,9 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_set_power(intel_lvds, true);
+ intel_lvds_enable(intel_lvds);
else
- intel_lvds_set_power(intel_lvds, false);
+ intel_lvds_disable(intel_lvds);
/* XXX: We never power down the LVDS pairs. */
}
@@ -411,43 +446,18 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
/* Always do a full power on as we do not know what state
* we were left in.
*/
- intel_lvds_set_power(intel_lvds, true);
+ intel_lvds_enable(intel_lvds);
}
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
/*
* The LVDS pin pair will already have been turned on in the
* intel_crtc_mode_set since it has a large impact on the DPLL
* settings.
*/
-
- if (HAS_PCH_SPLIT(dev))
- return;
-
- if (!intel_lvds->pfit_dirty)
- return;
-
- /*
- * Enable automatic panel scaling so that non-native modes fill the
- * screen. Should be enabled before the pipe is enabled, according to
- * register description and PRM.
- */
- DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
- intel_lvds->pfit_control,
- intel_lvds->pfit_pgm_ratios);
- if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
-
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
- intel_lvds->pfit_dirty = false;
}
/**
@@ -481,11 +491,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
- if (intel_lvds->edid) {
- drm_mode_connector_update_edid_property(connector,
- intel_lvds->edid);
+ if (intel_lvds->edid)
return drm_add_edid_modes(connector, intel_lvds->edid);
- }
mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
if (mode == 0)
@@ -840,7 +847,7 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_device *dev)
+bool intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds;
@@ -856,37 +863,37 @@ void intel_lvds_init(struct drm_device *dev)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
- return;
+ return false;
pin = GMBUS_PORT_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
- return;
+ return false;
}
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
- return;
+ return false;
if (dev_priv->edp.support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
- return;
+ return false;
}
}
if (!intel_lvds_ddc_probe(dev, pin)) {
DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
- return;
+ return false;
}
intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
if (!intel_lvds) {
- return;
+ return false;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_lvds);
- return;
+ return false;
}
if (!HAS_PCH_SPLIT(dev)) {
@@ -939,7 +946,16 @@ void intel_lvds_init(struct drm_device *dev)
*/
intel_lvds->edid = drm_get_edid(connector,
&dev_priv->gmbus[pin].adapter);
-
+ if (intel_lvds->edid) {
+ if (drm_add_edid_modes(connector,
+ intel_lvds->edid)) {
+ drm_mode_connector_update_edid_property(connector,
+ intel_lvds->edid);
+ } else {
+ kfree(intel_lvds->edid);
+ intel_lvds->edid = NULL;
+ }
+ }
if (!intel_lvds->edid) {
/* Didn't get an EDID, so
* Set wide sync ranges so we get all modes
@@ -1020,7 +1036,7 @@ out:
/* keep the LVDS connector */
dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
- return;
+ return true;
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
@@ -1028,4 +1044,5 @@ failed:
drm_encoder_cleanup(encoder);
kfree(intel_lvds);
kfree(intel_connector);
+ return false;
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc3cd6b..9b0d9a867aea 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
return 0;
err_out:
- iounmap(opregion->header);
+ iounmap(base);
return err;
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d25219a..02ff0a481f47 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
{
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
- u32 stride_mask, depth, tmp;
+ u32 stride_mask;
+ int depth;
+ u32 tmp;
/* check src dimensions */
if (IS_845G(dev) || IS_I830(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae2..89a65be8a3f3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -156,28 +156,30 @@ static int init_ring_common(struct drm_device *dev,
/* G45 ring initialization fails to reset head to zero */
if (head != 0) {
- DRM_ERROR("%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ DRM_DEBUG_KMS("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
I915_WRITE_HEAD(ring, 0);
- DRM_ERROR("%s head forced to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+ DRM_ERROR("failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ }
}
I915_WRITE_CTL(ring,
((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
- | RING_NO_REPORT | RING_VALID);
+ | RING_REPORT_64K | RING_VALID);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* If the head is still not zero, the ring is dead */
@@ -654,6 +656,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
i915_gem_object_unpin(ring->gem_object);
drm_gem_object_unreference(ring->gem_object);
ring->gem_object = NULL;
+
+ if (ring->cleanup)
+ ring->cleanup(ring);
+
cleanup_status_page(dev, ring);
}
@@ -688,6 +694,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
{
unsigned long end;
drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 head;
+
+ head = intel_read_status_page(ring, 4);
+ if (head) {
+ ring->head = head & HEAD_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ if (ring->space >= n)
+ return 0;
+ }
trace_i915_ring_wait_begin (dev);
end = jiffies + 3 * HZ;
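
With RING_REPORT_64K enabled in init_ring_common() above, the hardware periodically reports its head pointer into the status page, which is what intel_read_status_page(ring, 4) picks up, letting intel_wait_ring_buffer() recompute free space without polling ring registers. The wrap-around arithmetic, worked standalone:

    #include <stdio.h>

    int main(void)
    {
            int size = 128 * 1024;   /* ring->size, 32 pages */
            int head = 0x00100;      /* reported head; consumer has wrapped */
            int tail = 0x1f000;      /* ring->tail, near the end */

            int space = head - (tail + 8); /* 8 bytes always kept free */
            if (space < 0)
                    space += size;

            printf("space = %d bytes\n", space); /* 4344 */
            return 0;
    }
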
@@ -854,19 +871,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
/* do nothing */
}
+
+/* Workaround for some steppings of SNB:
+ * each time the BLT engine's ring tail is moved,
+ * the first command in the ring to be parsed
+ * must be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+ (IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+ return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ if (NEED_BLT_WORKAROUND(dev)) {
+ struct drm_i915_gem_object *obj;
+ u32 __iomem *ptr;
+ int ret;
+
+ obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = i915_gem_object_pin(&obj->base, 4096);
+ if (ret) {
+ drm_gem_object_unreference(&obj->base);
+ return ret;
+ }
+
+ ptr = kmap(obj->pages[0]);
+ iowrite32(MI_BATCH_BUFFER_END, ptr);
+ iowrite32(MI_NOOP, ptr+1);
+ kunmap(obj->pages[0]);
+
+ ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+ if (ret) {
+ i915_gem_object_unpin(&obj->base);
+ drm_gem_object_unreference(&obj->base);
+ return ret;
+ }
+
+ ring->private = obj;
+ }
+
+ return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ int num_dwords)
+{
+ if (ring->private) {
+ intel_ring_begin(dev, ring, num_dwords+2);
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+ intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+ } else
+ intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ blt_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_FLUSH_DW);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 flush_domains)
+{
+ u32 seqno = i915_gem_get_seqno(dev);
+
+ blt_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(dev, ring,
+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(dev, ring, seqno);
+ intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+ intel_ring_advance(dev, ring);
+
+ DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+ return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+ if (!ring->private)
+ return;
+
+ i915_gem_object_unpin(ring->private);
+ drm_gem_object_unreference(ring->private);
+ ring->private = NULL;
+}
+
static const struct intel_ring_buffer gen6_blt_ring = {
.name = "blt ring",
.id = RING_BLT,
.mmio_base = BLT_RING_BASE,
.size = 32 * PAGE_SIZE,
- .init = init_ring_common,
+ .init = blt_ring_init,
.write_tail = ring_write_tail,
- .flush = gen6_ring_flush,
- .add_request = ring_add_request,
+ .flush = blt_ring_flush,
+ .add_request = blt_ring_add_request,
.get_seqno = ring_status_page_get_seqno,
.user_irq_get = blt_ring_get_user_irq,
.user_irq_put = blt_ring_put_user_irq,
.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+ .cleanup = blt_ring_cleanup,
};
int intel_init_render_ring_buffer(struct drm_device *dev)
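
Putting the workaround together: on affected steppings, every tail move begins with MI_BATCH_BUFFER_START pointing at the pinned page written in blt_ring_init(), which immediately ends (MI_BATCH_BUFFER_END, MI_NOOP), so the first command the parser sees is always a batch start. The resulting dword stream for blt_ring_flush(), sketched with placeholder opcode values; the real encodings live in i915_reg.h:

    #include <stdint.h>
    #include <stdio.h>

    #define MI_BATCH_BUFFER_START 0xdead0001u /* placeholder, not the real opcode */
    #define MI_FLUSH_DW           0xdead0002u /* placeholder, not the real opcode */

    int main(void)
    {
            uint32_t wa_gtt_offset = 0x10000; /* hypothetical pinned page */
            uint32_t ring[6];
            int n = 0;

            /* blt_ring_begin(num_dwords = 4) on an affected stepping: */
            ring[n++] = MI_BATCH_BUFFER_START;
            ring[n++] = wa_gtt_offset;
            /* ...followed by blt_ring_flush()'s four dwords: */
            ring[n++] = MI_FLUSH_DW;
            ring[n++] = 0;
            ring[n++] = 0;
            ring[n++] = 0;

            for (int i = 0; i < n; i++)
                    printf("dword %d: 0x%08x\n", i, ring[i]);
            return 0;
    }
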
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0e5764..3126c2681983 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,6 +63,7 @@ struct intel_ring_buffer {
struct drm_i915_gem_execbuffer2 *exec,
struct drm_clip_rect *cliprects,
uint64_t exec_offset);
+ void (*cleanup)(struct intel_ring_buffer *ring);
/**
* List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@ struct intel_ring_buffer {
wait_queue_head_t irq_queue;
drm_local_map_t map;
+
+ void *private;
};
static inline u32
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index de158b76bcd5..d97e6cb52d34 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -107,7 +107,8 @@ struct intel_sdvo {
* This is set if we treat the device as HDMI, instead of DVI.
*/
bool is_hdmi;
- bool has_audio;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
/**
* This is set if we detect output of sdvo device as LVDS and
@@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (intel_sdvo->is_hdmi &&
+ if (intel_sdvo->has_hdmi_monitor &&
!intel_sdvo_set_avi_infoframe(intel_sdvo))
return;
@@ -1063,7 +1064,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
- if (intel_sdvo->has_audio)
+ if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1295,55 +1296,14 @@ intel_sdvo_get_edid(struct drm_connector *connector)
return drm_get_edid(connector, &sdvo->ddc);
}
-static struct drm_connector *
-intel_find_analog_connector(struct drm_device *dev)
-{
- struct drm_connector *connector;
- struct intel_sdvo *encoder;
-
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list,
- base.base.head) {
- if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- if (&encoder->base ==
- intel_attached_encoder(connector))
- return connector;
- }
- }
- }
-
- return NULL;
-}
-
-static int
-intel_analog_is_connected(struct drm_device *dev)
-{
- struct drm_connector *analog_connector;
-
- analog_connector = intel_find_analog_connector(dev);
- if (!analog_connector)
- return false;
-
- if (analog_connector->funcs->detect(analog_connector, false) ==
- connector_status_disconnected)
- return false;
-
- return true;
-}
-
/* Mac mini hack -- use the same DDC as the analog connector */
static struct edid *
intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- if (!intel_analog_is_connected(connector->dev))
- return NULL;
-
- return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ return drm_get_edid(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
}
enum drm_connector_status
@@ -1388,8 +1348,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
/* DDC bus is shared, match EDID to connector type */
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
- intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
+ if (intel_sdvo->is_hdmi) {
+ intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
}
connector->display_info.raw_edid = NULL;
kfree(edid);
@@ -1398,7 +1360,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
if (status == connector_status_connected) {
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
if (intel_sdvo_connector->force_audio)
- intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
+ intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
}
return status;
@@ -1415,10 +1377,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (!intel_sdvo_write_cmd(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
return connector_status_unknown;
- if (intel_sdvo->is_tv) {
- /* add 30ms delay when the output type is SDVO-TV */
+
+ /* add 30ms delay when the output type might be TV */
+ if (intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
mdelay(30);
- }
+
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
@@ -1472,8 +1436,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- drm_mode_connector_update_edid_property(connector, edid);
- drm_add_edid_modes(connector, edid);
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ }
connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1713,12 +1679,12 @@ intel_sdvo_set_property(struct drm_connector *connector,
intel_sdvo_connector->force_audio = val;
- if (val > 0 && intel_sdvo->has_audio)
+ if (val > 0 && intel_sdvo->has_hdmi_audio)
return 0;
- if (val < 0 && !intel_sdvo->has_audio)
+ if (val < 0 && !intel_sdvo->has_hdmi_audio)
return 0;
- intel_sdvo->has_audio = val > 0;
+ intel_sdvo->has_hdmi_audio = val > 0;
goto done;
}
@@ -2070,6 +2036,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+
+ intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
intel_sdvo->is_hdmi = true;
}
intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -2077,8 +2045,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
- intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
-
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 8933e985c227..d3a9c6e02477 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -31,6 +31,7 @@
*/
#include <linux/backlight.h>
+#include <linux/acpi.h>
#include "drmP.h"
#include "nouveau_drv.h"
@@ -136,6 +137,14 @@ int nouveau_backlight_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+#ifdef CONFIG_ACPI
+ if (acpi_video_backlight_support()) {
+ NV_INFO(dev, "ACPI backlight interface available, "
+ "not registering our own\n");
+ return 0;
+ }
+#endif
+
switch (dev_priv->card_type) {
case NV_40:
return nouveau_nv40_backlight_init(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 5f21030a293b..b2293576f278 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6829,7 +6829,7 @@ nouveau_bios_posted(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned htotal;
- if (dev_priv->chipset >= NV_50) {
+ if (dev_priv->card_type >= NV_50) {
if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
NVReadVgaCrtc(dev, 0, 0x1a) == 0)
return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 80353e2b8409..c41e1c200ef5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -143,8 +143,10 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
nvbo->no_vm = no_vm;
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
+ nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
+ nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
+ &align, &size);
align >>= PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
@@ -176,6 +178,31 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}
+static void
+set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+
+ if (dev_priv->card_type == NV_10 &&
+ nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+ /*
+ * Make sure that the color and depth buffers are handled
+ * by independent memory controller units. Up to a 9x
+ * speed up when alpha-blending and depth-test are enabled
+ * at the same time.
+ */
+ int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
+
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
+ nvbo->placement.fpfn = vram_pages / 2;
+ nvbo->placement.lpfn = ~0;
+ } else {
+ nvbo->placement.fpfn = 0;
+ nvbo->placement.lpfn = vram_pages / 2;
+ }
+ }
+}
+
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
@@ -190,6 +217,8 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
pl->busy_placement = nvbo->busy_placements;
set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
type | busy, flags);
+
+ set_placement_range(nvbo, type);
}
int
@@ -525,7 +554,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
stride = 16 * 4;
height = amount / stride;
- if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+ if (new_mem->mem_type == TTM_PL_VRAM &&
+ nouveau_bo_tile_layout(nvbo)) {
ret = RING_SPACE(chan, 8);
if (ret)
return ret;
@@ -546,7 +576,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
OUT_RING (chan, 1);
}
- if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+ if (old_mem->mem_type == TTM_PL_VRAM &&
+ nouveau_bo_tile_layout(nvbo)) {
ret = RING_SPACE(chan, 8);
if (ret)
return ret;
@@ -753,7 +784,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
if (dev_priv->card_type == NV_50) {
ret = nv50_mem_vm_bind_linear(dev,
offset + dev_priv->vm_vram_base,
- new_mem->size, nvbo->tile_flags,
+ new_mem->size,
+ nouveau_bo_tile_layout(nvbo),
offset);
if (ret)
return ret;
@@ -894,7 +926,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
* nothing to do here.
*/
if (bo->mem.mem_type != TTM_PL_VRAM) {
- if (dev_priv->card_type < NV_50 || !nvbo->tile_flags)
+ if (dev_priv->card_type < NV_50 ||
+ !nouveau_bo_tile_layout(nvbo))
return 0;
}
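
A concrete reading of set_placement_range() above: on an NV10-class card with, say, 128 MiB of VRAM and 4 KiB pages, depth (zeta) buffers are pinned to the upper half of VRAM and colour buffers to the lower half, so the two streams land on different memory controller units:

    #include <stdio.h>

    int main(void)
    {
            unsigned long vram_size = 128UL << 20;       /* 128 MiB */
            unsigned long vram_pages = vram_size >> 12;  /* 32768 pages */

            printf("zeta:   fpfn=%lu lpfn=~0\n", vram_pages / 2); /* 16384 up */
            printf("colour: fpfn=0 lpfn=%lu\n", vram_pages / 2);  /* to 16384 */
            return 0;
    }
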
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 0871495096fa..52c356e9a3d1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -281,7 +281,7 @@ detect_analog:
nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
if (!nv_encoder && !nouveau_tv_disable)
nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
- if (nv_encoder) {
+ if (nv_encoder && force) {
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_encoder_helper_funcs *helper =
encoder->helper_private;
@@ -641,11 +641,28 @@ nouveau_connector_get_modes(struct drm_connector *connector)
return ret;
}
+static unsigned
+get_tmds_link_bandwidth(struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+ struct dcb_entry *dcb = nv_connector->detected_encoder->dcb;
+
+ if (dcb->location != DCB_LOC_ON_CHIP ||
+ dev_priv->chipset >= 0x46)
+ return 165000;
+ else if (dev_priv->chipset >= 0x40)
+ return 155000;
+ else if (dev_priv->chipset >= 0x18)
+ return 135000;
+ else
+ return 112000;
+}
+
static int
nouveau_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
@@ -663,11 +680,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
max_clock = 400000;
break;
case OUTPUT_TMDS:
- if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
- !nv_encoder->dcb->duallink_possible)
- max_clock = 165000;
- else
- max_clock = 330000;
+ max_clock = get_tmds_link_bandwidth(connector);
+ if (nouveau_duallink && nv_encoder->dcb->duallink_possible)
+ max_clock *= 2;
break;
case OUTPUT_ANALOG:
max_clock = nv_encoder->dcb->crtconf.maxfreq;
@@ -709,44 +724,6 @@ nouveau_connector_best_encoder(struct drm_connector *connector)
return NULL;
}
-void
-nouveau_connector_set_polling(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- bool spare_crtc = false;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- spare_crtc |= !crtc->enabled;
-
- connector->polled = 0;
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_VGA:
- case DRM_MODE_CONNECTOR_TV:
- if (dev_priv->card_type >= NV_50 ||
- (nv_gf4_disp_arch(dev) && spare_crtc))
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- break;
-
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_DVID:
- case DRM_MODE_CONNECTOR_HDMIA:
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_eDP:
- if (dev_priv->card_type >= NV_50)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- else if (connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
- spare_crtc)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- break;
-
- default:
- break;
- }
-}
-
static const struct drm_connector_helper_funcs
nouveau_connector_helper_funcs = {
.get_modes = nouveau_connector_get_modes,
@@ -872,6 +849,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
}
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/* fall-through */
case DCB_CONNECTOR_TV_0:
case DCB_CONNECTOR_TV_1:
@@ -888,11 +866,16 @@ nouveau_connector_create(struct drm_device *dev, int index)
dev->mode_config.dithering_mode_property,
nv_connector->use_dithering ?
DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+
+ if (dcb->type != DCB_CONNECTOR_LVDS) {
+ if (dev_priv->card_type >= NV_50)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ }
break;
}
- nouveau_connector_set_polling(connector);
-
drm_sysfs_connector_add(connector);
dcb->drm = connector;
return dcb->drm;
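
get_tmds_link_bandwidth() plus the dual-link branch above yields the final pixel-clock ceiling for mode validation. For example, an on-chip TMDS encoder on a 0x44 chipset is limited to 155 MHz single-link and doubles when dual-link is possible and not disabled:

    #include <stdio.h>

    static unsigned tmds_bw_khz(unsigned chipset, int on_chip)
    {
            if (!on_chip || chipset >= 0x46)
                    return 165000;
            else if (chipset >= 0x40)
                    return 155000;
            else if (chipset >= 0x18)
                    return 135000;
            else
                    return 112000;
    }

    int main(void)
    {
            unsigned max_clock = tmds_bw_khz(0x44, 1);

            max_clock *= 2; /* nouveau_duallink && duallink_possible */
            printf("max pixel clock: %u kHz\n", max_clock); /* 310000 */
            return 0;
    }
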
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index c21ed6b16f88..711b1e9203af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -52,9 +52,6 @@ static inline struct nouveau_connector *nouveau_connector(
struct drm_connector *
nouveau_connector_create(struct drm_device *, int index);
-void
-nouveau_connector_set_polling(struct drm_connector *);
-
int
nouveau_connector_bpp(struct drm_connector *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 3a07e580d27a..1c7db64c03bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -100,6 +100,9 @@ struct nouveau_bo {
int pin_refcnt;
};
+#define nouveau_bo_tile_layout(nvbo) \
+ ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
+
static inline struct nouveau_bo *
nouveau_bo(struct ttm_buffer_object *bo)
{
@@ -304,6 +307,7 @@ struct nouveau_fifo_engine {
void (*destroy_context)(struct nouveau_channel *);
int (*load_context)(struct nouveau_channel *);
int (*unload_context)(struct drm_device *);
+ void (*tlb_flush)(struct drm_device *dev);
};
struct nouveau_pgraph_object_method {
@@ -336,6 +340,7 @@ struct nouveau_pgraph_engine {
void (*destroy_context)(struct nouveau_channel *);
int (*load_context)(struct nouveau_channel *);
int (*unload_context)(struct drm_device *);
+ void (*tlb_flush)(struct drm_device *dev);
void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
uint32_t size, uint32_t pitch);
@@ -485,13 +490,13 @@ enum nv04_fp_display_regs {
};
struct nv04_crtc_reg {
- unsigned char MiscOutReg; /* */
+ unsigned char MiscOutReg;
uint8_t CRTC[0xa0];
uint8_t CR58[0x10];
uint8_t Sequencer[5];
uint8_t Graphics[9];
uint8_t Attribute[21];
- unsigned char DAC[768]; /* Internal Colorlookuptable */
+ unsigned char DAC[768];
/* PCRTC regs */
uint32_t fb_start;
@@ -539,43 +544,9 @@ struct nv04_output_reg {
};
struct nv04_mode_state {
- uint32_t bpp;
- uint32_t width;
- uint32_t height;
- uint32_t interlace;
- uint32_t repaint0;
- uint32_t repaint1;
- uint32_t screen;
- uint32_t scale;
- uint32_t dither;
- uint32_t extra;
- uint32_t fifo;
- uint32_t pixel;
- uint32_t horiz;
- int arbitration0;
- int arbitration1;
- uint32_t pll;
- uint32_t pllB;
- uint32_t vpll;
- uint32_t vpll2;
- uint32_t vpllB;
- uint32_t vpll2B;
+ struct nv04_crtc_reg crtc_reg[2];
uint32_t pllsel;
uint32_t sel_clk;
- uint32_t general;
- uint32_t crtcOwner;
- uint32_t head;
- uint32_t head2;
- uint32_t cursorConfig;
- uint32_t cursor0;
- uint32_t cursor1;
- uint32_t cursor2;
- uint32_t timingH;
- uint32_t timingV;
- uint32_t displayV;
- uint32_t crtcSync;
-
- struct nv04_crtc_reg crtc_reg[2];
};
enum nouveau_card_type {
@@ -613,6 +584,12 @@ struct drm_nouveau_private {
struct work_struct irq_work;
struct work_struct hpd_work;
+ struct {
+ spinlock_t lock;
+ uint32_t hpd0_bits;
+ uint32_t hpd1_bits;
+ } hpd_state;
+
struct list_head vbl_waiting;
struct {
@@ -1045,6 +1022,7 @@ extern int nv50_fifo_create_context(struct nouveau_channel *);
extern void nv50_fifo_destroy_context(struct nouveau_channel *);
extern int nv50_fifo_load_context(struct nouveau_channel *);
extern int nv50_fifo_unload_context(struct drm_device *);
+extern void nv50_fifo_tlb_flush(struct drm_device *dev);
/* nvc0_fifo.c */
extern int nvc0_fifo_init(struct drm_device *);
@@ -1122,6 +1100,8 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern void nv50_graph_context_switch(struct drm_device *);
extern int nv50_grctx_init(struct nouveau_grctx *);
+extern void nv50_graph_tlb_flush(struct drm_device *dev);
+extern void nv86_graph_tlb_flush(struct drm_device *dev);
/* nvc0_graph.c */
extern int nvc0_graph_init(struct drm_device *);
@@ -1239,7 +1219,6 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
-extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
/* nouveau_fence.c */
struct nouveau_fence;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 441b12420bb1..ab1bbfbf266e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -249,6 +249,7 @@ alloc_semaphore(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_semaphore *sema;
+ int ret;
if (!USE_SEMA(dev))
return NULL;
@@ -257,10 +258,14 @@ alloc_semaphore(struct drm_device *dev)
if (!sema)
goto fail;
+ ret = drm_mm_pre_get(&dev_priv->fence.heap);
+ if (ret)
+ goto fail;
+
spin_lock(&dev_priv->fence.lock);
sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
if (sema->mem)
- sema->mem = drm_mm_get_block(sema->mem, 4, 0);
+ sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0);
spin_unlock(&dev_priv->fence.lock);
if (!sema->mem)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5c4c929d7f74..9a1fdcf400c2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -107,23 +107,29 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
}
static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) {
- switch (tile_flags) {
- case 0x0000:
- case 0x1800:
- case 0x2800:
- case 0x4800:
- case 0x7000:
- case 0x7400:
- case 0x7a00:
- case 0xe000:
- break;
- default:
- NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
- return false;
+nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->card_type >= NV_50) {
+ switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
+ case 0x0000:
+ case 0x1800:
+ case 0x2800:
+ case 0x4800:
+ case 0x7000:
+ case 0x7400:
+ case 0x7a00:
+ case 0xe000:
+ return true;
+ }
+ } else {
+ if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
+ return true;
}
- return true;
+ NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
+ return false;
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index bed669a54a2d..b9672a05c411 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -519,11 +519,11 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
struct pll_lims pll_lim;
struct nouveau_pll_vals pv;
- uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
+ enum pll_types pll = head ? PLL_VPLL1 : PLL_VPLL0;
- if (get_pll_limits(dev, pllreg, &pll_lim))
+ if (get_pll_limits(dev, pll, &pll_lim))
return;
- nouveau_hw_get_pllvals(dev, pllreg, &pv);
+ nouveau_hw_get_pllvals(dev, pll, &pv);
if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
@@ -536,7 +536,7 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
pv.M1 = pll_lim.vco1.max_m;
pv.N1 = pll_lim.vco1.min_n;
pv.log2P = pll_lim.max_usable_log2p;
- nouveau_hw_setpll(dev, pllreg, &pv);
+ nouveau_hw_setpll(dev, pll_lim.reg, &pv);
}
/*
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
index 869130f83602..2989090b9434 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.h
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -416,6 +416,25 @@ nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
}
static inline void
+nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
+
+ if (dev_priv->card_type == NV_04) {
+ /*
+ * Hilarious, the 24th bit doesn't want to stick to
+ * PCRTC_START...
+ */
+ int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX);
+
+ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX,
+ (cre_heb & ~0x40) | ((offset >> 18) & 0x40));
+ }
+}
+
+static inline void
nv_show_cursor(struct drm_device *dev, int head, bool show)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
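
The NV04 quirk in nv_set_crtc_base() above moves bit 24 of the framebuffer offset into bit 6 of CRE_HEB, since PCRTC_START will not hold it. The shift is easy to check by hand:

    #include <stdio.h>

    int main(void)
    {
            unsigned offset = 0x01000000;  /* bit 24 set */
            unsigned cre_heb = 0x00;       /* pretend current register value */

            cre_heb = (cre_heb & ~0x40) | ((offset >> 18) & 0x40);
            printf("CRE_HEB = 0x%02x\n", cre_heb); /* 0x40: bit 24 -> bit 6 */
            return 0;
    }
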
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index fdd7e3de79c8..cb389d014326 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -256,7 +256,7 @@ nouveau_i2c_find(struct drm_device *dev, int index)
if (index >= DCB_MAX_NUM_I2C_ENTRIES)
return NULL;
- if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) {
+ if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) {
uint32_t reg = 0xe500, val;
if (i2c->port_type == 6) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 6fd51a51c608..7bfd9e6c9d67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -42,6 +42,13 @@
#include "nouveau_connector.h"
#include "nv50_display.h"
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+static int nouveau_ratelimit(void)
+{
+ return __ratelimit(&nouveau_ratelimit_state);
+}
+
void
nouveau_irq_preinstall(struct drm_device *dev)
{
@@ -53,6 +60,7 @@ nouveau_irq_preinstall(struct drm_device *dev)
if (dev_priv->card_type >= NV_50) {
INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
+ spin_lock_init(&dev_priv->hpd_state.lock);
INIT_LIST_HEAD(&dev_priv->vbl_waiting);
}
}
@@ -202,8 +210,8 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
}
if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- u32 get = nv_rd32(dev, 0x003244);
- u32 put = nv_rd32(dev, 0x003240);
+ u32 dma_get = nv_rd32(dev, 0x003244);
+ u32 dma_put = nv_rd32(dev, 0x003240);
u32 push = nv_rd32(dev, 0x003220);
u32 state = nv_rd32(dev, 0x003228);
@@ -213,16 +221,18 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
u32 ib_get = nv_rd32(dev, 0x003334);
u32 ib_put = nv_rd32(dev, 0x003330);
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ if (nouveau_ratelimit())
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
"State 0x%08x Push 0x%08x\n",
- chid, ho_get, get, ho_put, put, ib_get, ib_put,
- state, push);
+ chid, ho_get, dma_get, ho_put,
+ dma_put, ib_get, ib_put, state,
+ push);
/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
nv_wr32(dev, 0x003364, 0x00000000);
- if (get != put || ho_get != ho_put) {
- nv_wr32(dev, 0x003244, put);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nv_wr32(dev, 0x003244, dma_put);
nv_wr32(dev, 0x003328, ho_put);
} else
if (ib_get != ib_put) {
@@ -231,10 +241,10 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
} else {
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
"Put 0x%08x State 0x%08x Push 0x%08x\n",
- chid, get, put, state, push);
+ chid, dma_get, dma_put, state, push);
- if (get != put)
- nv_wr32(dev, 0x003244, put);
+ if (dma_get != dma_put)
+ nv_wr32(dev, 0x003244, dma_put);
}
nv_wr32(dev, 0x003228, 0x00000000);
@@ -266,8 +276,9 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
}
if (status) {
- NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
- status, chid);
+ if (nouveau_ratelimit())
+ NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+ status, chid);
nv_wr32(dev, NV03_PFIFO_INTR_0, status);
status = 0;
}
@@ -544,13 +555,6 @@ nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
}
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-static int nouveau_ratelimit(void)
-{
- return __ratelimit(&nouveau_ratelimit_state);
-}
-
static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
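The DEFINE_RATELIMIT_STATE moved to the top of the file allows 20 messages per 3*HZ window; a minimal userspace analogue of that policy, assuming a simple fixed-window counter rather than the kernel's implementation, looks like:

#include <stdio.h>
#include <time.h>

/* allow at most 'burst' events per 'interval' seconds */
static int ratelimit_ok(void)
{
	static time_t window_start;
	static int count;
	const int interval = 3, burst = 20;
	time_t now = time(NULL);

	if (now - window_start >= interval) {
		window_start = now;	/* start a fresh window */
		count = 0;
	}
	return count++ < burst;
}

int main(void)
{
	for (int i = 0; i < 25; i++)
		if (ratelimit_ok())
			printf("event %d logged\n", i);
	return 0;
}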
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index a163c7c612e7..fe4a30dc4b42 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -33,9 +33,9 @@
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
-#include "nouveau_drv.h"
-#define MIN(a,b) a < b ? a : b
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
/*
* NV10-NV40 tiling helpers
@@ -175,11 +175,10 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
}
}
}
- dev_priv->engine.instmem.flush(dev);
- nv50_vm_flush(dev, 5);
- nv50_vm_flush(dev, 0);
- nv50_vm_flush(dev, 4);
+ dev_priv->engine.instmem.flush(dev);
+ dev_priv->engine.fifo.tlb_flush(dev);
+ dev_priv->engine.graph.tlb_flush(dev);
nv50_vm_flush(dev, 6);
return 0;
}
@@ -209,11 +208,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
pte++;
}
}
- dev_priv->engine.instmem.flush(dev);
- nv50_vm_flush(dev, 5);
- nv50_vm_flush(dev, 0);
- nv50_vm_flush(dev, 4);
+ dev_priv->engine.instmem.flush(dev);
+ dev_priv->engine.fifo.tlb_flush(dev);
+ dev_priv->engine.graph.tlb_flush(dev);
nv50_vm_flush(dev, 6);
}
@@ -653,6 +651,7 @@ nouveau_mem_gart_init(struct drm_device *dev)
void
nouveau_mem_timing_init(struct drm_device *dev)
{
+ /* cards < NVC0 only */
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
@@ -719,14 +718,14 @@ nouveau_mem_timing_init(struct drm_device *dev)
tUNK_19 = 1;
tUNK_20 = 0;
tUNK_21 = 0;
- switch (MIN(recordlen,21)) {
- case 21:
+ switch (min(recordlen, 22)) {
+ case 22:
tUNK_21 = entry[21];
- case 20:
+ case 21:
tUNK_20 = entry[20];
- case 19:
+ case 20:
tUNK_19 = entry[19];
- case 18:
+ case 19:
tUNK_18 = entry[18];
default:
tUNK_0 = entry[0];
@@ -756,24 +755,30 @@ nouveau_mem_timing_init(struct drm_device *dev)
timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
if(recordlen > 19) {
timing->reg_100228 += (tUNK_19 - 1) << 24;
- } else {
}/* I cannot back up this else statement right now
+ else {
timing->reg_100228 += tUNK_12 << 24;
- }
+ }*/
/* XXX: reg_10022c */
+ timing->reg_10022c = tUNK_2 - 1;
timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
tUNK_13 << 8 | tUNK_13);
/* XXX: +6? */
timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
- if(tUNK_10 > tUNK_11) {
- timing->reg_100234 += tUNK_10 << 16;
- } else {
- timing->reg_100234 += tUNK_11 << 16;
+ timing->reg_100234 += max(tUNK_10, tUNK_11) << 16;
+
+ /* XXX: reg_100238, reg_10023c
+ * reg: 0x00??????
+ * reg_10023c:
+ * 0 for pre-NV50 cards
+ * 0x????0202 for NV50+ cards (empirical evidence) */
+ if (dev_priv->card_type >= NV_50) {
+ timing->reg_10023c = 0x202;
}
- /* XXX; reg_100238, reg_10023c */
NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
timing->reg_100220, timing->reg_100224,
timing->reg_100228, timing->reg_10022c);
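The min(recordlen, 22) switch above relies on case fall-through so that short BIOS records leave the trailing tUNK_* fields at their defaults; a runnable sketch with a made-up 20-byte record:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t entry[22] = { [18] = 2, [19] = 1, [20] = 5, [21] = 7 };
	int recordlen = 20;		/* pretend BIOS record length */
	int tUNK_18 = 1, tUNK_19 = 1, tUNK_20 = 0, tUNK_21 = 0;

	switch (recordlen < 22 ? recordlen : 22) {
	case 22: tUNK_21 = entry[21];	/* fall through */
	case 21: tUNK_20 = entry[20];	/* fall through */
	case 20: tUNK_19 = entry[19];	/* fall through */
	case 19: tUNK_18 = entry[18];	/* fall through */
	default: break;
	}
	/* prints 18=2 19=1 20=0 21=0: bytes 20/21 keep their defaults */
	printf("18=%d 19=%d 20=%d 21=%d\n", tUNK_18, tUNK_19, tUNK_20, tUNK_21);
	return 0;
}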
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 896cf8634144..dd572adca02a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -129,7 +129,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
if (ramin == NULL) {
spin_unlock(&dev_priv->ramin_lock);
nouveau_gpuobj_ref(NULL, &gpuobj);
- return ret;
+ return -ENOMEM;
}
ramin = drm_mm_get_block_atomic(ramin, size, align);
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 1c99c55d6d46..9f7b158f5825 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -284,6 +284,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
}
}
+#ifdef CONFIG_HWMON
static ssize_t
nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
{
@@ -395,10 +396,12 @@ static struct attribute *hwmon_attributes[] = {
static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes,
};
+#endif
static int
nouveau_hwmon_init(struct drm_device *dev)
{
+#ifdef CONFIG_HWMON
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct device *hwmon_dev;
@@ -425,13 +428,14 @@ nouveau_hwmon_init(struct drm_device *dev)
}
pm->hwmon = hwmon_dev;
-
+#endif
return 0;
}
static void
nouveau_hwmon_fini(struct drm_device *dev)
{
+#ifdef CONFIG_HWMON
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
@@ -439,6 +443,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
hwmon_device_unregister(pm->hwmon);
}
+#endif
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index 7f16697cc96c..2d8580927ca4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -153,26 +153,42 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
return -ENOMEM;
}
+static struct nouveau_ramht_entry *
+nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
+{
+ struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
+ struct nouveau_ramht_entry *entry;
+ unsigned long flags;
+
+ if (!ramht)
+ return NULL;
+
+ spin_lock_irqsave(&ramht->lock, flags);
+ list_for_each_entry(entry, &ramht->entries, head) {
+ if (entry->channel == chan &&
+ (!handle || entry->handle == handle)) {
+ list_del(&entry->head);
+ spin_unlock_irqrestore(&ramht->lock, flags);
+
+ return entry;
+ }
+ }
+ spin_unlock_irqrestore(&ramht->lock, flags);
+
+ return NULL;
+}
+
static void
-nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
+nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
- struct nouveau_ramht_entry *entry, *tmp;
+ unsigned long flags;
u32 co, ho;
- list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) {
- if (entry->channel != chan || entry->handle != handle)
- continue;
-
- nouveau_gpuobj_ref(NULL, &entry->gpuobj);
- list_del(&entry->head);
- kfree(entry);
- break;
- }
-
+ spin_lock_irqsave(&chan->ramht->lock, flags);
co = ho = nouveau_ramht_hash_handle(chan, handle);
do {
if (nouveau_ramht_entry_valid(dev, ramht, co) &&
@@ -184,7 +200,7 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
nv_wo32(ramht, co + 0, 0x00000000);
nv_wo32(ramht, co + 4, 0x00000000);
instmem->flush(dev);
- return;
+ goto out;
}
co += 8;
@@ -194,17 +210,22 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
chan->id, handle);
+out:
+ spin_unlock_irqrestore(&chan->ramht->lock, flags);
}
void
nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
{
- struct nouveau_ramht *ramht = chan->ramht;
- unsigned long flags;
+ struct nouveau_ramht_entry *entry;
- spin_lock_irqsave(&ramht->lock, flags);
- nouveau_ramht_remove_locked(chan, handle);
- spin_unlock_irqrestore(&ramht->lock, flags);
+ entry = nouveau_ramht_remove_entry(chan, handle);
+ if (!entry)
+ return;
+
+ nouveau_ramht_remove_hash(chan, entry->handle);
+ nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+ kfree(entry);
}
struct nouveau_gpuobj *
@@ -265,23 +286,19 @@ void
nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
struct nouveau_channel *chan)
{
- struct nouveau_ramht_entry *entry, *tmp;
+ struct nouveau_ramht_entry *entry;
struct nouveau_ramht *ramht;
- unsigned long flags;
if (ref)
kref_get(&ref->refcount);
ramht = *ptr;
if (ramht) {
- spin_lock_irqsave(&ramht->lock, flags);
- list_for_each_entry_safe(entry, tmp, &ramht->entries, head) {
- if (entry->channel != chan)
- continue;
-
- nouveau_ramht_remove_locked(chan, entry->handle);
+ while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
+ nouveau_ramht_remove_hash(chan, entry->handle);
+ nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+ kfree(entry);
}
- spin_unlock_irqrestore(&ramht->lock, flags);
kref_put(&ramht->refcount, nouveau_ramht_del);
}
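nouveau_ramht_remove() above now unlinks the entry while holding the spinlock and drops the gpuobj reference only after the lock is released; a generic userspace sketch of that unlink-under-lock, release-outside-lock pattern, with a pthread mutex standing in for the spinlock and an illustrative list type:

#include <pthread.h>
#include <stdlib.h>

struct entry { struct entry *next; void *payload; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;

static struct entry *remove_entry(void)
{
	pthread_mutex_lock(&lock);
	struct entry *e = head;
	if (e)
		head = e->next;		/* unlink while holding the lock */
	pthread_mutex_unlock(&lock);
	return e;			/* caller releases outside the lock */
}

int main(void)
{
	struct entry *e = remove_entry();
	free(e);			/* heavyweight teardown, lock not held */
	return 0;
}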
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 288bacac7e5a..d4ac97007038 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -120,8 +120,8 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
dev_priv->engine.instmem.flush(nvbe->dev);
if (dev_priv->card_type == NV_50) {
- nv50_vm_flush(dev, 5); /* PGRAPH */
- nv50_vm_flush(dev, 0); /* PFIFO */
+ dev_priv->engine.fifo.tlb_flush(dev);
+ dev_priv->engine.graph.tlb_flush(dev);
}
nvbe->bound = true;
@@ -162,8 +162,8 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
dev_priv->engine.instmem.flush(nvbe->dev);
if (dev_priv->card_type == NV_50) {
- nv50_vm_flush(dev, 5);
- nv50_vm_flush(dev, 0);
+ dev_priv->engine.fifo.tlb_flush(dev);
+ dev_priv->engine.graph.tlb_flush(dev);
}
nvbe->bound = false;
@@ -224,7 +224,11 @@ nouveau_sgdma_init(struct drm_device *dev)
int i, ret;
if (dev_priv->card_type < NV_50) {
- aper_size = (64 * 1024 * 1024);
+ if (dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
+ aper_size = 64 * 1024 * 1024;
+ else
+ aper_size = 512 * 1024 * 1024;
+
obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
obj_size += 8; /* ctxdma header */
} else {
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index ed7757f14083..049f755567e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -354,6 +354,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv50_graph_destroy_context;
engine->graph.load_context = nv50_graph_load_context;
engine->graph.unload_context = nv50_graph_unload_context;
+ if (dev_priv->chipset != 0x86)
+ engine->graph.tlb_flush = nv50_graph_tlb_flush;
+ else {
+ /* from what I can see, NVIDIA does this on every
+ * pre-NVA3 board except NVAC, but we've only
+ * ever seen problems on NV86
+ */
+ engine->graph.tlb_flush = nv86_graph_tlb_flush;
+ }
engine->fifo.channels = 128;
engine->fifo.init = nv50_fifo_init;
engine->fifo.takedown = nv50_fifo_takedown;
@@ -365,6 +374,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv50_fifo_destroy_context;
engine->fifo.load_context = nv50_fifo_load_context;
engine->fifo.unload_context = nv50_fifo_unload_context;
+ engine->fifo.tlb_flush = nv50_fifo_tlb_flush;
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
@@ -1041,6 +1051,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_PTIMER_TIME:
getparam->value = dev_priv->engine.timer.read(dev);
break;
+ case NOUVEAU_GETPARAM_HAS_BO_USAGE:
+ getparam->value = 1;
+ break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
* address is the same. User is supposed to know the card
@@ -1051,7 +1064,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
}
/* FALLTHRU */
default:
- NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
+ NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
}
@@ -1066,7 +1079,7 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data,
switch (setparam->param) {
default:
- NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
+ NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 16bbbf1eff63..7ecc4adc1e45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -191,7 +191,7 @@ nv40_temp_get(struct drm_device *dev)
int offset = sensor->offset_mult / sensor->offset_div;
int core_temp;
- if (dev_priv->chipset >= 0x50) {
+ if (dev_priv->card_type >= NV_50) {
core_temp = nv_rd32(dev, 0x20008);
} else {
core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff;
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index c71abc2a34d5..40e180741629 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -158,7 +158,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_connector *connector;
unsigned char seq1 = 0, crtc17 = 0;
unsigned char crtc1A;
@@ -213,10 +212,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
NVVgaSeqReset(dev, nv_crtc->index, false);
NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
-
- /* Update connector polling modes */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- nouveau_connector_set_polling(connector);
}
static bool
@@ -831,7 +826,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
/* Update the framebuffer location. */
regp->fb_start = nv_crtc->fb.offset & ~3;
regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
- NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start);
+ nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
/* Update the arbitration parameters. */
nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index c936403b26e2..ef23550407b5 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -185,14 +185,15 @@ static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
- /* For internal panels and gpu scaling on DVI we need the native mode */
- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
- if (!nv_connector->native_mode)
- return false;
+ if (!nv_connector->native_mode ||
+ nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
+ mode->hdisplay > nv_connector->native_mode->hdisplay ||
+ mode->vdisplay > nv_connector->native_mode->vdisplay) {
+ nv_encoder->mode = *adjusted_mode;
+
+ } else {
nv_encoder->mode = *nv_connector->native_mode;
adjusted_mode->clock = nv_connector->native_mode->clock;
- } else {
- nv_encoder->mode = *adjusted_mode;
}
return true;
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 6a6eb697d38e..eb1c70dd82ed 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -76,6 +76,15 @@ nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
reg += 4;
nouveau_hw_setpll(dev, reg, &state->calc);
+
+ if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) {
+ if (dev_priv->card_type == NV_20)
+ nv_mask(dev, 0x1002c4, 0, 1 << 20);
+
+ /* Reset the DLLs */
+ nv_mask(dev, 0x1002c0, 0, 1 << 8);
+ }
+
kfree(state);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c
index 2cdc2bfe7179..de81151648f8 100644
--- a/drivers/gpu/drm/nouveau/nv50_calc.c
+++ b/drivers/gpu/drm/nouveau/nv50_calc.c
@@ -51,24 +51,28 @@ nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk,
int *N, int *fN, int *M, int *P)
{
fixed20_12 fb_div, a, b;
+ u32 refclk = pll->refclk / 10;
+ u32 max_vco_freq = pll->vco1.maxfreq / 10;
+ u32 max_vco_inputfreq = pll->vco1.max_inputfreq / 10;
+ clk /= 10;
- *P = pll->vco1.maxfreq / clk;
+ *P = max_vco_freq / clk;
if (*P > pll->max_p)
*P = pll->max_p;
if (*P < pll->min_p)
*P = pll->min_p;
- /* *M = ceil(refclk / pll->vco.max_inputfreq); */
- a.full = dfixed_const(pll->refclk);
- b.full = dfixed_const(pll->vco1.max_inputfreq);
+ /* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq); */
+ a.full = dfixed_const(refclk + max_vco_inputfreq);
+ b.full = dfixed_const(max_vco_inputfreq);
a.full = dfixed_div(a, b);
- a.full = dfixed_ceil(a);
+ a.full = dfixed_floor(a);
*M = dfixed_trunc(a);
/* fb_div = (vco * *M) / refclk; */
fb_div.full = dfixed_const(clk * *P);
fb_div.full = dfixed_mul(fb_div, a);
- a.full = dfixed_const(pll->refclk);
+ a.full = dfixed_const(refclk);
fb_div.full = dfixed_div(fb_div, a);
/* *N = floor(fb_div); */
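The /10 pre-scaling above exists because fixed20_12 carries only 20 integer bits; a VCO frequency expressed in kHz can already exceed that range before any multiplication. A small sketch with an assumed, not board-specific, VCO figure:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t int_max = (1u << 20) - 1;	/* largest fixed20_12 integer */
	uint32_t vco_khz = 3200000;		/* a plausible NV50 VCO in kHz */

	printf("fixed20_12 max %u, vco %u kHz: %s\n", int_max, vco_khz,
	       vco_khz > int_max ? "overflows" : "fits");
	printf("after /10: %u -> %s\n", vco_khz / 10,
	       vco_khz / 10 > int_max ? "overflows" : "fits");
	return 0;
}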
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 16380d52cd88..56476d0c6de8 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -546,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
- nv_crtc->fb.tile_flags = fb->nvbo->tile_flags;
+ nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
ret = RING_SPACE(evo, 2);
@@ -578,7 +578,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
fb->nvbo->tile_mode);
}
if (dev_priv->chipset == 0x50)
- OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format);
+ OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
else
OUT_RING(evo, format);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 55c9663ef2bf..f624c611ddea 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1032,11 +1032,18 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
struct drm_connector *connector;
const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
uint32_t unplug_mask, plug_mask, change_mask;
- uint32_t hpd0, hpd1 = 0;
+ uint32_t hpd0, hpd1;
- hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ spin_lock_irq(&dev_priv->hpd_state.lock);
+ hpd0 = dev_priv->hpd_state.hpd0_bits;
+ dev_priv->hpd_state.hpd0_bits = 0;
+ hpd1 = dev_priv->hpd_state.hpd1_bits;
+ dev_priv->hpd_state.hpd1_bits = 0;
+ spin_unlock_irq(&dev_priv->hpd_state.lock);
+
+ hpd0 &= nv_rd32(dev, 0xe050);
if (dev_priv->chipset >= 0x90)
- hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+ hpd1 &= nv_rd32(dev, 0xe070);
plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
@@ -1078,10 +1085,6 @@ nv50_display_irq_hotplug_bh(struct work_struct *work)
helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
}
- nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
- if (dev_priv->chipset >= 0x90)
- nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
-
drm_helper_hpd_irq_event(dev);
}
@@ -1092,8 +1095,22 @@ nv50_display_irq_handler(struct drm_device *dev)
uint32_t delayed = 0;
if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
- if (!work_pending(&dev_priv->hpd_work))
- queue_work(dev_priv->wq, &dev_priv->hpd_work);
+ uint32_t hpd0_bits, hpd1_bits = 0;
+
+ hpd0_bits = nv_rd32(dev, 0xe054);
+ nv_wr32(dev, 0xe054, hpd0_bits);
+
+ if (dev_priv->chipset >= 0x90) {
+ hpd1_bits = nv_rd32(dev, 0xe074);
+ nv_wr32(dev, 0xe074, hpd1_bits);
+ }
+
+ spin_lock(&dev_priv->hpd_state.lock);
+ dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
+ dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
+ spin_unlock(&dev_priv->hpd_state.lock);
+
+ queue_work(dev_priv->wq, &dev_priv->hpd_work);
}
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
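The hunk above latches hotplug bits in the IRQ handler and drains them in the bottom half, so pulses arriving between the two are accumulated rather than lost; a userspace sketch of the handoff, with a mutex standing in for hpd_state.lock:

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t hpd0_latched;

static void irq_handler(uint32_t hw_bits)
{
	pthread_mutex_lock(&lock);
	hpd0_latched |= hw_bits;	/* accumulate, never overwrite */
	pthread_mutex_unlock(&lock);
}

static uint32_t bottom_half(void)
{
	pthread_mutex_lock(&lock);
	uint32_t bits = hpd0_latched;	/* take and clear atomically */
	hpd0_latched = 0;
	pthread_mutex_unlock(&lock);
	return bits;
}

int main(void)
{
	irq_handler(0x0001);
	irq_handler(0x0100);		/* second IRQ before the BH runs */
	printf("bh sees 0x%04x\n", bottom_half());
	return 0;
}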
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index a46a961102f3..1da65bd60c10 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -464,3 +464,8 @@ nv50_fifo_unload_context(struct drm_device *dev)
return 0;
}
+void
+nv50_fifo_tlb_flush(struct drm_device *dev)
+{
+ nv50_vm_flush(dev, 5);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index cbf5ae2f67d4..8b669d0af610 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -402,3 +402,55 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
{ 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
{}
};
+
+void
+nv50_graph_tlb_flush(struct drm_device *dev)
+{
+ nv50_vm_flush(dev, 0);
+}
+
+void
+nv86_graph_tlb_flush(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ bool idle, timeout = false;
+ unsigned long flags;
+ u64 start;
+ u32 tmp;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x400500, 0x00000001, 0x00000000);
+
+ start = ptimer->read(dev);
+ do {
+ idle = true;
+
+ for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
+ if ((tmp & 7) == 1)
+ idle = false;
+ }
+
+ for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
+ if ((tmp & 7) == 1)
+ idle = false;
+ }
+
+ for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
+ if ((tmp & 7) == 1)
+ idle = false;
+ }
+ } while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));
+
+ if (timeout) {
+ NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
+ "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
+ nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
+ }
+
+ nv50_vm_flush(dev, 0);
+
+ nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+}
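nv86_graph_tlb_flush() above treats PGRAPH as busy while any 3-bit field of the 0x400380/0x400384/0x400388 status words holds the value 1; a sketch of that scan with made-up register values:

#include <stdio.h>
#include <stdint.h>

static int reg_idle(uint32_t tmp)
{
	for (; tmp; tmp >>= 3)		/* walk the word in 3-bit fields */
		if ((tmp & 7) == 1)
			return 0;	/* field value 1 means busy */
	return 1;
}

int main(void)
{
	uint32_t samples[] = { 0x92, 0x209 };	/* invented status values */

	for (int i = 0; i < 2; i++)
		printf("0x%08x -> %s\n", samples[i],
		       reg_idle(samples[i]) ? "idle" : "busy");
	return 0;
}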
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a53fc974332b..b773229b7647 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -402,7 +402,6 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
}
dev_priv->engine.instmem.flush(dev);
- nv50_vm_flush(dev, 4);
nv50_vm_flush(dev, 6);
gpuobj->im_bound = 1;
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 8e421f644a54..05efb5b9f13e 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -112,6 +112,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base += 3;
break;
case ATOM_IIO_WRITE:
+ (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f12a5b3ec050..4dc5b4714c5a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1650,7 +1650,36 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
}
}
- rdev->config.evergreen.tile_config = gb_addr_config;
+ /* setup tiling info dword. gb_addr_config is not adequate since it does
+ * not have bank info, so create a custom tiling dword.
+ * bits 3:0 num_pipes
+ * bits 7:4 num_banks
+ * bits 11:8 group_size
+ * bits 15:12 row_size
+ */
+ rdev->config.evergreen.tile_config = 0;
+ switch (rdev->config.evergreen.max_tile_pipes) {
+ case 1:
+ default:
+ rdev->config.evergreen.tile_config |= (0 << 0);
+ break;
+ case 2:
+ rdev->config.evergreen.tile_config |= (1 << 0);
+ break;
+ case 4:
+ rdev->config.evergreen.tile_config |= (2 << 0);
+ break;
+ case 8:
+ rdev->config.evergreen.tile_config |= (3 << 0);
+ break;
+ }
+ rdev->config.evergreen.tile_config |=
+ ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+ rdev->config.evergreen.tile_config |=
+ ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+ rdev->config.evergreen.tile_config |=
+ ((gb_addr_config & 0x30000000) >> 28) << 12;
+
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
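A decoder for the custom tile_config dword assembled above, following the bit layout given in the comment; the sample field values are invented, and only the pipe field is expanded (it is stored as log2 per the switch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tc = (2 << 0) | (1 << 4) | (1 << 8) | (2 << 12);

	printf("num_pipes  field %u -> %u pipes\n", tc & 0xf, 1u << (tc & 0xf));
	printf("num_banks  field %u\n", (tc >> 4) & 0xf);
	printf("group_size field %u\n", (tc >> 8) & 0xf);
	printf("row_size   field %u\n", (tc >> 12) & 0xf);
	return 0;
}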
@@ -2033,7 +2062,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 grbm_int_cntl = 0;
if (!rdev->irq.installed) {
- WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
return -EINVAL;
}
/* don't enable anything if the ih is disabled */
@@ -2295,6 +2324,7 @@ restart_ih:
case 0: /* D1 vblank */
if (disp_int & LB_D1_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
@@ -2316,6 +2346,7 @@ restart_ih:
case 0: /* D2 vblank */
if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
@@ -2337,6 +2368,7 @@ restart_ih:
case 0: /* D3 vblank */
if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
@@ -2358,6 +2390,7 @@ restart_ih:
case 0: /* D4 vblank */
if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
@@ -2379,6 +2412,7 @@ restart_ih:
case 0: /* D5 vblank */
if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
@@ -2400,6 +2434,7 @@ restart_ih:
case 0: /* D6 vblank */
if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index ac3b6dde23db..e0e590110dd4 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -459,7 +459,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size += evergreen_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("evergreen failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 0e8f28a68927..8e10aa9f74b0 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
int r;
if (rdev->gart.table.ram.ptr) {
- WARN(1, "R100 PCI GART already initialized.\n");
+ WARN(1, "R100 PCI GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev)
uint32_t tmp = 0;
if (!rdev->irq.installed) {
- WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 34527e600fe9..cde1d3480d93 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj) {
- WARN(1, "RV370 PCIE GART already initialized.\n");
+ WARN(1, "RV370 PCIE GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 33952a12f0a3..4d7a2e1bdb90 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev)
{
u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
ASIC_T_SHIFT;
- u32 actual_temp = 0;
- if ((temp >> 7) & 1)
- actual_temp = 0;
- else
- actual_temp = (temp >> 1) & 0xff;
-
- return actual_temp * 1000;
+ return temp * 1000;
}
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -884,12 +878,15 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
u32 tmp;
/* flush hdp cache so updates hit vram */
- if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
+ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+ !(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
u32 tmp;
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
+ * This seems to cause problems on some AGP cards. Just use the old
+ * method for them.
*/
WREG32(HDP_DEBUG1, 0);
tmp = readl((void __iomem *)ptr);
@@ -919,7 +916,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj) {
- WARN(1, "R600 PCIE GART already initialized.\n");
+ WARN(1, "R600 PCIE GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
@@ -1201,8 +1198,10 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
mc->vram_end, mc->real_vram_size >> 20);
} else {
u64 base = 0;
- if (rdev->flags & RADEON_IS_IGP)
- base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+ if (rdev->flags & RADEON_IS_IGP) {
+ base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+ }
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = 0;
radeon_gtt_location(rdev, mc);
@@ -2724,7 +2723,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
/* Allocate ring buffer */
if (rdev->ih.ring_obj == NULL) {
r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
- true,
+ PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->ih.ring_obj);
if (r) {
@@ -2995,7 +2994,7 @@ int r600_irq_set(struct radeon_device *rdev)
u32 hdmi1, hdmi2;
if (!rdev->irq.installed) {
- WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
return -EINVAL;
}
/* don't enable anything if the ih is disabled */
@@ -3489,10 +3488,12 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
- * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
+ * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
+ * This seems to cause problems on some AGP cards. Just use the old
+ * method for them.
*/
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
- rdev->vram_scratch.ptr) {
+ rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp;
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 8362974ef41a..86e5aa07f0db 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -501,7 +501,7 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 37cc2aa9f923..0f90fc3482ce 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -50,6 +50,7 @@ struct r600_cs_track {
u32 nsamples;
u32 cb_color_base_last[8];
struct radeon_bo *cb_color_bo[8];
+ u64 cb_color_bo_mc[8];
u32 cb_color_bo_offset[8];
struct radeon_bo *cb_color_frag_bo[8];
struct radeon_bo *cb_color_tile_bo[8];
@@ -67,6 +68,7 @@ struct r600_cs_track {
u32 db_depth_size;
u32 db_offset;
struct radeon_bo *db_bo;
+ u64 db_bo_mc;
};
static inline int r600_bpe_from_format(u32 *bpe, u32 format)
@@ -140,6 +142,68 @@ static inline int r600_bpe_from_format(u32 *bpe, u32 format)
return 0;
}
+struct array_mode_checker {
+ int array_mode;
+ u32 group_size;
+ u32 nbanks;
+ u32 npipes;
+ u32 nsamples;
+ u32 bpe;
+};
+
+/* returns alignment in pixels for pitch/height/depth and bytes for base */
+static inline int r600_get_array_mode_alignment(struct array_mode_checker *values,
+ u32 *pitch_align,
+ u32 *height_align,
+ u32 *depth_align,
+ u64 *base_align)
+{
+ u32 tile_width = 8;
+ u32 tile_height = 8;
+ u32 macro_tile_width = values->nbanks;
+ u32 macro_tile_height = values->npipes;
+ u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples;
+ u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
+
+ switch (values->array_mode) {
+ case ARRAY_LINEAR_GENERAL:
+ /* technically tile_width/_height for pitch/height */
+ *pitch_align = 1; /* tile_width */
+ *height_align = 1; /* tile_height */
+ *depth_align = 1;
+ *base_align = 1;
+ break;
+ case ARRAY_LINEAR_ALIGNED:
+ *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe));
+ *height_align = tile_height;
+ *depth_align = 1;
+ *base_align = values->group_size;
+ break;
+ case ARRAY_1D_TILED_THIN1:
+ *pitch_align = max((u32)tile_width,
+ (u32)(values->group_size /
+ (tile_height * values->bpe * values->nsamples)));
+ *height_align = tile_height;
+ *depth_align = 1;
+ *base_align = values->group_size;
+ break;
+ case ARRAY_2D_TILED_THIN1:
+ *pitch_align = max((u32)macro_tile_width,
+ (u32)(((values->group_size / tile_height) /
+ (values->bpe * values->nsamples)) *
+ values->nbanks)) * tile_width;
+ *height_align = macro_tile_height * tile_height;
+ *depth_align = 1;
+ *base_align = max(macro_tile_bytes,
+ (*pitch_align) * values->bpe * (*height_align) * values->nsamples);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
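A worked example of the ARRAY_2D_TILED_THIN1 branch of r600_get_array_mode_alignment() above, with assumed board parameters (8 banks, 2 pipes, 256-byte group, 32bpp, single-sampled):

#include <stdio.h>
#include <stdint.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	uint32_t nbanks = 8, npipes = 2, group_size = 256, bpe = 4, nsamples = 1;
	uint32_t tile_width = 8, tile_height = 8;

	/* macro_tile_width = nbanks, macro_tile_height = npipes, as above */
	uint32_t pitch_align = max(nbanks,
		((group_size / tile_height) / (bpe * nsamples)) * nbanks) *
		tile_width;
	uint32_t height_align = npipes * tile_height;

	/* prints 512 pixels / 16 rows for these values */
	printf("pitch_align %u pixels, height_align %u rows\n",
	       pitch_align, height_align);
	return 0;
}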
static void r600_cs_track_init(struct r600_cs_track *track)
{
int i;
@@ -153,10 +217,12 @@ static void r600_cs_track_init(struct r600_cs_track *track)
track->cb_color_info[i] = 0;
track->cb_color_bo[i] = NULL;
track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+ track->cb_color_bo_mc[i] = 0xFFFFFFFF;
}
track->cb_target_mask = 0xFFFFFFFF;
track->cb_shader_mask = 0xFFFFFFFF;
track->db_bo = NULL;
+ track->db_bo_mc = 0xFFFFFFFF;
/* assume the biggest format and that htile is enabled */
track->db_depth_info = 7 | (1 << 25);
track->db_depth_view = 0xFFFFC000;
@@ -168,7 +234,10 @@ static void r600_cs_track_init(struct r600_cs_track *track)
static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
- u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align;
+ u32 bpe = 0, slice_tile_max, size, tmp;
+ u32 height, height_align, pitch, pitch_align, depth_align;
+ u64 base_offset, base_align;
+ struct array_mode_checker array_check;
volatile u32 *ib = p->ib->ptr;
unsigned array_mode;
@@ -183,60 +252,40 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
i, track->cb_color_info[i]);
return -EINVAL;
}
- /* pitch is the number of 8x8 tiles per row */
- pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1;
+ /* pitch in pixels */
+ pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
slice_tile_max *= 64;
- height = slice_tile_max / (pitch * 8);
+ height = slice_tile_max / pitch;
if (height > 8192)
height = 8192;
array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
+
+ base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
+ array_check.array_mode = array_mode;
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = track->nsamples;
+ array_check.bpe = bpe;
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
+ return -EINVAL;
+ }
switch (array_mode) {
case V_0280A0_ARRAY_LINEAR_GENERAL:
- /* technically height & 0x7 */
break;
case V_0280A0_ARRAY_LINEAR_ALIGNED:
- pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- if (!IS_ALIGNED(height, 8)) {
- dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
- __func__, __LINE__, height);
- return -EINVAL;
- }
break;
case V_0280A0_ARRAY_1D_TILED_THIN1:
- pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8;
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
/* avoid breaking userspace */
if (height > 7)
height &= ~0x7;
- if (!IS_ALIGNED(height, 8)) {
- dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
- __func__, __LINE__, height);
- return -EINVAL;
- }
break;
case V_0280A0_ARRAY_2D_TILED_THIN1:
- pitch_align = max((u32)track->nbanks,
- (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)) / 8;
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- if (!IS_ALIGNED((height / 8), track->npipes)) {
- dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
- __func__, __LINE__, height);
- return -EINVAL;
- }
break;
default:
dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -244,8 +293,24 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
track->cb_color_info[i]);
return -EINVAL;
}
+
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, height_align)) {
+ dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
+ __func__, __LINE__, height);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+ return -EINVAL;
+ }
+
/* check offset */
- tmp = height * pitch * 8 * bpe;
+ tmp = height * pitch * bpe;
if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
/* the initial DDX does bad things with the CB size occasionally */
@@ -260,15 +325,11 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
return -EINVAL;
}
}
- if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
- dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
- return -EINVAL;
- }
/* limit max tile */
- tmp = (height * pitch * 8) >> 6;
+ tmp = (height * pitch) >> 6;
if (tmp < slice_tile_max)
slice_tile_max = tmp;
- tmp = S_028060_PITCH_TILE_MAX(pitch - 1) |
+ tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
ib[track->cb_color_size_idx[i]] = tmp;
return 0;
@@ -310,7 +371,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
/* Check depth buffer */
if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
G_028800_Z_ENABLE(track->db_depth_control)) {
- u32 nviews, bpe, ntiles, pitch, pitch_align, height, size, slice_tile_max;
+ u32 nviews, bpe, ntiles, size, slice_tile_max;
+ u32 height, height_align, pitch, pitch_align, depth_align;
+ u64 base_offset, base_align;
+ struct array_mode_checker array_check;
+ int array_mode;
+
if (track->db_bo == NULL) {
dev_warn(p->dev, "z/stencil with no depth buffer\n");
return -EINVAL;
@@ -353,41 +419,34 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
} else {
size = radeon_bo_size(track->db_bo);
- pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1;
+ /* pitch in pixels */
+ pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
slice_tile_max *= 64;
- height = slice_tile_max / (pitch * 8);
+ height = slice_tile_max / pitch;
if (height > 8192)
height = 8192;
- switch (G_028010_ARRAY_MODE(track->db_depth_info)) {
+ base_offset = track->db_bo_mc + track->db_offset;
+ array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+ array_check.array_mode = array_mode;
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = track->nsamples;
+ array_check.bpe = bpe;
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
+ switch (array_mode) {
case V_028010_ARRAY_1D_TILED_THIN1:
- pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
/* don't break userspace */
height &= ~0x7;
- if (!IS_ALIGNED(height, 8)) {
- dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
- __func__, __LINE__, height);
- return -EINVAL;
- }
break;
case V_028010_ARRAY_2D_TILED_THIN1:
- pitch_align = max((u32)track->nbanks,
- (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8;
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- if (!IS_ALIGNED((height / 8), track->npipes)) {
- dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
- __func__, __LINE__, height);
- return -EINVAL;
- }
break;
default:
dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
@@ -395,15 +454,27 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
track->db_depth_info);
return -EINVAL;
}
- if (!IS_ALIGNED(track->db_offset, track->group_size)) {
- dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset);
+
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, height_align)) {
+ dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
+ __func__, __LINE__, height);
return -EINVAL;
}
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+ return -EINVAL;
+ }
+
ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
tmp = ntiles * bpe * 64 * nviews;
if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
- dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
radeon_bo_size(track->db_bo));
return -EINVAL;
@@ -954,6 +1025,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
+ track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
break;
case DB_DEPTH_BASE:
r = r600_cs_packet_next_reloc(p, &reloc);
@@ -965,6 +1037,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
track->db_offset = radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_bo = reloc->robj;
+ track->db_bo_mc = reloc->lobj.gpu_offset;
break;
case DB_HTILE_DATA_BASE:
case SQ_PGM_START_FS:
@@ -1086,16 +1159,25 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels
static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
struct radeon_bo *texture,
struct radeon_bo *mipmap,
+ u64 base_offset,
+ u64 mip_offset,
u32 tiling_flags)
{
struct r600_cs_track *track = p->track;
u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
- u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align;
+ u32 word0, word1, l0_size, mipmap_size;
+ u32 height_align, pitch, pitch_align, depth_align;
+ u64 base_align;
+ struct array_mode_checker array_check;
/* on legacy kernel we don't perform advanced check */
if (p->rdev == NULL)
return 0;
+ /* convert to bytes */
+ base_offset <<= 8;
+ mip_offset <<= 8;
+
word0 = radeon_get_ib_value(p, idx + 0);
if (tiling_flags & RADEON_TILING_MACRO)
word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
@@ -1128,46 +1210,38 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
return -EINVAL;
}
- pitch = G_038000_PITCH(word0) + 1;
- switch (G_038000_TILE_MODE(word0)) {
- case V_038000_ARRAY_LINEAR_GENERAL:
- pitch_align = 1;
- /* XXX check height align */
- break;
- case V_038000_ARRAY_LINEAR_ALIGNED:
- pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- /* XXX check height align */
- break;
- case V_038000_ARRAY_1D_TILED_THIN1:
- pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8;
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- /* XXX check height align */
- break;
- case V_038000_ARRAY_2D_TILED_THIN1:
- pitch_align = max((u32)track->nbanks,
- (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8;
- if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
- return -EINVAL;
- }
- /* XXX check height align */
- break;
- default:
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_038000_TILE_MODE(word0), word0);
+ /* pitch in texels */
+ pitch = (G_038000_PITCH(word0) + 1) * 8;
+ array_check.array_mode = G_038000_TILE_MODE(word0);
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = 1;
+ array_check.bpe = bpe;
+ if (r600_get_array_mode_alignment(&array_check,
+ &pitch_align, &height_align, &depth_align, &base_align)) {
+ dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
+ __func__, __LINE__, G_038000_TILE_MODE(word0));
+ return -EINVAL;
+ }
+
+ /* XXX check height as well... */
+
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(base_offset, base_align)) {
+ dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
+ __func__, __LINE__, base_offset);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(mip_offset, base_align)) {
+ dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
+ __func__, __LINE__, mip_offset);
return -EINVAL;
}
- /* XXX check offset align */
word0 = radeon_get_ib_value(p, idx + 4);
word1 = radeon_get_ib_value(p, idx + 5);
@@ -1402,7 +1476,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
mipmap = reloc->robj;
r = r600_check_texture_resource(p, idx+(i*7)+1,
- texture, mipmap, reloc->lobj.tiling_flags);
+ texture, mipmap,
+ base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
+ mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
+ reloc->lobj.tiling_flags);
if (r)
return r;
ib[idx+1+(i*7)+2] += base_offset;
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index d84612ae47e0..33cda016b083 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -86,6 +86,7 @@
#define R600_HDP_NONSURFACE_BASE 0x2c04
#define R600_BUS_CNTL 0x5420
+# define R600_BIOS_ROM_DIS (1 << 1)
#define R600_CONFIG_CNTL 0x5424
#define R600_CONFIG_MEMSIZE 0x5428
#define R600_CONFIG_F0_BASE 0x542C
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 966a793e225b..bff4dc4f410f 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -51,6 +51,12 @@
#define PTE_READABLE (1 << 5)
#define PTE_WRITEABLE (1 << 6)
+/* tiling bits */
+#define ARRAY_LINEAR_GENERAL 0x00000000
+#define ARRAY_LINEAR_ALIGNED 0x00000001
+#define ARRAY_1D_TILED_THIN1 0x00000002
+#define ARRAY_2D_TILED_THIN1 0x00000004
+
/* Registers */
#define ARB_POP 0x2418
#define ENABLE_TC128 (1 << 30)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 73f600d39ad4..3a7095743d44 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1262,6 +1262,10 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
(rdev->family == CHIP_RS400) || \
(rdev->family == CHIP_RS480))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
+#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
+ (rdev->family == CHIP_RS690) || \
+ (rdev->family == CHIP_RS740) || \
+ (rdev->family >= CHIP_R600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 04cac7ec9039..bc5a2c3382d9 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -98,6 +98,14 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
}
}
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((i == 4) &&
+ (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+
if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -174,6 +182,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
}
}
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((i == 4) &&
+ (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
@@ -526,8 +542,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
if (crev < 2)
return false;
- router.valid = false;
-
obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
(ctx->bios + data_offset +
@@ -624,6 +638,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
continue;
+ router.ddc_valid = false;
+ router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
@@ -647,9 +663,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
usDeviceTag));
} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
- router.valid = false;
for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
- u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID);
+ u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
(ctx->bios + data_offset +
@@ -657,6 +672,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ATOM_I2C_RECORD *i2c_record;
ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+ ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
(ctx->bios + data_offset +
@@ -690,10 +706,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
record;
- router.valid = true;
- router.mux_type = ddc_path->ucMuxType;
- router.mux_control_pin = ddc_path->ucMuxControlPin;
- router.mux_state = ddc_path->ucMuxState[enum_id];
+ router.ddc_valid = true;
+ router.ddc_mux_type = ddc_path->ucMuxType;
+ router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+ router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+ break;
+ case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+ cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+ record;
+ router.cd_valid = true;
+ router.cd_mux_type = cd_path->ucMuxType;
+ router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+ router.cd_mux_state = cd_path->ucMuxState[enum_id];
break;
}
record = (ATOM_COMMON_RECORD_HEADER *)
@@ -860,7 +884,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
struct radeon_router router;
- router.valid = false;
+ router.ddc_valid = false;
+ router.cd_valid = false;
bios_connectors = kzalloc(bc_size, GFP_KERNEL);
if (!bios_connectors)
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 7932dc4d6b90..c558685cc637 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
size = bsize;
n = 1024;
- r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
+ r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
}
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
+ r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 654787ec43f4..8f2c7b50dcf5 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -130,6 +130,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
}
return true;
}
+
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
@@ -143,7 +144,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
bool r;
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -152,7 +153,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
/* disable VIP */
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
@@ -191,7 +192,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
}
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -216,7 +217,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
bool r;
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -231,7 +232,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
/* disable VIP */
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
@@ -262,7 +263,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
/* restore regs */
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 7b7ea269549c..137b8075f6e7 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -571,6 +571,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
}
if (clk_mask && data_mask) {
+ /* system specific masks */
i2c.mask_clk_mask = clk_mask;
i2c.mask_data_mask = data_mask;
i2c.a_clk_mask = clk_mask;
@@ -579,7 +580,19 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.en_data_mask = data_mask;
i2c.y_clk_mask = clk_mask;
i2c.y_data_mask = data_mask;
+ } else if ((ddc_line == RADEON_GPIOPAD_MASK) ||
+ (ddc_line == RADEON_MDGPIO_MASK)) {
+ /* default gpiopad masks */
+ i2c.mask_clk_mask = (0x20 << 8);
+ i2c.mask_data_mask = 0x80;
+ i2c.a_clk_mask = (0x20 << 8);
+ i2c.a_data_mask = 0x80;
+ i2c.en_clk_mask = (0x20 << 8);
+ i2c.en_data_mask = 0x80;
+ i2c.y_clk_mask = (0x20 << 8);
+ i2c.y_data_mask = 0x80;
} else {
+ /* default masks for ddc pads */
i2c.mask_clk_mask = RADEON_GPIO_EN_1;
i2c.mask_data_mask = RADEON_GPIO_EN_0;
i2c.a_clk_mask = RADEON_GPIO_A_1;
@@ -716,7 +729,7 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
- clk, data);
+ (1 << clk), (1 << data));
rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
break;
}
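
The GPIOPAD hunk above passes (1 << clk) rather than the raw line index because the i2c mask fields are bitmasks, not pin numbers. A minimal standalone sketch, with hypothetical line indices chosen to reproduce the default gpiopad masks in the hunk:

#include <stdio.h>

int main(void)
{
	unsigned int clk = 13, data = 7;	/* hypothetical BIOS line indices */
	unsigned int clk_mask = 1u << clk;	/* 0x2000 == (0x20 << 8) above */
	unsigned int data_mask = 1u << data;	/* 0x0080 above */

	printf("clk_mask=0x%04x data_mask=0x%04x\n", clk_mask, data_mask);
	return 0;
}
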
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4dac4b0a02ee..8afaf7a7459e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
continue;
if (priority == true) {
- DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
- DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+ DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
conflict->status = connector_status_disconnected;
radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
} else {
- DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
- DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict));
+ DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
current_status = connector_status_disconnected;
}
break;
@@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
mode->vdisplay == native_mode->vdisplay) {
*native_mode = *mode;
drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
- DRM_INFO("Determined LVDS native mode details from EDID\n");
+ DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
break;
}
}
}
if (!native_mode->clock) {
- DRM_INFO("No LVDS native mode details, disabling RMX\n");
+ DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
radeon_encoder->rmx_type = RMX_OFF;
}
}
@@ -1008,9 +1008,21 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
static int radeon_dp_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
int ret;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ }
ret = radeon_ddc_get_modes(radeon_connector);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
+ }
+
return ret;
}
@@ -1029,8 +1041,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
/* eDP is always DP */
radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
} else {
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
@@ -1116,7 +1134,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->shared_ddc = true;
shared_ddc = true;
}
- if (radeon_connector->router_bus && router->valid &&
+ if (radeon_connector->router_bus && router->ddc_valid &&
(radeon_connector->router.router_id == router->router_id)) {
radeon_connector->shared_ddc = false;
shared_ddc = false;
@@ -1136,7 +1154,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
radeon_connector->router = *router;
- if (router->valid) {
+ if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
if (!radeon_connector->router_bus)
goto failed;
@@ -1157,6 +1175,8 @@ radeon_add_atom_connector(struct drm_device *dev,
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1172,6 +1192,8 @@ radeon_add_atom_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1208,6 +1230,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.load_detect_property,
1);
}
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
@@ -1238,6 +1265,11 @@ radeon_add_atom_connector(struct drm_device *dev,
0);
}
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
@@ -1275,6 +1307,9 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
+ connector->interlace_allowed = true;
+ /* in theory with a DP to VGA converter... */
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@@ -1290,6 +1325,8 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_atombios_get_tv_info(rdev));
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -1308,6 +1345,8 @@ radeon_add_atom_connector(struct drm_device *dev,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
}
@@ -1385,6 +1424,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1400,6 +1441,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1417,6 +1460,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
}
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@@ -1439,6 +1487,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_combios_get_tv_info(rdev));
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -1452,6 +1502,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
}
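
For reference, the interlace/doublescan policy these connector hunks set, restated as a standalone sketch; the enum names are shortened stand-ins for the DRM_MODE_CONNECTOR_* constants, not driver code:

#include <stdbool.h>
#include <stdio.h>

enum conn { VGA, DVIA, DVII, DVID, HDMIA, HDMIB, DP, EDP, TV, LVDS };

struct mode_caps { bool interlace; bool doublescan; };

/* Restates the per-connector assignments from the hunks above. */
static struct mode_caps caps_for(enum conn c)
{
	switch (c) {
	case VGA:
	case DVIA:
	case DVII:	/* DVI-I carries an analog path */
	case HDMIB:	/* HDMI-B is dual-link, DVI-style */
		return (struct mode_caps){ true, true };
	case DVID:
	case HDMIA:
	case DP:
	case EDP:	/* doublescan only via a DP-to-VGA converter, in theory */
		return (struct mode_caps){ true, false };
	default:	/* TV-out, LVDS */
		return (struct mode_caps){ false, false };
	}
}

int main(void)
{
	struct mode_caps c = caps_for(HDMIA);
	printf("HDMIA: interlace=%d doublescan=%d\n", c.interlace, c.doublescan);
	return 0;
}
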
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 8adfedfe547f..e12e79326cb1 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -180,7 +180,7 @@ int radeon_wb_init(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
@@ -286,7 +286,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
}
@@ -323,7 +323,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
}
mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
- dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+ dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
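
The dev_info format change widens the address fields because an eight-digit pad stops lining up once VRAM/GTT locations exceed 32 bits; a quick userspace illustration with a hypothetical address:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t start = 0x100000000ULL;	/* hypothetical >4GB address */

	/* %08llX pads to only eight digits, so columns drift above 4GB;
	 * %016llX keeps the full 64-bit width aligned, as in the hunk. */
	printf("old: 0x%08" PRIX64 "\nnew: 0x%016" PRIX64 "\n", start, start);
	return 0;
}
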
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0383631da69c..1df4dc6c063c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev)
radeon_connector->ddc_bus->rec.en_data_reg,
radeon_connector->ddc_bus->rec.y_clk_reg,
radeon_connector->ddc_bus->rec.y_data_reg);
- if (radeon_connector->router_bus)
+ if (radeon_connector->router.ddc_valid)
DRM_INFO(" DDC Router 0x%x/0x%x\n",
- radeon_connector->router.mux_control_pin,
- radeon_connector->router.mux_state);
+ radeon_connector->router.ddc_mux_control_pin,
+ radeon_connector->router.ddc_mux_state);
+ if (radeon_connector->router.cd_valid)
+ DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
+ radeon_connector->router.cd_mux_control_pin,
+ radeon_connector->router.cd_mux_state);
} else {
if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
int ret = 0;
/* on hw with routers, select right port */
- if (radeon_connector->router.valid)
- radeon_router_select_port(radeon_connector);
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector)
int ret = 0;
/* on hw with routers, select right port */
- if (radeon_connector->router.valid)
- radeon_router_select_port(radeon_connector);
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
if (!radeon_connector->ddc_bus)
return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ae58b6849a2e..041943df966b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -176,6 +176,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
return false;
}
}
+
void
radeon_link_encoder_connector(struct drm_device *dev)
{
@@ -228,6 +229,27 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
+struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *other_encoder;
+ struct radeon_encoder *other_radeon_encoder;
+
+ if (radeon_encoder->is_ext_encoder)
+ return NULL;
+
+ list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+ if (other_encoder == encoder)
+ continue;
+ other_radeon_encoder = to_radeon_encoder(other_encoder);
+ if (other_radeon_encoder->is_ext_encoder &&
+ (radeon_encoder->devices & other_radeon_encoder->devices))
+ return other_encoder;
+ }
+ return NULL;
+}
+
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
@@ -426,52 +448,49 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
}
-void
-atombios_external_tmds_setup(struct drm_encoder *encoder, int action)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args;
- int index = 0;
-
- memset(&args, 0, sizeof(args));
-
- index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
-
- args.sXTmdsEncoder.ucEnable = action;
-
- if (radeon_encoder->pixel_clock > 165000)
- args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL;
-
- /*if (pScrn->rgbBits == 8)*/
- args.sXTmdsEncoder.ucMisc |= (1 << 1);
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-}
+union dvo_encoder_control {
+ ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
+ DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
+ DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
+};
-static void
-atombios_ddia_setup(struct drm_encoder *encoder, int action)
+void
+atombios_dvo_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DVO_ENCODER_CONTROL_PS_ALLOCATION args;
- int index = 0;
+ union dvo_encoder_control args;
+ int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
memset(&args, 0, sizeof(args));
- index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+ if (ASIC_IS_DCE3(rdev)) {
+ /* DCE3+ */
+ args.dvo_v3.ucAction = action;
+ args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.dvo_v3.ucDVOConfig = 0; /* XXX */
+ } else if (ASIC_IS_DCE2(rdev)) {
+ /* DCE2 (pre-DCE3 R6xx, RS600/690/740) */
+ args.dvo.sDVOEncoder.ucAction = action;
+ args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ /* DFP1, CRT1, TV1 depending on the type of port */
+ args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
+
+ if (radeon_encoder->pixel_clock > 165000)
+ args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
+ } else {
+ /* R4xx, R5xx */
+ args.ext_tmds.sXTmdsEncoder.ucEnable = action;
- args.sDVOEncoder.ucAction = action;
- args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ if (radeon_encoder->pixel_clock > 165000)
+ args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
- if (radeon_encoder->pixel_clock > 165000)
- args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL;
+ /*if (pScrn->rgbBits == 8)*/
+ args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
+ }
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
}
union lvds_encoder_control {
@@ -532,14 +551,14 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
- args.v1.ucMisc |= (1 << 1);
+ args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
} else {
if (dig->linkb)
args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
/*if (pScrn->rgbBits == 8) */
- args.v1.ucMisc |= (1 << 1);
+ args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
}
break;
case 2:
@@ -595,6 +614,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@@ -602,9 +622,20 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
struct radeon_connector_atom_dig *dig_connector;
connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
- return 0;
-
+ if (!connector) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ return ATOM_ENCODER_MODE_DVI;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ default:
+ return ATOM_ENCODER_MODE_CRT;
+ }
+ }
radeon_connector = to_radeon_connector(connector);
switch (connector->connector_type) {
@@ -834,6 +865,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
memset(&args, 0, sizeof(args));
switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+ break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
@@ -978,6 +1012,105 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+void
+atombios_set_edp_panel_power(struct drm_connector *connector, int action)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ union dig_transmitter_control args;
+ int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+ uint8_t frev, crev;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+ return;
+
+ if (!ASIC_IS_DCE4(rdev))
+ return;
+
+ if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
+ (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
+ return;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ memset(&args, 0, sizeof(args));
+
+ args.v1.ucAction = action;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union external_encoder_control {
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+};
+
+static void
+atombios_external_encoder_setup(struct drm_encoder *encoder,
+ struct drm_encoder *ext_encoder,
+ int action)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ union external_encoder_control args;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
+ u8 frev, crev;
+ int dp_clock = 0;
+ int dp_lane_count = 0;
+ int connector_object_id = 0;
+
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ dp_clock = dig_connector->dp_clock;
+ dp_lane_count = dig_connector->dp_lane_count;
+ connector_object_id =
+ (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+ }
+
+ memset(&args, 0, sizeof(args));
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
+
+ switch (frev) {
+ case 1:
+ /* no params on frev 1 */
+ break;
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ args.v1.sDigEncoder.ucAction = action;
+ args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+ if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dp_clock == 270000)
+ args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
+ args.v1.sDigEncoder.ucLaneNum = 8;
+ else
+ args.v1.sDigEncoder.ucLaneNum = 4;
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+ return;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
static void
atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
{
@@ -1021,6 +1154,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
int index = 0;
bool is_dig = false;
@@ -1043,9 +1177,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ if (ASIC_IS_DCE3(rdev))
+ is_dig = true;
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+ break;
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
break;
@@ -1082,34 +1221,85 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ if (connector &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector =
+ radeon_connector->con_priv;
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ radeon_dig_connector->edp_on = true;
+ }
dp_link_train(encoder, connector);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
}
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
+ if (connector &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector =
+ radeon_connector->con_priv;
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
+ radeon_dig_connector->edp_on = false;
+ }
}
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
break;
}
} else {
switch (mode) {
case DRM_MODE_DPMS_ON:
args.ucAction = ATOM_ENABLE;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.ucAction = ATOM_LCD_BLON;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
args.ucAction = ATOM_DISABLE;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ args.ucAction = ATOM_LCD_BLOFF;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
break;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+
+ if (ext_encoder) {
+ int action;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ default:
+ action = ATOM_ENABLE;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ action = ATOM_DISABLE;
+ break;
+ }
+ atombios_external_encoder_setup(encoder, ext_encoder, action);
+ }
+
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
}
@@ -1242,7 +1432,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
- break;
+ return;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -1357,6 +1547,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -1400,11 +1591,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
- atombios_ddia_setup(encoder, ATOM_ENABLE);
- break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- atombios_external_tmds_setup(encoder, ATOM_ENABLE);
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1419,6 +1608,11 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
}
break;
}
+
+ if (ext_encoder) {
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ }
+
atombios_apply_encoder_quirks(encoder, adjusted_mode);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
@@ -1520,6 +1714,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
if (radeon_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
@@ -1531,6 +1726,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
radeon_atom_output_lock(encoder, true);
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+ /* select the clock/data port if it uses a router */
+ if (connector) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (radeon_connector->router.cd_valid)
+ radeon_router_select_cd_port(radeon_connector);
+ }
+
/* this is needed for the pll/ss setup to work correctly in some cases */
atombios_set_encoder_crtc_source(encoder);
}
@@ -1547,6 +1749,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
+
+ /* check for pre-DCE3 cards with shared encoders;
+ * can't really use the links individually, so don't disable
+ * the encoder if it's in use by another connector
+ */
+ if (!ASIC_IS_DCE3(rdev)) {
+ struct drm_encoder *other_encoder;
+ struct radeon_encoder *other_radeon_encoder;
+
+ list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+ other_radeon_encoder = to_radeon_encoder(other_encoder);
+ if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+ drm_helper_encoder_in_use(other_encoder))
+ goto disable_done;
+ }
+ }
+
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
switch (radeon_encoder->encoder_id) {
@@ -1570,11 +1789,9 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
- atombios_ddia_setup(encoder, ATOM_DISABLE);
- break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- atombios_external_tmds_setup(encoder, ATOM_DISABLE);
+ atombios_dvo_setup(encoder, ATOM_DISABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1586,6 +1803,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
break;
}
+disable_done:
if (radeon_encoder_is_digital(encoder)) {
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
r600_hdmi_disable(encoder);
@@ -1595,6 +1813,53 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
radeon_encoder->active_device = 0;
}
+/* these are handled by the primary encoders */
+static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void radeon_atom_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void radeon_atom_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
+ .dpms = radeon_atom_ext_dpms,
+ .mode_fixup = radeon_atom_ext_mode_fixup,
+ .prepare = radeon_atom_ext_prepare,
+ .mode_set = radeon_atom_ext_mode_set,
+ .commit = radeon_atom_ext_commit,
+ .disable = radeon_atom_ext_disable,
+ /* no detect for TMDS/LVDS yet */
+};
+
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
.dpms = radeon_atom_encoder_dpms,
.mode_fixup = radeon_atom_mode_fixup,
@@ -1704,6 +1969,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
radeon_encoder->underscan_type = UNDERSCAN_OFF;
+ radeon_encoder->is_ext_encoder = false;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
@@ -1745,6 +2011,9 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
radeon_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
@@ -1753,5 +2022,22 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
+ case ENCODER_OBJECT_ID_SI170B:
+ case ENCODER_OBJECT_ID_CH7303:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+ case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+ case ENCODER_OBJECT_ID_TITFP513:
+ case ENCODER_OBJECT_ID_VT1623:
+ case ENCODER_OBJECT_ID_HDMI_SI1930:
+ /* these are handled by the primary encoders */
+ radeon_encoder->is_ext_encoder = true;
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+ else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+ else
+ drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
+ break;
}
}
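
One non-obvious ordering constraint the DPMS hunks above encode: an eDP panel must be powered before its DPCD is reachable, so panel power brackets link training and the video enable, and teardown is the mirror image. A standalone sketch with stub functions standing in for the atombios/DP helpers named in the patch:

#include <stdio.h>

/* Stubs standing in for the helpers the patch calls. */
static void panel_power(int on) { printf("eDP panel power %s\n", on ? "on" : "off"); }
static void link_train(void)    { printf("DP link training\n"); }
static void video(int on)       { printf("DP video %s\n", on ? "on" : "off"); }

int main(void)
{
	/* DPMS on: power the panel first, then train, then enable video. */
	panel_power(1);
	link_train();
	video(1);

	/* DPMS off mirrors it: video off, then cut panel power. */
	video(0);
	panel_power(0);
	return 0;
}
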
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 216392d0353b..daacb281dfaf 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -240,7 +240,8 @@ retry:
*/
if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
/* good news we believe it's a lockup */
- WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+ WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
+ fence->seq, seq);
/* FIXME: what should we do ? marking everyone
* as signaled for now
*/
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e65b90317fab..65016117d95f 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -79,8 +79,8 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
if (rdev->gart.table.vram.robj == NULL) {
r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
- true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->gart.table.vram.robj);
+ PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->gart.table.vram.robj);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d1e595d91723..df95eb83dac6 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -67,7 +67,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
+ r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 6a13ee38a5b9..ded2a45bc95c 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
};
/* on hw with routers, select right port */
- if (radeon_connector->router.valid)
- radeon_router_select_port(radeon_connector);
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
if (ret == 2)
@@ -896,7 +896,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
((rdev->family <= CHIP_RS480) ||
((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
/* set the radeon hw i2c adapter */
- sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "Radeon i2c hw bus %s", name);
i2c->adapter.algo = &radeon_i2c_algo;
ret = i2c_add_adapter(&i2c->adapter);
if (ret) {
@@ -905,7 +906,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
}
} else {
/* set the radeon bit adapter */
- sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "Radeon i2c bit bus %s", name);
i2c->adapter.algo_data = &i2c->algo.bit;
i2c->algo.bit.pre_xfer = pre_xfer;
i2c->algo.bit.post_xfer = post_xfer;
@@ -946,6 +948,8 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->dev = dev;
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "Radeon aux bus %s", name);
i2c_set_adapdata(&i2c->adapter, i2c);
i2c->adapter.algo_data = &i2c->algo.dp;
i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
@@ -1084,26 +1088,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
addr, val);
}
-/* router switching */
-void radeon_router_select_port(struct radeon_connector *radeon_connector)
+/* ddc router switching */
+void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
{
u8 val;
- if (!radeon_connector->router.valid)
+ if (!radeon_connector->router.ddc_valid)
return;
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, &val);
- val &= radeon_connector->router.mux_control_pin;
+ val &= ~radeon_connector->router.ddc_mux_control_pin;
radeon_i2c_put_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, val);
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x1, &val);
- val &= radeon_connector->router.mux_control_pin;
- val |= radeon_connector->router.mux_state;
+ val &= ~radeon_connector->router.ddc_mux_control_pin;
+ val |= radeon_connector->router.ddc_mux_state;
+ radeon_i2c_put_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x1, val);
+}
+
+/* clock/data router switching */
+void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
+{
+ u8 val;
+
+ if (!radeon_connector->router.cd_valid)
+ return;
+
+ radeon_i2c_get_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x3, &val);
+ val &= ~radeon_connector->router.cd_mux_control_pin;
+ radeon_i2c_put_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x3, val);
+ radeon_i2c_get_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x1, &val);
+ val &= ~radeon_connector->router.cd_mux_control_pin;
+ val |= radeon_connector->router.cd_mux_state;
radeon_i2c_put_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x1, val);
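
The router fix above is a read-modify-write correction: the old code masked the register down to the mux control bits (val &= pin), discarding every other bit, where it should clear only those bits and then OR in the new state. A minimal sketch with hypothetical register values:

#include <stdio.h>

int main(void)
{
	unsigned char reg = 0xA5;	/* hypothetical router register */
	unsigned char pin = 0x0C;	/* mux control bits */
	unsigned char state = 0x04;	/* desired mux state */

	unsigned char buggy = (unsigned char)(reg & pin);	      /* loses all other bits */
	unsigned char fixed = (unsigned char)((reg & ~pin) | state);  /* RMW as in the hunk */

	printf("buggy=0x%02X fixed=0x%02X\n", buggy, fixed);
	return 0;
}
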
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index 2f349a300195..465746bd51b7 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -76,7 +76,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
- return EINVAL;
+ return -EINVAL;
}
} else {
switch (crtc) {
@@ -89,7 +89,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc)
default:
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
- return EINVAL;
+ return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 0b8397000f4c..59f834ba283d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -670,7 +670,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
if (rdev->is_atom_bios) {
radeon_encoder->pixel_clock = adjusted_mode->clock;
- atombios_external_tmds_setup(encoder, ATOM_ENABLE);
+ atombios_dvo_setup(encoder, ATOM_ENABLE);
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
} else {
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 92457163d070..e301c6f9e059 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -375,6 +375,7 @@ struct radeon_encoder {
int hdmi_config_offset;
int hdmi_audio_workaround;
int hdmi_buffer_status;
+ bool is_ext_encoder;
};
struct radeon_connector_atom_dig {
@@ -385,6 +386,7 @@ struct radeon_connector_atom_dig {
u8 dp_sink_type;
int dp_clock;
int dp_lane_count;
+ bool edp_on;
};
struct radeon_gpio_rec {
@@ -401,13 +403,19 @@ struct radeon_hpd {
};
struct radeon_router {
- bool valid;
u32 router_id;
struct radeon_i2c_bus_rec i2c_info;
u8 i2c_addr;
- u8 mux_type;
- u8 mux_control_pin;
- u8 mux_state;
+ /* i2c mux */
+ bool ddc_valid;
+ u8 ddc_mux_type;
+ u8 ddc_mux_control_pin;
+ u8 ddc_mux_state;
+ /* clock/data mux */
+ bool cd_valid;
+ u8 cd_mux_type;
+ u8 cd_mux_control_pin;
+ u8 cd_mux_state;
};
struct radeon_connector {
@@ -488,7 +496,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 slave_addr,
u8 addr,
u8 val);
-extern void radeon_router_select_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
@@ -516,9 +525,10 @@ struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev
struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
-extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
+extern void atombios_dvo_setup(struct drm_encoder *encoder, int action);
extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
+extern void atombios_set_edp_panel_power(struct drm_connector *connector, int action);
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d7ab91416410..a598d0049aa5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -69,7 +69,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
u32 c = 0;
rbo->placement.fpfn = 0;
- rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;
+ rbo->placement.lpfn = 0;
rbo->placement.placement = rbo->placements;
rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM)
@@ -86,11 +86,13 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
}
int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
- unsigned long size, bool kernel, u32 domain,
- struct radeon_bo **bo_ptr)
+ unsigned long size, int byte_align, bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
+ unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+ unsigned long max_size = 0;
int r;
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -102,6 +104,16 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
type = ttm_bo_type_device;
}
*bo_ptr = NULL;
+
+ /* maximum bo size is the minimum of visible vram and gtt size */
+ max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+ if ((page_align << PAGE_SHIFT) >= max_size) {
+ printk(KERN_WARNING "%s:%d alloc size %luMB bigger than %luMB limit\n",
+ __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
+ return -ENOMEM;
+ }
+
+retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
@@ -109,13 +121,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
bo->gobj = gobj;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
-
-retry:
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, 0, 0, !kernel, NULL, size,
+ &bo->placement, page_align, 0, !kernel, NULL, size,
&radeon_ttm_bo_destroy);
mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
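
The new byte_align parameter is converted to whole pages before it reaches ttm_bo_init(); the same computation in isolation, assuming a 4K page (PAGE_SHIFT of 12):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long byte_align = 256 * 1024;	/* hypothetical alignment */
	/* Same computation radeon_bo_create() now feeds to ttm_bo_init(). */
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;

	printf("byte_align=%lu -> page_align=%lu pages\n", byte_align, page_align);
	return 0;
}
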
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 3481bc7f6f58..d143702b244a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -137,9 +137,10 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
}
extern int radeon_bo_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj, unsigned long size,
- bool kernel, u32 domain,
- struct radeon_bo **bo_ptr);
+ struct drm_gem_object *gobj, unsigned long size,
+ int byte_align,
+ bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 6ea798ce8218..06e79822a2bf 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -176,8 +176,8 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
- true, RADEON_GEM_DOMAIN_GTT,
- &rdev->ib_pool.robj);
+ PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
+ &rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
return r;
@@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
+ r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->cp.ring_obj);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 313c96bc09da..5b44f652145c 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev)
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- r = radeon_bo_create(rdev, NULL, size, true,
+ r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index fe95bb35317e..1272e4b6a1d4 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -529,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
- r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
+ r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
&rdev->stollen_vga_memory);
if (r) {
@@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
gtt = container_of(backend, struct radeon_ttm_backend, backend);
gtt->offset = bo_mem->start << PAGE_SHIFT;
if (!gtt->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
+ WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ gtt->num_pages, bo_mem, backend);
}
r = radeon_gart_bind(gtt->rdev, gtt->offset,
gtt->num_pages, gtt->pages);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index f683e51a2a06..5512e4e5e636 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev)
int r;
if (rdev->gart.table.ram.ptr) {
- WARN(1, "RS400 GART already initialized.\n");
+ WARN(1, "RS400 GART already initialized\n");
return 0;
}
/* Check gart size */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b091a1f6fa4e..f1c6e02c2e6b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj) {
- WARN(1, "RS600 GART already initialized.\n");
+ WARN(1, "RS600 GART already initialized\n");
return 0;
}
/* Initialize common gart structure */
@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev)
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
if (!rdev->irq.installed) {
- WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 245374e2b778..4dfead8cee33 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -915,8 +915,8 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev)
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
- true, RADEON_GEM_DOMAIN_VRAM,
- &rdev->vram_scratch.robj);
+ PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->vram_scratch.robj);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a1cb783c7131..148a322d8f5d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,14 +27,6 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-/* Notes:
- *
- * We store bo pointer in drm_mm_node struct so we know which bo own a
- * specific node. There is no protection on the pointer, thus to make
- * sure things don't go berserk you have to access this pointer while
- * holding the global lru lock and make sure anytime you free a node you
- * reset the pointer to NULL.
- */
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
@@ -45,6 +37,7 @@
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
+#include <asm/atomic.h>
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
@@ -231,6 +224,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
int ret;
while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+ /**
+ * Deadlock avoidance for multi-bo reserving.
+ */
if (use_sequence && bo->seq_valid &&
(sequence - bo->val_seq < (1 << 31))) {
return -EAGAIN;
@@ -248,6 +244,14 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
}
if (use_sequence) {
+ /**
+ * Wake up waiters that may need to recheck for deadlock,
+ * if we decreased the sequence number.
+ */
+ if (unlikely((bo->val_seq - sequence < (1 << 31))
+ || !bo->seq_valid))
+ wake_up_all(&bo->event_queue);
+
bo->val_seq = sequence;
bo->seq_valid = true;
} else {
@@ -452,6 +456,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
ttm_bo_mem_put(bo, &bo->mem);
atomic_set(&bo->reserved, 0);
+
+ /*
+ * Make processes trying to reserve really pick it up.
+ */
+ smp_mb__after_atomic_dec();
wake_up_all(&bo->event_queue);
}
@@ -460,7 +469,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_bo_driver *driver;
- void *sync_obj;
+ void *sync_obj = NULL;
void *sync_obj_arg;
int put_count;
int ret;
@@ -495,17 +504,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
spin_lock(&glob->lru_lock);
}
queue:
- sync_obj = bo->sync_obj;
- sync_obj_arg = bo->sync_obj_arg;
driver = bdev->driver;
+ if (bo->sync_obj)
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ sync_obj_arg = bo->sync_obj_arg;
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
spin_unlock(&bo->lock);
- if (sync_obj)
+ if (sync_obj) {
driver->sync_obj_flush(sync_obj, sync_obj_arg);
+ driver->sync_obj_unref(&sync_obj);
+ }
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
}
@@ -822,7 +834,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
int ret;
@@ -832,12 +843,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
- spin_lock(&glob->lru_lock);
- if (list_empty(&man->lru)) {
- spin_unlock(&glob->lru_lock);
- break;
- }
- spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
no_wait_reserve, no_wait_gpu);
if (unlikely(ret != 0))
@@ -1125,35 +1130,9 @@ EXPORT_SYMBOL(ttm_bo_validate);
int ttm_bo_check_placement(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- int i;
+ BUG_ON((placement->fpfn || placement->lpfn) &&
+ (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
- if (placement->fpfn || placement->lpfn) {
- if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
- printk(KERN_ERR TTM_PFX "Page number range to small "
- "Need %lu pages, range is [%u, %u]\n",
- bo->mem.num_pages, placement->fpfn,
- placement->lpfn);
- return -EINVAL;
- }
- }
- for (i = 0; i < placement->num_placement; i++) {
- if (!capable(CAP_SYS_ADMIN)) {
- if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
- printk(KERN_ERR TTM_PFX "Need to be root to "
- "modify NO_EVICT status.\n");
- return -EINVAL;
- }
- }
- }
- for (i = 0; i < placement->num_busy_placement; i++) {
- if (!capable(CAP_SYS_ADMIN)) {
- if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
- printk(KERN_ERR TTM_PFX "Need to be root to "
- "modify NO_EVICT status.\n");
- return -EINVAL;
- }
- }
- }
return 0;
}
@@ -1176,6 +1155,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
+ if (destroy)
+ (*destroy)(bo);
+ else
+ kfree(bo);
return -EINVAL;
}
bo->destroy = destroy;
@@ -1369,18 +1352,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
int ret = -EINVAL;
struct ttm_mem_type_manager *man;
- if (type >= TTM_NUM_MEM_TYPES) {
- printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
- return ret;
- }
-
+ BUG_ON(type >= TTM_NUM_MEM_TYPES);
man = &bdev->man[type];
- if (man->has_type) {
- printk(KERN_ERR TTM_PFX
- "Memory manager already initialized for type %d\n",
- type);
- return ret;
- }
+ BUG_ON(man->has_type);
ret = bdev->driver->init_mem_type(bdev, type, man);
if (ret)
@@ -1389,13 +1363,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
ret = 0;
if (type != TTM_PL_SYSTEM) {
- if (!p_size) {
- printk(KERN_ERR TTM_PFX
- "Zero size memory manager type %d\n",
- type);
- return ret;
- }
-
ret = (*man->func->init)(man, p_size);
if (ret)
return ret;
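
The deadlock-avoidance comparison added to ttm_bo_reserve_locked() relies on wraparound-safe unsigned arithmetic: (a - b) < 2^31 treats a as not older than b even across a 32-bit wrap, and that is what decides which waiter backs off with -EAGAIN. A standalone sketch of the comparison:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Wrap-safe "a is not older than b" for 32-bit sequence numbers. */
static bool seq_not_older(uint32_t a, uint32_t b)
{
	return (uint32_t)(a - b) < (1u << 31);
}

int main(void)
{
	printf("%d\n", seq_not_older(5, 3));		/* 1 */
	printf("%d\n", seq_not_older(3, 5));		/* 0 */
	printf("%d\n", seq_not_older(2, 0xFFFFFFF0u));	/* 1: across the wrap */
	return 0;
}
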
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 7410c190c891..038e947d00f9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,20 +31,29 @@
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
-#include <linux/jiffies.h>
+#include "drm_mm.h"
#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/file.h>
+#include <linux/spinlock.h>
#include <linux/module.h>
+/**
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+ struct drm_mm mm;
+ spinlock_t lock;
+};
+
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
- struct ttm_bo_global *glob = man->bdev->glob;
- struct drm_mm *mm = man->priv;
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
struct drm_mm_node *node = NULL;
unsigned long lpfn;
int ret;
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
if (unlikely(ret))
return ret;
- spin_lock(&glob->lru_lock);
+ spin_lock(&rman->lock);
node = drm_mm_search_free_in_range(mm,
mem->num_pages, mem->page_alignment,
placement->fpfn, lpfn, 1);
if (unlikely(node == NULL)) {
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&rman->lock);
return 0;
}
node = drm_mm_get_block_atomic_range(node, mem->num_pages,
- mem->page_alignment,
- placement->fpfn,
- lpfn);
- spin_unlock(&glob->lru_lock);
+ mem->page_alignment,
+ placement->fpfn,
+ lpfn);
+ spin_unlock(&rman->lock);
} while (node == NULL);
mem->mm_node = node;
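The loop above is the canonical drm_mm pattern of this period: node memory is reserved outside the spinlock by a pre-allocation step (drm_mm_pre_get() in this era's API, just above the shown hunk), then search and claim run atomically under the lock, retrying if another thread grabs the block first. Condensed sketch:

    do {
            ret = drm_mm_pre_get(mm);       /* may sleep: reserve node memory */
            if (unlikely(ret))
                    return ret;

            spin_lock(&rman->lock);
            node = drm_mm_search_free_in_range(mm, mem->num_pages,
                                               mem->page_alignment,
                                               placement->fpfn, lpfn, 1);
            if (unlikely(node == NULL)) {
                    spin_unlock(&rman->lock);
                    return 0;               /* no space: caller may evict and retry */
            }
            node = drm_mm_get_block_atomic_range(node, mem->num_pages,
                                                 mem->page_alignment,
                                                 placement->fpfn, lpfn);
            spin_unlock(&rman->lock);
    } while (node == NULL);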
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- struct ttm_bo_global *glob = man->bdev->glob;
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
if (mem->mm_node) {
- spin_lock(&glob->lru_lock);
+ spin_lock(&rman->lock);
drm_mm_put_block(mem->mm_node);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&rman->lock);
mem->mm_node = NULL;
}
}
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
- struct drm_mm *mm;
+ struct ttm_range_manager *rman;
int ret;
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm)
+ rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+ if (!rman)
return -ENOMEM;
- ret = drm_mm_init(mm, 0, p_size);
+ ret = drm_mm_init(&rman->mm, 0, p_size);
if (ret) {
- kfree(mm);
+ kfree(rman);
return ret;
}
- man->priv = mm;
+ spin_lock_init(&rman->lock);
+ man->priv = rman;
return 0;
}
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
- struct ttm_bo_global *glob = man->bdev->glob;
- struct drm_mm *mm = man->priv;
- int ret = 0;
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+ struct drm_mm *mm = &rman->mm;
- spin_lock(&glob->lru_lock);
+ spin_lock(&rman->lock);
if (drm_mm_clean(mm)) {
drm_mm_takedown(mm);
- kfree(mm);
+ spin_unlock(&rman->lock);
+ kfree(rman);
man->priv = NULL;
- } else
- ret = -EBUSY;
- spin_unlock(&glob->lru_lock);
- return ret;
+ return 0;
+ }
+ spin_unlock(&rman->lock);
+ return -EBUSY;
}
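The restructure here is not cosmetic: the spinlock now lives inside the allocation being torn down, so the unlock must come before kfree(rman); unlocking afterwards would touch freed memory. The safe ordering in miniature:

    spin_unlock(&rman->lock);   /* unlock BEFORE freeing the lock's containing object */
    kfree(rman);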
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
const char *prefix)
{
- struct ttm_bo_global *glob = man->bdev->glob;
- struct drm_mm *mm = man->priv;
+ struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
- spin_lock(&glob->lru_lock);
- drm_mm_debug_table(mm, prefix);
- spin_unlock(&glob->lru_lock);
+ spin_lock(&rman->lock);
+ drm_mm_debug_table(&rman->mm, prefix);
+ spin_unlock(&rman->lock);
}
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
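For orientation, ttm_bo_manager_func plugs the callbacks above into TTM's new pluggable memory-manager interface; its likely shape (a sketch, not the verbatim hunk):

    const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
            .init     = ttm_bo_man_init,
            .takedown = ttm_bo_man_takedown,
            .get_node = ttm_bo_man_get_node,
            .put_node = ttm_bo_man_put_node,
            .debug    = ttm_bo_man_debug,
    };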
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a7bab87a548b..af789dc869b9 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
return ret;
ret = be->func->bind(be, bo_mem);
- if (ret) {
- printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
+ if (unlikely(ret != 0))
return ret;
- }
ttm->state = tt_bound;
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 9b5b4d9dd62c..3e038a394c51 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
first_pfn + 1;
- if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
+ vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
+ if (NULL == vsg->pages)
return -ENOMEM;
- memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm,
(unsigned long)xfer->mem_addr,
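The via allocation change is a straight simplification: vzalloc() allocates and zeroes in one call, replacing the vmalloc()+memset() pair and reading more naturally than the assignment-in-condition it replaces. Equivalent miniature (pages/npages stand in for vsg->pages/vsg->num_pages):

    /* before: two steps, with a separate memset */
    pages = vmalloc(npages * sizeof(struct page *));
    if (pages == NULL)
            return -ENOMEM;
    memset(pages, 0, npages * sizeof(struct page *));

    /* after: one call, memory returned already zeroed */
    pages = vzalloc(npages * sizeof(struct page *));
    if (pages == NULL)
            return -ENOMEM;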
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 51d9f9f1d7f2..76954e3528c1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
fence_rep.error = ret;
fence_rep.fence_seq = (uint64_t) sequence;
+ fence_rep.pad64 = 0;
user_fence_rep = (struct drm_vmw_fence_rep __user *)
(unsigned long)arg->fence_rep;
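fence_rep is a stack struct later copied out through the user_fence_rep pointer set up here; assigning pad64 = 0 ensures the padding field cannot leak uninitialized kernel stack contents to user space. A common alternative (sketch) is to zero the whole struct before filling it:

    struct drm_vmw_fence_rep fence_rep;

    memset(&fence_rep, 0, sizeof(fence_rep));   /* covers fields and padding alike */
    fence_rep.error = ret;
    fence_rep.fence_seq = (uint64_t) sequence;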
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 87c6e6156d7d..cceeb42789b6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
&vmw_vram_ne_placement,
false, &vmw_dmabuf_bo_free);
vmw_overlay_resume_all(dev_priv);
+ if (unlikely(ret != 0))
+ vfbs->buffer = NULL;
return ret;
}
@@ -730,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(&vfb->base);
+ if (unlikely(vfbs->buffer == NULL))
+ return 0;
+
bo = &vfbs->buffer->base;
ttm_bo_unref(&bo);
vfbs->buffer = NULL;
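The two kms hunks cooperate: on a failed pin, vfbs->buffer is cleared so it only ever points at a buffer that is actually held, and unpin can then use a NULL check as its early-out guard instead of dereferencing a stale pointer. The invariant in miniature:

    /* pin failure: do not advertise a buffer we don't hold */
    if (unlikely(ret != 0))
            vfbs->buffer = NULL;

    /* unpin: nothing pinned, nothing to release */
    if (unlikely(vfbs->buffer == NULL))
            return 0;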
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a01c47ddb5bc..29113c9b26a8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
return -EINVAL;
}
- dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv));
+ dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
if (!dev_priv->ldu_priv)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index df2036ed18d5..f1a52f9e7298 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
return -ENOSYS;
}
- overlay = kmalloc(GFP_KERNEL, sizeof(*overlay));
+ overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return -ENOMEM;
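The ldu and overlay hunks fix the same transposed-argument bug: kmalloc() takes (size, flags), so the old calls allocated a buffer whose length was the numeric value of the GFP_KERNEL mask, with the struct size misread as GFP flags; that only worked as long as the flag constant happened to exceed the struct's size. Order reminder:

    /* kmalloc(size_t size, gfp_t flags) -- size first, flags second */
    overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
    if (!overlay)
            return -ENOMEM;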
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 36e129f0023f..5408b1b7996f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -862,7 +862,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
&vmw_vram_sys_placement, true,
&vmw_user_dmabuf_destroy);
if (unlikely(ret != 0))
- return ret;
+ goto out_no_dmabuf;
tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
@@ -870,19 +870,21 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
false,
ttm_buffer_type,
&vmw_user_dmabuf_release, NULL);
- if (unlikely(ret != 0)) {
- ttm_bo_unref(&tmp);
- } else {
+ if (unlikely(ret != 0))
+ goto out_no_base_object;
+ else {
rep->handle = vmw_user_bo->base.hash.key;
rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
rep->cur_gmr_id = vmw_user_bo->base.hash.key;
rep->cur_gmr_offset = 0;
}
- ttm_bo_unref(&tmp);
+out_no_base_object:
+ ttm_bo_unref(&tmp);
+out_no_dmabuf:
ttm_read_unlock(&vmaster->lock);
- return 0;
+ return ret;
}
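The rework funnels every exit through labels so the ttm_read_lock taken at the top of the ioctl is always dropped and the real error code propagates: previously the first failure returned with the lock still held, and the tail returned 0 even when base-object setup had failed. Note that the success path deliberately falls through out_no_base_object, since the extra reference in tmp must be dropped either way. The shape, with hypothetical helper names standing in for the real calls:

    ret = create_dmabuf();              /* stand-in for the dmabuf allocation above */
    if (unlikely(ret != 0))
            goto out_no_dmabuf;

    tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
    ret = init_base_object();           /* stand-in for ttm_base_object_init */
    if (unlikely(ret != 0))
            goto out_no_base_object;

    fill_reply(rep);                    /* success path */

    out_no_base_object:
            ttm_bo_unref(&tmp);         /* drop the extra ref on success and failure */
    out_no_dmabuf:
            ttm_read_unlock(&vmaster->lock);
            return ret;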
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,