Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
 drivers/gpu/drm/i915/intel_ringbuffer.c | 226 ++++++++++++++++----------
 1 file changed, 132 insertions(+), 94 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c7bc93d28d84..12a36f0ca53d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -52,16 +52,27 @@ intel_ring_initialized(struct intel_engine_cs *ring)
int __intel_ring_space(int head, int tail, int size)
{
- int space = head - (tail + I915_RING_FREE_SPACE);
- if (space < 0)
+ int space = head - tail;
+ if (space <= 0)
space += size;
- return space;
+ return space - I915_RING_FREE_SPACE;
+}
+
+void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+{
+ if (ringbuf->last_retired_head != -1) {
+ ringbuf->head = ringbuf->last_retired_head;
+ ringbuf->last_retired_head = -1;
+ }
+
+ ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
+ ringbuf->tail, ringbuf->size);
}
int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
- return __intel_ring_space(ringbuf->head & HEAD_ADDR,
- ringbuf->tail, ringbuf->size);
+ intel_ring_update_space(ringbuf);
+ return ringbuf->space;
}
bool intel_ring_stopped(struct intel_engine_cs *ring)
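
A minimal standalone sketch of the reworked free-space arithmetic above. RING_FREE_SPACE stands in for the driver's I915_RING_FREE_SPACE (64 bytes), and the values in main() are illustrative; the point of the change is that the reserved gap is now subtracted once at the end, so head == tail still reads as an empty ring.

#include <stdio.h>

/* Reserved gap that keeps a full ring distinguishable from an empty
 * one (head == tail); stands in for I915_RING_FREE_SPACE. */
#define RING_FREE_SPACE 64

/* Free bytes between tail and head in a circular buffer of `size`
 * bytes, matching the patched __intel_ring_space(). */
static int ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - RING_FREE_SPACE;
}

int main(void)
{
	/* 4 KiB ring, head at 256, tail at 1024: the free region wraps
	 * past the end of the buffer. */
	printf("%d\n", ring_space(256, 1024, 4096));  /* 3264 */
	/* Empty ring: head == tail leaves size minus the reserved gap. */
	printf("%d\n", ring_space(512, 512, 4096));   /* 4032 */
	return 0;
}
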
@@ -592,10 +603,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
goto out;
}
+ ringbuf->last_retired_head = -1;
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = intel_ring_space(ringbuf);
- ringbuf->last_retired_head = -1;
+ intel_ring_update_space(ringbuf);
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -627,8 +638,7 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
{
int ret;
- if (ring->scratch.obj)
- return 0;
+ WARN_ON(ring->scratch.obj);
ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
if (ring->scratch.obj == NULL) {
@@ -672,7 +682,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
- if (WARN_ON(w->count == 0))
+ if (WARN_ON_ONCE(w->count == 0))
return 0;
ring->gpu_caches_dirty = true;
@@ -703,6 +713,22 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
return 0;
}
+static int intel_rcs_ctx_init(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ int ret;
+
+ ret = intel_ring_workarounds_emit(ring, ctx);
+ if (ret != 0)
+ return ret;
+
+ ret = i915_gem_render_state_init(ring);
+ if (ret)
+ DRM_ERROR("init render state: %d\n", ret);
+
+ return ret;
+}
+
static int wa_add(struct drm_i915_private *dev_priv,
const u32 addr, const u32 mask, const u32 val)
{
@@ -762,9 +788,12 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
+ /* WaForceEnableNonCoherent:bdw */
+ /* WaHdcDisableFetchWhenMasked:bdw */
/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED |
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
/* Wa4x4STCOptimizationDisable:bdw */
@@ -861,12 +890,6 @@ static int init_render_ring(struct intel_engine_cs *ring)
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (INTEL_INFO(dev)->gen >= 5) {
- ret = intel_init_pipe_control(ring);
- if (ret)
- return ret;
- }
-
if (IS_GEN6(dev)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -918,17 +941,20 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller,
return ret;
for_each_ring(waiter, dev_priv, i) {
+ u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
+ seqno = i915_gem_request_get_seqno(
+ signaller->outstanding_lazy_request);
intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_FLUSH_ENABLE);
intel_ring_emit(signaller, lower_32_bits(gtt_offset));
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
@@ -956,16 +982,19 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller,
return ret;
for_each_ring(waiter, dev_priv, i) {
+ u32 seqno;
u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
+ seqno = i915_gem_request_get_seqno(
+ signaller->outstanding_lazy_request);
intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
MI_FLUSH_DW_OP_STOREDW);
intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
MI_FLUSH_DW_USE_GTT);
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
- intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
MI_SEMAPHORE_TARGET(waiter->id));
intel_ring_emit(signaller, 0);
@@ -994,9 +1023,11 @@ static int gen6_signal(struct intel_engine_cs *signaller,
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
if (mbox_reg != GEN6_NOSYNC) {
+ u32 seqno = i915_gem_request_get_seqno(
+ signaller->outstanding_lazy_request);
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
- intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, seqno);
}
}
@@ -1031,7 +1062,8 @@ gen6_add_request(struct intel_engine_cs *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@@ -1149,7 +1181,8 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1168,7 +1201,8 @@ pc_render_add_request(struct intel_engine_cs *ring)
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, 0);
__intel_ring_advance(ring);
@@ -1408,7 +1442,8 @@ i9xx_add_request(struct intel_engine_cs *ring)
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, ring->outstanding_lazy_seqno);
+ intel_ring_emit(ring,
+ i915_gem_request_get_seqno(ring->outstanding_lazy_request));
intel_ring_emit(ring, MI_USER_INTERRUPT);
__intel_ring_advance(ring);
@@ -1789,15 +1824,15 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_engine_cs *ring)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct intel_ringbuffer *ringbuf;
int ret;
- if (ringbuf == NULL) {
- ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
- if (!ringbuf)
- return -ENOMEM;
- ring->buffer = ringbuf;
- }
+ WARN_ON(ring->buffer);
+
+ ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+ if (!ringbuf)
+ return -ENOMEM;
+ ring->buffer = ringbuf;
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
@@ -1820,21 +1855,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- if (ringbuf->obj == NULL) {
- ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
- ring->name, ret);
- goto error;
- }
+ WARN_ON(ringbuf->obj);
- ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
- if (ret) {
- DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
- ring->name, ret);
- intel_destroy_ringbuffer_obj(ringbuf);
- goto error;
- }
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+ ring->name, ret);
+ goto error;
+ }
+
+ ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+ if (ret) {
+ DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+ ring->name, ret);
+ intel_destroy_ringbuffer_obj(ringbuf);
+ goto error;
}
/* Workaround an erratum on the i830 which causes a hang if
@@ -1849,10 +1884,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto error;
- ret = ring->init(ring);
- if (ret)
- goto error;
-
return 0;
error:
@@ -1877,8 +1908,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
- ring->preallocated_lazy_request = NULL;
- ring->outstanding_lazy_seqno = 0;
+ i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
if (ring->cleanup)
ring->cleanup(ring);
@@ -1895,38 +1925,27 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_request *request;
- u32 seqno = 0;
int ret;
- if (ringbuf->last_retired_head != -1) {
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
-
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= n)
- return 0;
- }
+ if (intel_ring_space(ringbuf) >= n)
+ return 0;
list_for_each_entry(request, &ring->request_list, list) {
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= n) {
- seqno = request->seqno;
break;
}
}
- if (seqno == 0)
+ if (&request->list == &ring->request_list)
return -ENOSPC;
- ret = i915_wait_seqno(ring, seqno);
+ ret = i915_wait_request(request);
if (ret)
return ret;
i915_gem_retire_requests_ring(ring);
- ringbuf->head = ringbuf->last_retired_head;
- ringbuf->last_retired_head = -1;
- ringbuf->space = intel_ring_space(ringbuf);
return 0;
}
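
The rewritten wait path above reduces to: scan the outstanding requests oldest-first for the first one whose recorded tail, once retired, frees at least n bytes, then block on that request object instead of a bare seqno. A hedged sketch of the selection step, reusing ring_space() from the earlier sketch, with struct req as an illustrative stand-in for drm_i915_gem_request:

struct req {
	int tail;          /* ring tail recorded when the request was emitted */
	struct req *next;  /* oldest-first singly linked list, NULL-terminated */
};

/* Retiring a request frees the ring up to its tail, so the request's
 * tail plays the role of the prospective head in the space formula.
 * Returns NULL when even draining every request is not enough, which
 * the driver maps to -ENOSPC. */
static struct req *pick_request_to_wait_on(struct req *oldest, int cur_tail,
					   int size, int needed)
{
	struct req *r;

	for (r = oldest; r; r = r->next)
		if (ring_space(r->tail, cur_tail, size) >= needed)
			return r;
	return NULL;
}
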
@@ -1952,14 +1971,14 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
+ ret = 0;
trace_i915_ring_wait_begin(ring);
do {
+ if (intel_ring_space(ringbuf) >= n)
+ break;
ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = intel_ring_space(ringbuf);
- if (ringbuf->space >= n) {
- ret = 0;
+ if (intel_ring_space(ringbuf) >= n)
break;
- }
msleep(1);
@@ -2000,19 +2019,19 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = intel_ring_space(ringbuf);
+ intel_ring_update_space(ringbuf);
return 0;
}
int intel_ring_idle(struct intel_engine_cs *ring)
{
- u32 seqno;
+ struct drm_i915_gem_request *req;
int ret;
/* We need to add any requests required to flush the objects and ring */
- if (ring->outstanding_lazy_seqno) {
- ret = i915_add_request(ring, NULL);
+ if (ring->outstanding_lazy_request) {
+ ret = i915_add_request(ring);
if (ret)
return ret;
}
@@ -2021,30 +2040,39 @@ int intel_ring_idle(struct intel_engine_cs *ring)
if (list_empty(&ring->request_list))
return 0;
- seqno = list_entry(ring->request_list.prev,
+ req = list_entry(ring->request_list.prev,
struct drm_i915_gem_request,
- list)->seqno;
+ list);
- return i915_wait_seqno(ring, seqno);
+ return i915_wait_request(req);
}
static int
-intel_ring_alloc_seqno(struct intel_engine_cs *ring)
+intel_ring_alloc_request(struct intel_engine_cs *ring)
{
- if (ring->outstanding_lazy_seqno)
+ int ret;
+ struct drm_i915_gem_request *request;
+ struct drm_i915_private *dev_private = ring->dev->dev_private;
+
+ if (ring->outstanding_lazy_request)
return 0;
- if (ring->preallocated_lazy_request == NULL) {
- struct drm_i915_gem_request *request;
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
- request = kmalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
+ kref_init(&request->ref);
+ request->ring = ring;
+ request->uniq = dev_private->request_uniq++;
- ring->preallocated_lazy_request = request;
+ ret = i915_gem_get_seqno(ring->dev, &request->seqno);
+ if (ret) {
+ kfree(request);
+ return ret;
}
- return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
+ ring->outstanding_lazy_request = request;
+ return 0;
}
static int __intel_ring_prepare(struct intel_engine_cs *ring,
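
The intel_ring_alloc_request() hunk above replaces the preallocated-seqno scheme with a lazily allocated, reference-counted request: the first emitter on a ring allocates it, and later emitters reuse it until submission. A small sketch of that pattern under the assumption of a plain integer refcount; struct request and alloc_request() here are hypothetical stand-ins, not driver API:

#include <stdlib.h>

struct request {
	int refcount;        /* kref_init() equivalent */
	unsigned int seqno;  /* from i915_gem_get_seqno() in the driver */
	unsigned int uniq;   /* per-device debug id, monotonically increasing */
};

static int alloc_request(struct request **outstanding,
			 unsigned int *next_seqno, unsigned int *next_uniq)
{
	struct request *rq;

	/* Lazy: a request already pending on the ring is reused. */
	if (*outstanding)
		return 0;

	rq = calloc(1, sizeof(*rq));
	if (!rq)
		return -1;  /* the driver returns -ENOMEM here */

	rq->refcount = 1;
	rq->uniq = (*next_uniq)++;
	rq->seqno = (*next_seqno)++;  /* seqno assignment can fail in the
				       * driver, which then frees rq */
	*outstanding = rq;
	return 0;
}
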
@@ -2084,7 +2112,7 @@ int intel_ring_begin(struct intel_engine_cs *ring,
return ret;
/* Preallocate the olr before touching the ring */
- ret = intel_ring_alloc_seqno(ring);
+ ret = intel_ring_alloc_request(ring);
if (ret)
return ret;
@@ -2119,7 +2147,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- BUG_ON(ring->outstanding_lazy_seqno);
+ BUG_ON(ring->outstanding_lazy_request);
if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -2341,7 +2369,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
}
}
- ring->init_context = intel_ring_workarounds_emit;
+ ring->init_context = intel_rcs_ctx_init;
ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
@@ -2426,7 +2454,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->dispatch_execbuffer = i830_dispatch_execbuffer;
else
ring->dispatch_execbuffer = i915_dispatch_execbuffer;
- ring->init = init_render_ring;
+ ring->init_hw = init_render_ring;
ring->cleanup = render_ring_cleanup;
/* Workaround batchbuffer to combat CS tlb bug. */
@@ -2448,7 +2476,17 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
- return intel_init_ring_buffer(dev, ring);
+ ret = intel_init_ring_buffer(dev, ring);
+ if (ret)
+ return ret;
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ ret = intel_init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
@@ -2519,7 +2557,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@@ -2558,7 +2596,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
ring->semaphore.signal = gen8_xcs_signal;
GEN8_RING_SEMAPHORE_INIT;
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@@ -2615,7 +2653,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
@@ -2666,7 +2704,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init = init_ring_common;
+ ring->init_hw = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}
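
Taken together, the ring->init to ring->init_hw renames and the removal of the ring->init() call from intel_init_ring_buffer() split one-time software setup (allocations, now guarded by WARN_ON() rather than silently skipped) from hardware programming that must be re-runnable after a GPU reset. A minimal illustrative sketch of that split; struct engine and engine_load() are hypothetical stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct engine {
	void *ring_mem;                   /* software state: allocated once */
	int (*init_hw)(struct engine *);  /* reprograms registers; re-runnable */
};

static int init_ring_common_sketch(struct engine *e)
{
	/* Touches no allocations, so a reset path may call it again. */
	printf("hw init, ring at %p\n", e->ring_mem);
	return 0;
}

static int engine_load(struct engine *e)
{
	/* One-time setup that used to hide inside ring->init(). */
	e->ring_mem = malloc(4096);
	if (!e->ring_mem)
		return -1;
	e->init_hw = init_ring_common_sketch;
	/* First hardware bring-up. */
	return e->init_hw(e);
}

int main(void)
{
	struct engine e = { 0 };

	if (engine_load(&e))
		return 1;
	e.init_hw(&e);  /* simulated post-reset re-init: only the hw step repeats */
	free(e.ring_mem);
	return 0;
}
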