author     Chris Wilson <chris@chris-wilson.co.uk>  2019-01-28 19:18:11 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>  2019-01-28 20:07:09 +0100
commit     5013eb8cd601c31e6d7d1b9d3291b24e933b77b2 (patch)
tree       1078ed4c54d8457b3768ee71422970144c05c053 /drivers/gpu/drm/i915/intel_lrc.c
parent     drm/i915: Share per-timeline HWSP using a slab suballocator (diff)
drm/i915: Track the context's seqno in its own timeline HWSP
Now that we have allocated ourselves a cacheline to store a breadcrumb, we can emit a write from the GPU into the timeline's HWSP of the per-context seqno as we complete each request. This drops the mirroring of the per-engine HWSP and allows each context to operate independently. We do not need to unwind the per-context timeline, and so requests are always consistent with the timeline breadcrumb, greatly simplifying the completion checks as we no longer need to be concerned about the global_seqno changing mid check.

One complication, though, is that we have to be wary that the request may outlive the HWSP and so avoid touching the potentially dangling pointer after we have retired the fence. We also have to guard our access of the HWSP with RCU; the release of the obj->mm.pages should already be RCU-safe.

At this point, we are emitting both per-context and global seqno and still using the single per-engine execution timeline for resolving interrupts.

v2: s/fake_complete/mark_complete/

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190128181812.22804-5-chris@chris-wilson.co.uk
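For readers following the change: the completion check this enables boils down to a wraparound-safe comparison of the request's fence.seqno against the breadcrumb value the GPU has written into the timeline's HWSP cacheline. Below is a minimal userspace sketch of that comparison only; the names (fake_timeline, fake_request, seqno_passed, request_completed) are illustrative stand-ins and not the driver's actual API.

/*
 * Minimal sketch of the per-context HWSP completion check described
 * above. All names here are illustrative stand-ins, not i915 code:
 * the real driver reads the breadcrumb from the cacheline it
 * sub-allocated in the timeline's HWSP page.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe: has the breadcrumb advanced up to the target seqno? */
static bool seqno_passed(uint32_t breadcrumb, uint32_t target)
{
	return (int32_t)(breadcrumb - target) >= 0;
}

struct fake_timeline {
	uint32_t hwsp[16];		/* stand-in for the HWSP cacheline */
};

struct fake_request {
	const uint32_t *hwsp_seqno;	/* points into the timeline's HWSP */
	uint32_t seqno;			/* this request's fence.seqno */
};

static bool request_completed(const struct fake_request *rq)
{
	/* The GPU writes rq->seqno here as the request's final breadcrumb. */
	return seqno_passed(*rq->hwsp_seqno, rq->seqno);
}

int main(void)
{
	struct fake_timeline tl = { .hwsp = { 0 } };
	struct fake_request rq = { .hwsp_seqno = &tl.hwsp[0], .seqno = 3 };

	tl.hwsp[0] = 2;	/* an earlier request's breadcrumb has landed */
	printf("completed: %d\n", request_completed(&rq));	/* prints 0 */

	tl.hwsp[0] = 3;	/* our breadcrumb has been written */
	printf("completed: %d\n", request_completed(&rq));	/* prints 1 */
	return 0;
}

The signed subtraction is what keeps the comparison correct across u32 wraparound of the seqno.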
Diffstat (limited to 'drivers/gpu/drm/i915/intel_lrc.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c  31
1 file changed, 20 insertions, 11 deletions
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e388f37743a2..fdbb3fe8eac9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -832,10 +832,10 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	list_for_each_entry(rq, &engine->timeline.requests, link) {
 		GEM_BUG_ON(!rq->global_seqno);
 
-		if (i915_request_signaled(rq))
-			continue;
+		if (!i915_request_signaled(rq))
+			dma_fence_set_error(&rq->fence, -EIO);
 
-		dma_fence_set_error(&rq->fence, -EIO);
+		i915_request_mark_complete(rq);
 	}
 
 	/* Flush the queued requests to the timeline list (for retiring). */
@@ -845,9 +845,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 		priolist_for_each_request_consume(rq, rn, p, i) {
 			list_del_init(&rq->sched.link);
-
-			dma_fence_set_error(&rq->fence, -EIO);
 			__i915_request_submit(rq);
+			dma_fence_set_error(&rq->fence, -EIO);
+			i915_request_mark_complete(rq);
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
@@ -2044,10 +2044,17 @@ static u32 *gen8_emit_breadcrumb(struct i915_request *request, u32 *cs)
 	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-	cs = gen8_emit_ggtt_write(cs, request->global_seqno,
+	cs = gen8_emit_ggtt_write(cs,
+				  request->fence.seqno,
+				  request->timeline->hwsp_offset);
+
+	cs = gen8_emit_ggtt_write(cs,
+				  request->global_seqno,
 				  intel_hws_seqno_address(request->engine));
+
 	*cs++ = MI_USER_INTERRUPT;
 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
 	request->tail = intel_ring_offset(request, cs);
 	assert_ring_tail_valid(request->ring, request->tail);
@@ -2056,18 +2063,20 @@ static u32 *gen8_emit_breadcrumb(struct i915_request *request, u32 *cs)
 static u32 *gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 {
-	/* We're using qword write, seqno should be aligned to 8 bytes. */
-	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
-
 	cs = gen8_emit_ggtt_write_rcs(cs,
-				      request->global_seqno,
-				      intel_hws_seqno_address(request->engine),
+				      request->fence.seqno,
+				      request->timeline->hwsp_offset,
 				      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
 				      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
 				      PIPE_CONTROL_DC_FLUSH_ENABLE |
 				      PIPE_CONTROL_FLUSH_ENABLE |
 				      PIPE_CONTROL_CS_STALL);
+	cs = gen8_emit_ggtt_write_rcs(cs,
+				      request->global_seqno,
+				      intel_hws_seqno_address(request->engine),
+				      PIPE_CONTROL_CS_STALL);
+
 	*cs++ = MI_USER_INTERRUPT;
 	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;