From 15e7f52a4596b496ce3da2fa4c1f94c6fb0023f2 Mon Sep 17 00:00:00 2001
From: Xiaolin Zhang
Date: Mon, 3 Jun 2019 10:55:53 +0800
Subject: drm/i915/gvt: save RING_HEAD into vreg when vgpu switched out

Save RING_HEAD into vgpu reg when vgpu switched out and report
its value back to guest.

v6: addressed comment for ring head wrap count support. (Zhenyu)
v5: ring head wrap count support.
v4: updated HEAD/TAIL with guest value, not host value. (Yan Zhao)
v3: save RING HEAD/TAIL vgpu reg in save_ring_hw_state. (Zhenyu Wang)
v2: save RING_TAIL as well during vgpu mmio switch to meet
    ring_is_idle condition. (Fred Gao)
v1: based on input from Weinan. (Weinan Li)

[zhenyuw: Include this fix for possible future guest kernel that would
utilize RING_HEAD for hangcheck.]

Reviewed-by: Zhenyu Wang
Signed-off-by: Xiaolin Zhang
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/reg.h       |  2 ++
 drivers/gpu/drm/i915/gvt/scheduler.c | 25 +++++++++++++++++++++++++
 drivers/gpu/drm/i915/gvt/scheduler.h |  1 +
 3 files changed, 28 insertions(+)

diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 33aaa14bfdde..5b66e14c5b7b 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -102,6 +102,8 @@
 #define FORCEWAKE_ACK_MEDIA_GEN9_REG	0x0D88
 #define FORCEWAKE_ACK_HSW_REG		0x130044
 
+#define RB_HEAD_WRAP_CNT_MAX	((1 << 11) - 1)
+#define RB_HEAD_WRAP_CNT_OFF	21
 #define RB_HEAD_OFF_MASK	((1U << 21) - (1U << 2))
 #define RB_TAIL_OFF_MASK	((1U << 21) - (1U << 3))
 #define RB_TAIL_SIZE_MASK	((1U << 21) - (1U << 12))
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 13632dba8b2a..0f919f0a43d4 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -812,10 +812,31 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	void *src;
 	unsigned long context_gpa, context_page_num;
 	int i;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	u32 ring_base;
+	u32 head, tail;
+	u16 wrap_count;
 
 	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
 		      workload->ctx_desc.lrca);
 
+	head = workload->rb_head;
+	tail = workload->rb_tail;
+	wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
+
+	if (tail < head) {
+		if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
+			wrap_count = 0;
+		else
+			wrap_count += 1;
+	}
+
+	head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
+
+	ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+	vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
+	vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
+
 	context_page_num = rq->engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
@@ -1415,6 +1436,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
 	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+	u32 guest_head;
 	int ret;
 
 	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -1430,6 +1452,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 			RING_CTX_OFF(ring_tail.val), &tail, 4);
 
+	guest_head = head;
+
 	head &= RB_HEAD_OFF_MASK;
 	tail &= RB_TAIL_OFF_MASK;
 
@@ -1462,6 +1486,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	workload->ctx_desc = *desc;
 	workload->ring_context_gpa = ring_context_gpa;
 	workload->rb_head = head;
+	workload->guest_rb_head = guest_head;
 	workload->rb_tail = tail;
 	workload->rb_start = start;
 	workload->rb_ctl = ctl;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 90c6756f5453..c50d14a9ce85 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -100,6 +100,7 @@ struct intel_vgpu_workload {
 	struct execlist_ctx_descriptor_format ctx_desc;
 	struct execlist_ring_context *ring_context;
 	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+	unsigned long guest_rb_head;
 	bool restore_inhibit;
 	struct intel_vgpu_elsp_dwords elsp_dwords;
 	bool emulate_schedule_in;
-- 
cgit v1.2.3
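
For reference, a minimal standalone sketch of the head wrap-count accounting
that the update_guest_context() hunk above implements, assuming the
RB_HEAD_WRAP_CNT_* layout from reg.h; the helper name report_ring_head() is
invented for this illustration only and does not exist in the driver, which
writes the result directly into the vgpu RING_HEAD/RING_TAIL vregs.

#include <stdint.h>

#define RB_HEAD_WRAP_CNT_MAX	((1 << 11) - 1)	/* 11-bit wrap counter */
#define RB_HEAD_WRAP_CNT_OFF	21		/* counter sits above the head offset bits */

/*
 * guest_rb_head: RING_HEAD value last reported to the guest
 *                (wrap count in the upper bits, head offset below).
 * rb_head/rb_tail: head and tail offsets of the completed workload.
 *
 * Returns the RING_HEAD value to report back to the guest: head is set
 * equal to tail (ring idle), and the wrap count is bumped whenever
 * execution wrapped past the end of the ring buffer.
 */
static uint32_t report_ring_head(uint32_t guest_rb_head,
				 uint32_t rb_head, uint32_t rb_tail)
{
	uint16_t wrap_count = guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;

	if (rb_tail < rb_head) {		/* execution wrapped around */
		if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
			wrap_count = 0;		/* the counter itself wraps */
		else
			wrap_count += 1;
	}

	return ((uint32_t)wrap_count << RB_HEAD_WRAP_CNT_OFF) | rb_tail;
}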