Diffstat (limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')
-rw-r--r-- | drivers/gpu/drm/i915/gvt/scheduler.c | 55
1 file changed, 16 insertions, 39 deletions
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0fb1df71c637..3c3b9842bbbd 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -348,7 +348,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	u32 *cs;
 	int err;
 
-	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
+	if (IS_GEN(req->engine->i915, 9) && is_inhibit_context(req->context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/*
@@ -509,26 +509,18 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
 				+ bb->bb_offset;
 
-		if (bb->ppgtt) {
-			/* for non-priv bb, scan&shadow is only for
-			 * debugging purpose, so the content of shadow bb
-			 * is the same as original bb. Therefore,
-			 * here, rather than switch to shadow bb's gma
-			 * address, we directly use original batch buffer's
-			 * gma address, and send original bb to hardware
-			 * directly
-			 */
-			if (bb->clflush & CLFLUSH_AFTER) {
-				drm_clflush_virt_range(bb->va,
-						bb->obj->base.size);
-				bb->clflush &= ~CLFLUSH_AFTER;
-			}
-			i915_gem_object_finish_access(bb->obj);
-			bb->accessing = false;
-
-		} else {
+		/*
+		 * For non-priv bb, scan&shadow is only for
+		 * debugging purpose, so the content of shadow bb
+		 * is the same as original bb. Therefore,
+		 * here, rather than switch to shadow bb's gma
+		 * address, we directly use original batch buffer's
+		 * gma address, and send original bb to hardware
+		 * directly
+		 */
+		if (!bb->ppgtt) {
 			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
-					NULL, 0, 0, 0);
+							   NULL, 0, 0, 0);
 			if (IS_ERR(bb->vma)) {
 				ret = PTR_ERR(bb->vma);
 				goto err;
@@ -539,27 +531,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			if (gmadr_bytes == 8)
 				bb->bb_start_cmd_va[2] = 0;
 
-			/* No one is going to touch shadow bb from now on. */
-			if (bb->clflush & CLFLUSH_AFTER) {
-				drm_clflush_virt_range(bb->va,
-						bb->obj->base.size);
-				bb->clflush &= ~CLFLUSH_AFTER;
-			}
-
-			ret = i915_gem_object_set_to_gtt_domain(bb->obj,
-								false);
-			if (ret)
-				goto err;
-
 			ret = i915_vma_move_to_active(bb->vma,
 						      workload->req,
 						      0);
 			if (ret)
 				goto err;
-
-			i915_gem_object_finish_access(bb->obj);
-			bb->accessing = false;
 		}
+
+		/* No one is going to touch shadow bb from now on. */
+		i915_gem_object_flush_map(bb->obj);
 	}
 	return 0;
 err:
@@ -630,9 +610,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
 		if (bb->obj) {
-			if (bb->accessing)
-				i915_gem_object_finish_access(bb->obj);
-
 			if (bb->va && !IS_ERR(bb->va))
 				i915_gem_object_unpin_map(bb->obj);
 
@@ -939,7 +916,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	context_page_num = rq->engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
-	if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
+	if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0)
 		context_page_num = 19;
 
 	context_base = (void *) ctx->lrc_reg_state -
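For reference, a condensed sketch of how the per-bb loop in prepare_shadow_batch_buffer() reads after this patch, reconstructed from the hunks above. This is not verbatim driver source: the gmadr_bytes initializer and the bb->bb_offset guard are taken from the surrounding driver code rather than this diff, and the bb_start_cmd_va relocation step that sits between the two middle hunks is elided.

/*
 * Sketch (assumes the gvt scheduler.h types and i915 GEM helpers
 * named in the diff above). Note what is gone compared to the old
 * code: the bb->clflush / CLFLUSH_AFTER tracking, the explicit
 * i915_gem_object_set_to_gtt_domain() call, and the bb->accessing
 * bookkeeping paired with i915_gem_object_finish_access().
 */
static int prepare_shadow_batch_buffer_sketch(struct intel_vgpu_workload *workload)
{
	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	struct intel_vgpu_shadow_bb *bb;
	int ret;

	list_for_each_entry(bb, &workload->shadow_bb, list) {
		if (bb->bb_offset)
			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
				+ bb->bb_offset;

		if (!bb->ppgtt) {
			/* Privileged bb: pin the shadow copy into the GGTT
			 * and run the hardware from it. Non-priv (ppgtt) bbs
			 * skip this and execute from the guest's original gma,
			 * as the comment in the diff explains. */
			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
							   NULL, 0, 0, 0);
			if (IS_ERR(bb->vma))
				return PTR_ERR(bb->vma);

			/* (bb_start_cmd_va relocation elided; it lies between
			 * the two middle hunks of the diff) */
			if (gmadr_bytes == 8)
				bb->bb_start_cmd_va[2] = 0;

			ret = i915_vma_move_to_active(bb->vma, workload->req, 0);
			if (ret)
				return ret;
		}

		/* No one touches the shadow bb from now on: one unconditional
		 * flush of the kmap replaces the old per-path clflush +
		 * domain-change + finish_access sequence. */
		i915_gem_object_flush_map(bb->obj);
	}
	return 0;
}

The design point is visible at the bottom of the loop: because both the priv and non-priv paths now end in a single i915_gem_object_flush_map(), the bb->accessing and bb->clflush state becomes unnecessary, which is also why the matching cleanup in release_shadow_batch_buffer() drops its finish_access call.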