Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
-rw-r--r--   drivers/gpu/drm/i915/gvt/cfg_space.c     |  15
-rw-r--r--   drivers/gpu/drm/i915/gvt/execlist.c      |  22
-rw-r--r--   drivers/gpu/drm/i915/gvt/gvt.h           |   6
-rw-r--r--   drivers/gpu/drm/i915/gvt/handlers.c      |   7
-rw-r--r--   drivers/gpu/drm/i915/gvt/kvmgt.c         |  36
-rw-r--r--   drivers/gpu/drm/i915/gvt/mmio.c          |  42
-rw-r--r--   drivers/gpu/drm/i915/gvt/opregion.c      |  98
-rw-r--r--   drivers/gpu/drm/i915/gvt/sched_policy.c  |  14
-rw-r--r--   drivers/gpu/drm/i915/gvt/scheduler.c     |  19
-rw-r--r--   drivers/gpu/drm/i915/gvt/scheduler.h     |   1
-rw-r--r--   drivers/gpu/drm/i915/gvt/vgpu.c          |   3
11 files changed, 144 insertions, 119 deletions
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 97bfc00d2a82..c62346fdc05d 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -119,16 +119,6 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
 	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
 		return 0;
 
-	if (map) {
-		vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
-						MEMREMAP_WC);
-		if (!vgpu->gm.aperture_va)
-			return -ENOMEM;
-	} else {
-		memunmap(vgpu->gm.aperture_va);
-		vgpu->gm.aperture_va = NULL;
-	}
-
 	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
 	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
 		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
@@ -141,11 +131,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
 			aperture_pa >> PAGE_SHIFT,
 			aperture_sz >> PAGE_SHIFT,
 			map);
-	if (ret) {
-		memunmap(vgpu->gm.aperture_va);
-		vgpu->gm.aperture_va = NULL;
+	if (ret)
 		return ret;
-	}
 
 	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
 	return 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 769c1c24ae75..70494e394d2c 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -521,24 +521,23 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
 			_EL_OFFSET_STATUS_PTR);
-
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 	ctx_status_ptr.read_ptr = 0;
 	ctx_status_ptr.write_ptr = 0x7;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
-static void clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
 {
-	enum intel_engine_id i;
+	unsigned int tmp;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		struct intel_vgpu_submission *s = &vgpu->submission;
-
-		kfree(s->ring_scan_buffer[i]);
-		s->ring_scan_buffer[i] = NULL;
-		s->ring_scan_buffer_size[i] = 0;
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		kfree(s->ring_scan_buffer[engine->id]);
+		s->ring_scan_buffer[engine->id] = NULL;
+		s->ring_scan_buffer_size[engine->id] = 0;
 	}
 }
@@ -553,9 +552,10 @@ static void reset_execlist(struct intel_vgpu *vgpu,
 		init_vgpu_execlist(vgpu, engine->id);
 }
 
-static int init_execlist(struct intel_vgpu *vgpu)
+static int init_execlist(struct intel_vgpu *vgpu,
+			 unsigned long engine_mask)
 {
-	reset_execlist(vgpu, ALL_ENGINES);
+	reset_execlist(vgpu, engine_mask);
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 7dc7a80213a8..c6197d990818 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -82,7 +82,6 @@ struct intel_gvt_device_info {
 struct intel_vgpu_gm {
 	u64 aperture_sz;
 	u64 hidden_sz;
-	void *aperture_va;
 	struct drm_mm_node low_gm_node;
 	struct drm_mm_node high_gm_node;
 };
@@ -127,7 +126,6 @@ struct intel_vgpu_irq {
 struct intel_vgpu_opregion {
 	bool mapped;
 	void *va;
-	void *va_gopregion;
 	u32 gfn[INTEL_GVT_OPREGION_PAGES];
 };
 
@@ -152,8 +150,8 @@ enum {
 
 struct intel_vgpu_submission_ops {
 	const char *name;
-	int (*init)(struct intel_vgpu *vgpu);
-	void (*clean)(struct intel_vgpu *vgpu);
+	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
 	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
 };
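The gvt.h hunk above widens the submission-ops contract: init() and clean() now take an engine mask, matching reset(), so execlist state can be set up or torn down per engine instead of always for ALL_ENGINES. The sketch below is editorial and not part of the commit; it assumes the ENGINE_MASK() and RCS helpers from the i915 driver of this era, and a vGPU whose execlist ops are already selected.

    /*
     * Illustrative only: re-initialize execlist state for the render
     * engine alone, using the new per-engine init/clean signatures.
     */
    static int example_reinit_render_engine(struct intel_vgpu *vgpu)
    {
            struct intel_vgpu_submission *s = &vgpu->submission;
            unsigned long mask = ENGINE_MASK(RCS);  /* render ring only */

            if (s->active)
                    s->ops->clean(vgpu, mask);  /* frees RCS scan buffers */

            return s->ops->init(vgpu, mask);    /* resets RCS execlists */
    }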
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 38f3b00d3a7a..9be639aa3b55 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1494,7 +1494,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct intel_vgpu_submission *s = &vgpu->submission;
 	u32 data = *(u32 *)p_data;
 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	bool enable_execlist;
@@ -1523,11 +1522,9 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		if (!enable_execlist)
 			return 0;
 
-		if (s->active)
-			return 0;
-
 		ret = intel_vgpu_select_submission_ops(vgpu,
-				INTEL_VGPU_EXECLIST_SUBMISSION);
+				ENGINE_MASK(ring_id),
+				INTEL_VGPU_EXECLIST_SUBMISSION);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index eb92572056c3..801a3375c7b4 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -651,6 +651,39 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
 	return ret;
 }
 
+static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
+{
+	return off >= vgpu_aperture_offset(vgpu) &&
+	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
+}
+
+static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
+		void *buf, unsigned long count, bool is_write)
+{
+	void *aperture_va;
+
+	if (!intel_vgpu_in_aperture(vgpu, off) ||
+	    !intel_vgpu_in_aperture(vgpu, off + count)) {
+		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
+		return -EINVAL;
+	}
+
+	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+					ALIGN_DOWN(off, PAGE_SIZE),
+					count + offset_in_page(off));
+	if (!aperture_va)
+		return -EIO;
+
+	if (is_write)
+		memcpy(aperture_va + offset_in_page(off), buf, count);
+	else
+		memcpy(buf, aperture_va + offset_in_page(off), count);
+
+	io_mapping_unmap(aperture_va);
+
+	return 0;
+}
+
 static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 		size_t count, loff_t *ppos, bool is_write)
 {
@@ -679,8 +712,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 			buf, count, is_write);
 		break;
 	case VFIO_PCI_BAR2_REGION_INDEX:
-		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
-			buf, count, is_write);
+		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
 		break;
 	case VFIO_PCI_BAR1_REGION_INDEX:
 	case VFIO_PCI_BAR3_REGION_INDEX:
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 562b5ad857a4..5c869e3fdf3b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -56,38 +56,6 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
 	(reg >= gvt->device_info.gtt_start_offset \
 	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
 
-static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
-{
-	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
-	u64 aperture_sz = vgpu_aperture_sz(vgpu);
-
-	return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
-}
-
-static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
-		void *pdata, unsigned int size, bool is_read)
-{
-	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
-	u64 offset = gpa - aperture_gpa;
-
-	if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
-		gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
-			     offset, size);
-		return -EINVAL;
-	}
-
-	if (!vgpu->gm.aperture_va) {
-		gvt_vgpu_err("BAR is not enabled\n");
-		return -ENXIO;
-	}
-
-	if (is_read)
-		memcpy(pdata, vgpu->gm.aperture_va + offset, size);
-	else
-		memcpy(vgpu->gm.aperture_va + offset, pdata, size);
-	return 0;
-}
-
 static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		void *p_data, unsigned int bytes, bool read)
 {
@@ -144,11 +112,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 	}
 	mutex_lock(&gvt->lock);
 
-	if (vgpu_gpa_is_aperture(vgpu, pa)) {
-		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
-		goto out;
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))
@@ -222,11 +185,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 
 	mutex_lock(&gvt->lock);
 
-	if (vgpu_gpa_is_aperture(vgpu, pa)) {
-		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
-		goto out;
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))
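With the kvmgt.c and mmio.c hunks above, aperture accesses no longer go through a persistent memremap() of the whole BAR; each VFIO access to BAR2 is served by a transient io_mapping_map_wc()/io_mapping_unmap() pair over the host GGTT mapping. The userspace sketch below is editorial: it shows how a VFIO user (such as QEMU with x-no-mmap=on) reaches intel_vgpu_aperture_rw() via a plain pread() on the mdev device fd. The ioctl and region index are standard VFIO; the function name is invented.

    #include <linux/vfio.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    /* Read one dword from the vGPU aperture (BAR2) through the device fd;
     * in the kernel this lands in intel_vgpu_rw() -> intel_vgpu_aperture_rw(). */
    static int read_aperture_dword(int device_fd, uint64_t off, uint32_t *val)
    {
            struct vfio_region_info info = {
                    .argsz = sizeof(info),
                    .index = VFIO_PCI_BAR2_REGION_INDEX,
            };

            if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info) < 0)
                    return -1;

            if (pread(device_fd, val, sizeof(*val), info.offset + off) !=
                (ssize_t)sizeof(*val))
                    return -1;

            return 0;
    }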
gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n", - offset, size); - return -EINVAL; - } - - if (!vgpu->gm.aperture_va) { - gvt_vgpu_err("BAR is not enabled\n"); - return -ENXIO; - } - - if (is_read) - memcpy(pdata, vgpu->gm.aperture_va + offset, size); - else - memcpy(vgpu->gm.aperture_va + offset, pdata, size); - return 0; -} - static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, void *p_data, unsigned int bytes, bool read) { @@ -144,11 +112,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, } mutex_lock(&gvt->lock); - if (vgpu_gpa_is_aperture(vgpu, pa)) { - ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true); - goto out; - } - offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); if (WARN_ON(bytes > 8)) @@ -222,11 +185,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, mutex_lock(&gvt->lock); - if (vgpu_gpa_is_aperture(vgpu, pa)) { - ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false); - goto out; - } - offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); if (WARN_ON(bytes > 8)) diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 8420d1fc3ddb..fa75a2eead90 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -299,21 +299,13 @@ int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa) { int i, ret = 0; - unsigned long pfn; gvt_dbg_core("emulate opregion from kernel\n"); switch (intel_gvt_host.hypervisor_type) { case INTEL_GVT_HYPERVISOR_KVM: - pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT); - vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT, - INTEL_GVT_OPREGION_SIZE, - MEMREMAP_WB); - if (!vgpu_opregion(vgpu)->va_gopregion) { - gvt_vgpu_err("failed to map guest opregion\n"); - ret = -EFAULT; - } - vgpu_opregion(vgpu)->mapped = true; + for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) + vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i; break; case INTEL_GVT_HYPERVISOR_XEN: /** @@ -352,10 +344,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) if (vgpu_opregion(vgpu)->mapped) map_vgpu_opregion(vgpu, false); } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) { - if (vgpu_opregion(vgpu)->mapped) { - memunmap(vgpu_opregion(vgpu)->va_gopregion); - vgpu_opregion(vgpu)->va_gopregion = NULL; - } + /* Guest opregion is released by VFIO */ } free_pages((unsigned long)vgpu_opregion(vgpu)->va, get_order(INTEL_GVT_OPREGION_SIZE)); @@ -480,19 +469,40 @@ static bool querying_capabilities(u32 scic) */ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) { - u32 *scic, *parm; + u32 scic, parm; u32 func, subfunc; + u64 scic_pa = 0, parm_pa = 0; + int ret; switch (intel_gvt_host.hypervisor_type) { case INTEL_GVT_HYPERVISOR_XEN: - scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC; - parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; + scic = *((u32 *)vgpu_opregion(vgpu)->va + + INTEL_GVT_OPREGION_SCIC); + parm = *((u32 *)vgpu_opregion(vgpu)->va + + INTEL_GVT_OPREGION_PARM); break; case INTEL_GVT_HYPERVISOR_KVM: - scic = vgpu_opregion(vgpu)->va_gopregion + - INTEL_GVT_OPREGION_SCIC; - parm = vgpu_opregion(vgpu)->va_gopregion + - INTEL_GVT_OPREGION_PARM; + scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + + INTEL_GVT_OPREGION_SCIC; + parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) + + INTEL_GVT_OPREGION_PARM; + + ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa, + &scic, sizeof(scic)); + if (ret) { + 
gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } + + ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa, + &parm, sizeof(parm)); + if (ret) { + gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } + break; default: gvt_vgpu_err("not supported hypervisor\n"); @@ -510,9 +520,9 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) return 0; } - func = GVT_OPREGION_FUNC(*scic); - subfunc = GVT_OPREGION_SUBFUNC(*scic); - if (!querying_capabilities(*scic)) { + func = GVT_OPREGION_FUNC(scic); + subfunc = GVT_OPREGION_SUBFUNC(scic); + if (!querying_capabilities(scic)) { gvt_vgpu_err("requesting runtime service: func \"%s\"," " subfunc \"%s\"\n", opregion_func_name(func), @@ -521,11 +531,43 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) * emulate exit status of function call, '0' means * "failure, generic, unsupported or unknown cause" */ - *scic &= ~OPREGION_SCIC_EXIT_MASK; - return 0; + scic &= ~OPREGION_SCIC_EXIT_MASK; + goto out; + } + + scic = 0; + parm = 0; + +out: + switch (intel_gvt_host.hypervisor_type) { + case INTEL_GVT_HYPERVISOR_XEN: + *((u32 *)vgpu_opregion(vgpu)->va + + INTEL_GVT_OPREGION_SCIC) = scic; + *((u32 *)vgpu_opregion(vgpu)->va + + INTEL_GVT_OPREGION_PARM) = parm; + break; + case INTEL_GVT_HYPERVISOR_KVM: + ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa, + &scic, sizeof(scic)); + if (ret) { + gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } + + ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa, + &parm, sizeof(parm)); + if (ret) { + gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n", + ret, scic_pa, sizeof(scic)); + return ret; + } + + break; + default: + gvt_vgpu_err("not supported hypervisor\n"); + return -EINVAL; } - *scic = 0; - *parm = 0; return 0; } diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index d031f6486ce3..cc1ce361cd76 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -50,6 +50,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) struct vgpu_sched_data { struct list_head lru_list; struct intel_vgpu *vgpu; + bool active; ktime_t sched_in_time; ktime_t sched_out_time; @@ -332,6 +333,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) if (!hrtimer_active(&sched_data->timer)) hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), sched_data->period), HRTIMER_MODE_ABS); + vgpu_data->active = true; } static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) @@ -339,6 +341,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) struct vgpu_sched_data *vgpu_data = vgpu->sched_data; list_del_init(&vgpu_data->lru_list); + vgpu_data->active = false; } static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { @@ -374,9 +377,12 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu) void intel_vgpu_start_schedule(struct intel_vgpu *vgpu) { - gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id); + struct vgpu_sched_data *vgpu_data = vgpu->sched_data; - vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu); + if (!vgpu_data->active) { + gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id); + vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu); + } } void intel_gvt_kick_schedule(struct intel_gvt *gvt) @@ -389,6 +395,10 @@ void 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0056638b0c16..b55b3580ca1d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -991,7 +991,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 
-	intel_vgpu_select_submission_ops(vgpu, 0);
+	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 	i915_gem_context_put(s->shadow_ctx);
 	kmem_cache_destroy(s->workloads);
 }
@@ -1079,6 +1079,7 @@ out_shadow_ctx:
  *
  */
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned long engine_mask,
 				     unsigned int interface)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1091,21 +1092,21 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
 		return -EINVAL;
 
-	if (s->active) {
-		s->ops->clean(vgpu);
-		s->active = false;
-		gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
-			vgpu->id, s->ops->name);
-	}
+	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+		return -EINVAL;
+
+	if (s->active)
+		s->ops->clean(vgpu, engine_mask);
 
 	if (interface == 0) {
 		s->ops = NULL;
 		s->virtual_submission_interface = 0;
-		gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+		s->active = false;
+		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
 		return 0;
 	}
 
-	ret = ops[interface]->init(vgpu);
+	ret = ops[interface]->init(vgpu, engine_mask);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 3de77dfa7c59..ff175a98b19e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -141,6 +141,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
 
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned long engine_mask,
 				     unsigned int interface);
 
 extern const struct intel_vgpu_submission_ops
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index a8784fa91289..b87b19d8443c 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -520,8 +520,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 	intel_vgpu_reset_submission(vgpu, resetting_eng);
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
-		intel_vgpu_select_submission_ops(vgpu, 0);
-
+		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 		/*fence will not be reset during virtual reset */
 		if (dmlr) {
 			intel_vgpu_reset_gtt(vgpu);
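Taken together, the scheduler.c/scheduler.h hunks thread the engine mask through intel_vgpu_select_submission_ops(): clean() only touches the masked engines, and removing the ops (interface == 0) is rejected unless the mask is ALL_ENGINES. An editorial sketch of the updated call contract (the example_* name is invented):

    /* Editorial sketch, not part of the patch. */
    static int example_toggle_submission(struct intel_vgpu *vgpu, int ring_id)
    {
            int ret;

            /* Per-engine enable, as ring_mode_mmio_write() now does: */
            ret = intel_vgpu_select_submission_ops(vgpu, ENGINE_MASK(ring_id),
                            INTEL_VGPU_EXECLIST_SUBMISSION);
            if (ret)
                    return ret;

            /* Removing the ops (interface == 0) is only valid for
             * ALL_ENGINES; a partial mask here would trip the WARN_ON
             * and return -EINVAL. */
            return intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
    }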