diff options
author | Dave Airlie <airlied@redhat.com> | 2021-12-10 06:35:20 +0100 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2021-12-10 06:35:20 +0100 |
commit | 211b4dbc070090b4183d6f9db7dd3bd4e6170447 (patch) | |
tree | a1751e7ee3411672af2efadac635984c719e3042 /drivers/gpu/drm/i915/gt | |
parent | Merge tag 'drm-misc-next-2021-12-09' of git://anongit.freedesktop.org/drm/drm... (diff) | |
parent | drm/i915/gt: Use hw_engine_masks as reset_domains (diff) | |
download | linux-211b4dbc070090b4183d6f9db7dd3bd4e6170447.tar.xz linux-211b4dbc070090b4183d6f9db7dd3bd4e6170447.zip |
Merge tag 'drm-intel-gt-next-2021-12-09' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Core Changes:
- Fix PENDING_ERROR leak in dma_fence_array_signaled() (Thomas Hellström)
Driver Changes:
- Fix runtime PM handling during PXP suspend (Tejas Upadhyay)
- Improve eviction performance on discrete by implementing async TTM moves (Thomas Hellström, Maarten Lankhorst)
- Improve robustness of error capture under memory pressure (Thomas Hellström)
- Fix GuC PMU versus GPU reset handling (Umesh Nerlige Ramappa)
- Use per device iommu check (Tvrtko Ursulin)
- Make error capture work with async migration (Thomas Hellström)
- Revert incorrect implementation of Wa_1508744258 causing hangs (José Roberto de Souza)
- Disable coarse power gating on some DG2 steppings workaround (Matt Roper)
- Add IC cache invalidation workaround on DG2 (Ramalingam C)
- Move two Icelake workarounds to the right place (Raviteja Goud Talla)
- Fix error pointer dereference in i915_gem_do_execbuffer() (Dan Carpenter)
- Fixup a couple of generic and DG2 specific issues in migration code (Matthew Auld)
- Fix kernel-doc warnings in i915_drm_object.c (Randy Dunlap)
- Drop stealing of bits from i915_sw_fence function pointer (Matthew Brost)
- Introduce new macros for i915 PTE (Michael Cheng)
- Prep work for engine reset by reset domain lookup (Tejas Upadhyay)
- Fixup drm-intel-gt-next build failure (Matthew Auld)
- Fix live_engine_busy_stats selftests in GuC mode (Umesh Nerlige Ramappa)
- Remove dma_resv_prune (Maarten Lankhorst)
- Preserve huge pages enablement after driver reload (Matthew Auld)
- Fix a NULL pointer dereference in igt_request_rewind() (selftests) (Zhou Qingyang)
- Add workaround numbers to GEN7_COMMON_SLICE_CHICKEN1 whitelisting (José Roberto de Souza)
- Increase timeouts in i915_gem_contexts selftests to handle GuC being slower (Bruce Chang)
Signed-off-by: Dave Airlie <airlied@redhat.com>
# Conflicts:
# drivers/gpu/drm/i915/display/intel_fbc.c
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YbIBOeqhn+nPzaYD@tursulin-mobl2
Diffstat (limited to 'drivers/gpu/drm/i915/gt')
37 files changed, 1418 insertions, 297 deletions
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index 890191f286e3..4a166d25fe60 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -185,7 +185,6 @@ static void gen6_alloc_va_range(struct i915_address_space *vm, pt = stash->pt[0]; __i915_gem_object_pin_pages(pt->base); - i915_gem_object_make_unshrinkable(pt->base); fill32_px(pt, vm->scratch[0]->encode); @@ -262,13 +261,10 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - __i915_vma_put(ppgtt->vma); - gen6_ppgtt_free_pd(ppgtt); free_scratch(vm); mutex_destroy(&ppgtt->flush); - mutex_destroy(&ppgtt->pin_mutex); free_pd(&ppgtt->base.vm, ppgtt->base.pd); } @@ -331,37 +327,6 @@ static const struct i915_vma_ops pd_vma_ops = { .unbind_vma = pd_vma_unbind, }; -static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) -{ - struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt; - struct i915_vma *vma; - - GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); - GEM_BUG_ON(size > ggtt->vm.total); - - vma = i915_vma_alloc(); - if (!vma) - return ERR_PTR(-ENOMEM); - - i915_active_init(&vma->active, NULL, NULL, 0); - - kref_init(&vma->ref); - mutex_init(&vma->pages_mutex); - vma->vm = i915_vm_get(&ggtt->vm); - vma->ops = &pd_vma_ops; - vma->private = ppgtt; - - vma->size = size; - vma->fence_size = size; - atomic_set(&vma->flags, I915_VMA_GGTT); - vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ - - INIT_LIST_HEAD(&vma->obj_link); - INIT_LIST_HEAD(&vma->closed_link); - - return vma; -} - int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); @@ -378,42 +343,92 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww) if (atomic_add_unless(&ppgtt->pin_count, 1, 0)) return 0; - if (mutex_lock_interruptible(&ppgtt->pin_mutex)) - return -EINTR; + /* grab the ppgtt resv to pin the object */ + err = i915_vm_lock_objects(&ppgtt->base.vm, ww); + if (err) + return err; /* * PPGTT PDEs reside in the GGTT and consists of 512 entries. The * allocator works in address space sizes, so it's multiplied by page * size. We allocate at the top of the GTT to avoid fragmentation. 
*/ - err = 0; - if (!atomic_read(&ppgtt->pin_count)) + if (!atomic_read(&ppgtt->pin_count)) { err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH); + + GEM_BUG_ON(ppgtt->vma->fence); + clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(ppgtt->vma)); + } if (!err) atomic_inc(&ppgtt->pin_count); - mutex_unlock(&ppgtt->pin_mutex); return err; } -void gen6_ppgtt_unpin(struct i915_ppgtt *base) +static int pd_dummy_obj_get_pages(struct drm_i915_gem_object *obj) { - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + obj->mm.pages = ZERO_SIZE_PTR; + return 0; +} - GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); - if (atomic_dec_and_test(&ppgtt->pin_count)) - i915_vma_unpin(ppgtt->vma); +static void pd_dummy_obj_put_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ } -void gen6_ppgtt_unpin_all(struct i915_ppgtt *base) +static const struct drm_i915_gem_object_ops pd_dummy_obj_ops = { + .name = "pd_dummy_obj", + .get_pages = pd_dummy_obj_get_pages, + .put_pages = pd_dummy_obj_put_pages, +}; + +static struct i915_page_directory * +gen6_alloc_top_pd(struct gen6_ppgtt *ppgtt) { - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + struct i915_ggtt * const ggtt = ppgtt->base.vm.gt->ggtt; + struct i915_page_directory *pd; + int err; - if (!atomic_read(&ppgtt->pin_count)) - return; + pd = __alloc_pd(I915_PDES); + if (unlikely(!pd)) + return ERR_PTR(-ENOMEM); - i915_vma_unpin(ppgtt->vma); - atomic_set(&ppgtt->pin_count, 0); + pd->pt.base = __i915_gem_object_create_internal(ppgtt->base.vm.gt->i915, + &pd_dummy_obj_ops, + I915_PDES * SZ_4K); + if (IS_ERR(pd->pt.base)) { + err = PTR_ERR(pd->pt.base); + pd->pt.base = NULL; + goto err_pd; + } + + pd->pt.base->base.resv = i915_vm_resv_get(&ppgtt->base.vm); + pd->pt.base->shares_resv_from = &ppgtt->base.vm; + + ppgtt->vma = i915_vma_instance(pd->pt.base, &ggtt->vm, NULL); + if (IS_ERR(ppgtt->vma)) { + err = PTR_ERR(ppgtt->vma); + ppgtt->vma = NULL; + goto err_pd; + } + + /* The dummy object we create is special, override ops.. 
*/ + ppgtt->vma->ops = &pd_vma_ops; + ppgtt->vma->private = ppgtt; + return pd; + +err_pd: + free_pd(&ppgtt->base.vm, pd); + return ERR_PTR(err); +} + +void gen6_ppgtt_unpin(struct i915_ppgtt *base) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + + GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); + if (atomic_dec_and_test(&ppgtt->pin_count)) + i915_vma_unpin(ppgtt->vma); } struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) @@ -427,7 +442,6 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) return ERR_PTR(-ENOMEM); mutex_init(&ppgtt->flush); - mutex_init(&ppgtt->pin_mutex); ppgtt_init(&ppgtt->base, gt, 0); ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t)); @@ -442,19 +456,13 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma; ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; - ppgtt->base.pd = __alloc_pd(I915_PDES); - if (!ppgtt->base.pd) { - err = -ENOMEM; - goto err_free; - } - err = gen6_ppgtt_init_scratch(ppgtt); if (err) - goto err_pd; + goto err_free; - ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE); - if (IS_ERR(ppgtt->vma)) { - err = PTR_ERR(ppgtt->vma); + ppgtt->base.pd = gen6_alloc_top_pd(ppgtt); + if (IS_ERR(ppgtt->base.pd)) { + err = PTR_ERR(ppgtt->base.pd); goto err_scratch; } @@ -462,10 +470,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) err_scratch: free_scratch(&ppgtt->base.vm); -err_pd: - free_pd(&ppgtt->base.vm, ppgtt->base.pd); err_free: - mutex_destroy(&ppgtt->pin_mutex); kfree(ppgtt); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h index 6a61a5c3a85a..5e5cf2ec3309 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h @@ -19,7 +19,6 @@ struct gen6_ppgtt { u32 pp_dir; atomic_t pin_count; - struct mutex pin_mutex; bool scan_for_unused_pt; }; @@ -71,7 +70,6 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base) int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww); void gen6_ppgtt_unpin(struct i915_ppgtt *base); -void gen6_ppgtt_unpin_all(struct i915_ppgtt *base); void gen6_ppgtt_enable(struct intel_gt *gt); void gen7_ppgtt_enable(struct intel_gt *gt); struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 461844dffd7e..e320610dd0b8 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -42,7 +42,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) vf_flush_wa = true; /* WaForGAMHang:kbl */ - if (IS_KBL_GT_STEP(rq->engine->i915, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0)) dc_flush_wa = true; } diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 037a9a6e4889..95c02096a61b 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -18,7 +18,7 @@ static u64 gen8_pde_encode(const dma_addr_t addr, const enum i915_cache_level level) { - u64 pde = addr | _PAGE_PRESENT | _PAGE_RW; + u64 pde = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW; if (level != I915_CACHE_NONE) pde |= PPAT_CACHED_PDE; @@ -32,10 +32,10 @@ static u64 gen8_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags) { - gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; + gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW; if (unlikely(flags & PTE_READ_ONLY)) - pte &= ~_PAGE_RW; + pte &= 
~GEN8_PAGE_RW; if (flags & PTE_LM) pte |= GEN12_PPGTT_PTE_LM; @@ -301,7 +301,6 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm, pt = stash->pt[!!lvl]; __i915_gem_object_pin_pages(pt->base); - i915_gem_object_make_unshrinkable(pt->base); fill_px(pt, vm->scratch[lvl]->encode); @@ -652,7 +651,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) vm->scratch[0]->encode = gen8_pte_encode(px_dma(vm->scratch[0]), - I915_CACHE_LLC, pte_flags); + I915_CACHE_NONE, pte_flags); for (i = 1; i <= vm->top; i++) { struct drm_i915_gem_object *obj; @@ -668,7 +667,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) } fill_px(obj, vm->scratch[i - 1]->encode); - obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC); + obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_NONE); vm->scratch[i] = obj; } diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 5634d14052bc..ba083d800a08 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -219,7 +219,7 @@ int __intel_context_do_pin_ww(struct intel_context *ce, */ err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww); - if (!err && ce->ring->vma->obj) + if (!err) err = i915_gem_object_lock(ce->ring->vma->obj, ww); if (!err && ce->state) err = i915_gem_object_lock(ce->state->obj, ww); @@ -228,17 +228,17 @@ int __intel_context_do_pin_ww(struct intel_context *ce, if (err) return err; - err = i915_active_acquire(&ce->active); + err = ce->ops->pre_pin(ce, ww, &vaddr); if (err) goto err_ctx_unpin; - err = ce->ops->pre_pin(ce, ww, &vaddr); + err = i915_active_acquire(&ce->active); if (err) - goto err_release; + goto err_post_unpin; err = mutex_lock_interruptible(&ce->pin_mutex); if (err) - goto err_post_unpin; + goto err_release; intel_engine_pm_might_get(ce->engine); @@ -273,11 +273,11 @@ int __intel_context_do_pin_ww(struct intel_context *ce, err_unlock: mutex_unlock(&ce->pin_mutex); +err_release: + i915_active_release(&ce->active); err_post_unpin: if (!handoff) ce->ops->post_unpin(ce); -err_release: - i915_active_release(&ce->active); err_ctx_unpin: intel_context_post_unpin(ce); @@ -364,7 +364,7 @@ static int __intel_context_active(struct i915_active *active) return 0; } -static int __i915_sw_fence_call +static int sw_fence_dummy_notify(struct i915_sw_fence *sf, enum i915_sw_fence_notify state) { diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index ff6753ccb129..352254e001b4 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -325,6 +325,38 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, engine->id = id; engine->legacy_idx = INVALID_ENGINE; engine->mask = BIT(id); + if (GRAPHICS_VER(gt->i915) >= 11) { + static const u32 engine_reset_domains[] = { + [RCS0] = GEN11_GRDOM_RENDER, + [BCS0] = GEN11_GRDOM_BLT, + [VCS0] = GEN11_GRDOM_MEDIA, + [VCS1] = GEN11_GRDOM_MEDIA2, + [VCS2] = GEN11_GRDOM_MEDIA3, + [VCS3] = GEN11_GRDOM_MEDIA4, + [VCS4] = GEN11_GRDOM_MEDIA5, + [VCS5] = GEN11_GRDOM_MEDIA6, + [VCS6] = GEN11_GRDOM_MEDIA7, + [VCS7] = GEN11_GRDOM_MEDIA8, + [VECS0] = GEN11_GRDOM_VECS, + [VECS1] = GEN11_GRDOM_VECS2, + [VECS2] = GEN11_GRDOM_VECS3, + [VECS3] = GEN11_GRDOM_VECS4, + }; + GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) || + !engine_reset_domains[id]); + engine->reset_domain = engine_reset_domains[id]; + } else { + static const u32 engine_reset_domains[] = { + [RCS0] = 
GEN6_GRDOM_RENDER, + [BCS0] = GEN6_GRDOM_BLT, + [VCS0] = GEN6_GRDOM_MEDIA, + [VCS1] = GEN8_GRDOM_MEDIA2, + [VECS0] = GEN6_GRDOM_VECS, + }; + GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) || + !engine_reset_domains[id]); + engine->reset_domain = engine_reset_domains[id]; + } engine->i915 = i915; engine->gt = gt; engine->uncore = gt->uncore; @@ -363,7 +395,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, DRIVER_CAPS(i915)->has_logical_contexts = true; ewma__engine_latency_init(&engine->latency); - seqcount_init(&engine->stats.lock); + seqcount_init(&engine->stats.execlists.lock); ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); @@ -1676,14 +1708,18 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, static void print_request_ring(struct drm_printer *m, struct i915_request *rq) { + struct i915_vma_snapshot *vsnap = &rq->batch_snapshot; void *ring; int size; + if (!i915_vma_snapshot_present(vsnap)) + vsnap = NULL; + drm_printf(m, "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n", rq->head, rq->postfix, rq->tail, - rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, - rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); + vsnap ? upper_32_bits(vsnap->gtt_offset) : ~0u, + vsnap ? lower_32_bits(vsnap->gtt_offset) : ~0u); size = rq->tail - rq->head; if (rq->tail < rq->head) @@ -1915,22 +1951,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, intel_engine_print_breadcrumbs(engine, m); } -static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine, - ktime_t *now) -{ - ktime_t total = engine->stats.total; - - /* - * If the engine is executing something at the moment - * add it to the total. - */ - *now = ktime_get(); - if (READ_ONCE(engine->stats.active)) - total = ktime_add(total, ktime_sub(*now, engine->stats.start)); - - return total; -} - /** * intel_engine_get_busy_time() - Return current accumulated engine busyness * @engine: engine to report on @@ -1940,15 +1960,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine, */ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now) { - unsigned int seq; - ktime_t total; - - do { - seq = read_seqcount_begin(&engine->stats.lock); - total = __intel_engine_get_busy_time(engine, now); - } while (read_seqcount_retry(&engine->stats.lock, seq)); - - return total; + return engine->busyness(engine, now); } struct intel_context * diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h index 24fbdd94351a..8e762d683e50 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_stats.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h @@ -15,45 +15,46 @@ static inline void intel_engine_context_in(struct intel_engine_cs *engine) { + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; unsigned long flags; - if (engine->stats.active) { - engine->stats.active++; + if (stats->active) { + stats->active++; return; } /* The writer is serialised; but the pmu reader may be from hardirq */ local_irq_save(flags); - write_seqcount_begin(&engine->stats.lock); + write_seqcount_begin(&stats->lock); - engine->stats.start = ktime_get(); - engine->stats.active++; + stats->start = ktime_get(); + stats->active++; - write_seqcount_end(&engine->stats.lock); + write_seqcount_end(&stats->lock); local_irq_restore(flags); - GEM_BUG_ON(!engine->stats.active); + GEM_BUG_ON(!stats->active); } static inline void intel_engine_context_out(struct intel_engine_cs *engine) { + 
struct intel_engine_execlists_stats *stats = &engine->stats.execlists; unsigned long flags; - GEM_BUG_ON(!engine->stats.active); - if (engine->stats.active > 1) { - engine->stats.active--; + GEM_BUG_ON(!stats->active); + if (stats->active > 1) { + stats->active--; return; } local_irq_save(flags); - write_seqcount_begin(&engine->stats.lock); + write_seqcount_begin(&stats->lock); - engine->stats.active--; - engine->stats.total = - ktime_add(engine->stats.total, - ktime_sub(ktime_get(), engine->stats.start)); + stats->active--; + stats->total = ktime_add(stats->total, + ktime_sub(ktime_get(), stats->start)); - write_seqcount_end(&engine->stats.lock); + write_seqcount_end(&stats->lock); local_irq_restore(flags); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index e0f773585c29..36365bdbe1ee 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -257,6 +257,55 @@ struct intel_engine_execlists { #define INTEL_ENGINE_CS_MAX_NAME 8 +struct intel_engine_execlists_stats { + /** + * @active: Number of contexts currently scheduled in. + */ + unsigned int active; + + /** + * @lock: Lock protecting the below fields. + */ + seqcount_t lock; + + /** + * @total: Total time this engine was busy. + * + * Accumulated time not counting the most recent block in cases where + * engine is currently busy (active > 0). + */ + ktime_t total; + + /** + * @start: Timestamp of the last idle to active transition. + * + * Idle is defined as active == 0, active is active > 0. + */ + ktime_t start; +}; + +struct intel_engine_guc_stats { + /** + * @running: Active state of the engine when busyness was last sampled. + */ + bool running; + + /** + * @prev_total: Previous value of total runtime clock cycles. + */ + u32 prev_total; + + /** + * @total_gt_clks: Total gt clock cycles this engine was busy. + */ + u64 total_gt_clks; + + /** + * @start_gt_clk: GT clock time of last idle to active transition. + */ + u64 start_gt_clk; +}; + struct intel_engine_cs { struct drm_i915_private *i915; struct intel_gt *gt; @@ -269,6 +318,7 @@ struct intel_engine_cs { unsigned int guc_id; intel_engine_mask_t mask; + u32 reset_domain; /** * @logical_mask: logical mask of engine, reported to user space via * query IOCTL and used to communicate with the GuC in logical space. @@ -439,6 +489,12 @@ struct intel_engine_cs { void (*add_active_request)(struct i915_request *rq); void (*remove_active_request)(struct i915_request *rq); + /* + * Get engine busyness and the time at which the busyness was sampled. + */ + ktime_t (*busyness)(struct intel_engine_cs *engine, + ktime_t *now); + struct intel_engine_execlists execlists; /* @@ -488,30 +544,10 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); struct { - /** - * @active: Number of contexts currently scheduled in. - */ - unsigned int active; - - /** - * @lock: Lock protecting the below fields. - */ - seqcount_t lock; - - /** - * @total: Total time this engine was busy. - * - * Accumulated time not counting the most recent block in cases - * where engine is currently busy (active > 0). - */ - ktime_t total; - - /** - * @start: Timestamp of the last idle to active transition. - * - * Idle is defined as active == 0, active is active > 0. - */ - ktime_t start; + union { + struct intel_engine_execlists_stats execlists; + struct intel_engine_guc_stats guc; + }; /** * @rps: Utilisation at last RPS sampling. 
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index bedb80057046..a69df5e9e77a 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -2186,7 +2186,8 @@ struct execlists_capture { static void execlists_capture_work(struct work_struct *work) { struct execlists_capture *cap = container_of(work, typeof(*cap), work); - const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; + const gfp_t gfp = __GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | + __GFP_NOWARN; struct intel_engine_cs *engine = cap->rq->engine; struct intel_gt_coredump *gt = cap->error->gt; struct intel_engine_capture_vma *vma; @@ -3293,6 +3294,38 @@ static void execlists_release(struct intel_engine_cs *engine) lrc_fini_wa_ctx(engine); } +static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine, + ktime_t *now) +{ + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; + ktime_t total = stats->total; + + /* + * If the engine is executing something at the moment + * add it to the total. + */ + *now = ktime_get(); + if (READ_ONCE(stats->active)) + total = ktime_add(total, ktime_sub(*now, stats->start)); + + return total; +} + +static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine, + ktime_t *now) +{ + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; + unsigned int seq; + ktime_t total; + + do { + seq = read_seqcount_begin(&stats->lock); + total = __execlists_engine_busyness(engine, now); + } while (read_seqcount_retry(&stats->lock, seq)); + + return total; +} + static void logical_ring_default_vfuncs(struct intel_engine_cs *engine) { @@ -3349,6 +3382,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->emit_bb_start = gen8_emit_bb_start; else engine->emit_bb_start = gen8_emit_bb_start_noarb; + + engine->busyness = execlists_engine_busyness; } static void logical_ring_default_irqs(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 555111c3bee5..cbc6d2b1fd9e 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -106,7 +106,7 @@ static bool needs_idle_maps(struct drm_i915_private *i915) * Query intel_iommu to see if we need the workaround. Presumably that * was loaded first. 
*/ - if (!intel_vtd_active()) + if (!intel_vtd_active(i915)) return false; if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915)) @@ -209,7 +209,7 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags) { - gen8_pte_t pte = addr | _PAGE_PRESENT; + gen8_pte_t pte = addr | GEN8_PAGE_PRESENT; if (flags & PTE_LM) pte |= GEN12_GGTT_PTE_LM; @@ -1233,7 +1233,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) if (ret) return ret; - if (intel_vtd_active()) + if (intel_vtd_active(i915)) drm_info(&i915->drm, "VT-d active for gfx access\n"); return 0; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 524eaf678790..c0fa41e4c803 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -86,6 +86,7 @@ static int __gt_unpark(struct intel_wakeref *wf) intel_rc6_unpark(>->rc6); intel_rps_unpark(>->rps); i915_pmu_gt_unparked(i915); + intel_guc_busyness_unpark(gt); intel_gt_unpark_requests(gt); runtime_begin(gt); @@ -104,6 +105,7 @@ static int __gt_park(struct intel_wakeref *wf) runtime_end(gt); intel_gt_park_requests(gt); + intel_guc_busyness_park(gt); i915_vma_parked(gt); i915_pmu_gt_parked(i915); intel_rps_park(>->rps); @@ -301,7 +303,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt) user_forcewake(gt, true); wait_for_suspend(gt); - intel_pxp_suspend(>->pxp, false); + intel_pxp_suspend_prepare(>->pxp); } static suspend_state_t pm_suspend_target(void) @@ -326,6 +328,7 @@ void intel_gt_suspend_late(struct intel_gt *gt) GEM_BUG_ON(gt->awake); intel_uc_suspend(>->uc); + intel_pxp_suspend(>->pxp); /* * On disabling the device, we want to turn off HW access to memory @@ -353,7 +356,7 @@ void intel_gt_suspend_late(struct intel_gt *gt) void intel_gt_runtime_suspend(struct intel_gt *gt) { - intel_pxp_suspend(>->pxp, true); + intel_pxp_runtime_suspend(>->pxp); intel_uc_runtime_suspend(>->uc); GT_TRACE(gt, "\n"); @@ -371,7 +374,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt) if (ret) return ret; - intel_pxp_resume(>->pxp); + intel_pxp_runtime_resume(>->pxp); return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 67d14afa6623..0dd254cb1f69 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -7,6 +7,8 @@ #include <linux/fault-inject.h> +#include <drm/drm_cache.h> + #include "gem/i915_gem_lmem.h" #include "i915_trace.h" #include "intel_gt.h" @@ -273,6 +275,7 @@ static void poison_scratch_page(struct drm_i915_gem_object *scratch) val = POISON_FREE; memset(vaddr, val, scratch->base.size); + drm_clflush_virt_range(vaddr, scratch->base.size); } int setup_scratch_page(struct i915_address_space *vm) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index dfeaef680aac..51afe66d00f2 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -135,6 +135,9 @@ typedef u64 gen8_pte_t; #define GEN8_PPAT_ELLC_OVERRIDE (0<<2) #define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8)) +#define GEN8_PAGE_PRESENT BIT_ULL(0) +#define GEN8_PAGE_RW BIT_ULL(1) + #define GEN8_PDE_IPS_64K BIT(11) #define GEN8_PDE_PS_2M BIT(7) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 56156cf18c41..b3489599e4de 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1167,6 +1167,11 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) cs = gen12_emit_cmd_buf_wa(ce, cs); cs = 
gen12_emit_restore_scratch(ce, cs); + /* Wa_16013000631:dg2 */ + if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) || + IS_DG2_G11(ce->engine->i915)) + cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); + return cs; } diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c index afb1cce9a352..19a01878fee3 100644 --- a/drivers/gpu/drm/i915/gt/intel_migrate.c +++ b/drivers/gpu/drm/i915/gt/intel_migrate.c @@ -13,7 +13,6 @@ struct insert_pte_data { u64 offset; - bool is_lmem; }; #define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */ @@ -40,7 +39,7 @@ static void insert_pte(struct i915_address_space *vm, struct insert_pte_data *d = data; vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, - d->is_lmem ? PTE_LM : 0); + i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0); d->offset += PAGE_SIZE; } @@ -134,8 +133,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt) goto err_vm; /* Now allow the GPU to rewrite the PTE via its own ppGTT */ - d.is_lmem = i915_gem_object_is_lmem(vm->vm.scratch[0]); - vm->vm.foreach(&vm->vm, base, base + sz, insert_pte, &d); + vm->vm.foreach(&vm->vm, base, d.offset - base, insert_pte, &d); } return &vm->vm; @@ -281,10 +279,10 @@ static int emit_pte(struct i915_request *rq, GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8); /* Compute the page directory offset for the target address range */ - offset += (u64)rq->engine->instance << 32; offset >>= 12; offset *= sizeof(u64); offset += 2 * CHUNK_SZ; + offset += (u64)rq->engine->instance << 32; cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 15f9ada28a7a..9c253ba593c6 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -424,7 +424,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, table->unused_entries_index = I915_MOCS_PTE; if (IS_DG2(i915)) { - if (IS_DG2_GT_STEP(i915, G10, STEP_A0, STEP_B0)) { + if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) { table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax); table->table = dg2_mocs_table_g10_ax; } else { diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 43093dd2d0c9..c3155ee58689 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -117,10 +117,17 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) GEN6_RC_CTL_RC6_ENABLE | GEN6_RC_CTL_EI_MODE(1); - pg_enable = - GEN9_RENDER_PG_ENABLE | - GEN9_MEDIA_PG_ENABLE | - GEN11_MEDIA_SAMPLER_PG_ENABLE; + /* Wa_16011777198 - Render powergating must remain disabled */ + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) + pg_enable = + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE; + else + pg_enable = + GEN9_RENDER_PG_ENABLE | + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE; if (GRAPHICS_VER(gt->i915) >= 12) { for (i = 0; i < I915_MAX_VCS; i++) diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index afb35d2e5c73..9ea49e0a27c0 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -66,12 +66,16 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem) DMA_ATTR_FORCE_CONTIGUOUS); } -static void +static int region_lmem_release(struct intel_memory_region *mem) { - 
intel_region_ttm_fini(mem); + int ret; + + ret = intel_region_ttm_fini(mem); io_mapping_fini(&mem->iomap); release_fake_lmem_bar(mem); + + return ret; } static int @@ -158,7 +162,7 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt) static bool get_legacy_lowmem_region(struct intel_uncore *uncore, u64 *start, u32 *size) { - if (!IS_DG1_GT_STEP(uncore->i915, STEP_A0, STEP_C0)) + if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0)) return false; *start = 0; @@ -231,7 +235,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) return mem; err_region_put: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 91200c43951f..63199f0550e6 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -297,13 +297,6 @@ static int gen6_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - static const u32 hw_engine_mask[] = { - [RCS0] = GEN6_GRDOM_RENDER, - [BCS0] = GEN6_GRDOM_BLT, - [VCS0] = GEN6_GRDOM_MEDIA, - [VCS1] = GEN8_GRDOM_MEDIA2, - [VECS0] = GEN6_GRDOM_VECS, - }; struct intel_engine_cs *engine; u32 hw_mask; @@ -314,8 +307,7 @@ static int gen6_reset_engines(struct intel_gt *gt, hw_mask = 0; for_each_engine_masked(engine, gt, engine_mask, tmp) { - GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); - hw_mask |= hw_engine_mask[engine->id]; + hw_mask |= engine->reset_domain; } } @@ -492,22 +484,6 @@ static int gen11_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - static const u32 hw_engine_mask[] = { - [RCS0] = GEN11_GRDOM_RENDER, - [BCS0] = GEN11_GRDOM_BLT, - [VCS0] = GEN11_GRDOM_MEDIA, - [VCS1] = GEN11_GRDOM_MEDIA2, - [VCS2] = GEN11_GRDOM_MEDIA3, - [VCS3] = GEN11_GRDOM_MEDIA4, - [VCS4] = GEN11_GRDOM_MEDIA5, - [VCS5] = GEN11_GRDOM_MEDIA6, - [VCS6] = GEN11_GRDOM_MEDIA7, - [VCS7] = GEN11_GRDOM_MEDIA8, - [VECS0] = GEN11_GRDOM_VECS, - [VECS1] = GEN11_GRDOM_VECS2, - [VECS2] = GEN11_GRDOM_VECS3, - [VECS3] = GEN11_GRDOM_VECS4, - }; struct intel_engine_cs *engine; intel_engine_mask_t tmp; u32 reset_mask, unlock_mask = 0; @@ -518,8 +494,7 @@ static int gen11_reset_engines(struct intel_gt *gt, } else { reset_mask = 0; for_each_engine_masked(engine, gt, engine_mask, tmp) { - GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); - reset_mask |= hw_engine_mask[engine->id]; + reset_mask |= engine->reset_domain; ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask); if (ret) goto sfc_unlock; @@ -1367,20 +1342,27 @@ void intel_gt_handle_error(struct intel_gt *gt, /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */ synchronize_rcu_expedited(); - /* Prevent any other reset-engine attempt. */ - for_each_engine(engine, gt, tmp) { - while (test_and_set_bit(I915_RESET_ENGINE + engine->id, - >->reset.flags)) - wait_on_bit(>->reset.flags, - I915_RESET_ENGINE + engine->id, - TASK_UNINTERRUPTIBLE); + /* + * Prevent any other reset-engine attempt. We don't do this for GuC + * submission the GuC owns the per-engine reset, not the i915. 
+ */ + if (!intel_uc_uses_guc_submission(>->uc)) { + for_each_engine(engine, gt, tmp) { + while (test_and_set_bit(I915_RESET_ENGINE + engine->id, + >->reset.flags)) + wait_on_bit(>->reset.flags, + I915_RESET_ENGINE + engine->id, + TASK_UNINTERRUPTIBLE); + } } intel_gt_reset_global(gt, engine_mask, msg); - for_each_engine(engine, gt, tmp) - clear_bit_unlock(I915_RESET_ENGINE + engine->id, - >->reset.flags); + if (!intel_uc_uses_guc_submission(>->uc)) { + for_each_engine(engine, gt, tmp) + clear_bit_unlock(I915_RESET_ENGINE + engine->id, + >->reset.flags); + } clear_bit_unlock(I915_RESET_BACKOFF, >->reset.flags); smp_mb__after_atomic(); wake_up_all(>->reset.queue); @@ -1441,6 +1423,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt) BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES > I915_WEDGED_ON_INIT); intel_gt_set_wedged(gt); + i915_disable_error_state(gt->i915, -ENODEV); set_bit(I915_WEDGED_ON_INIT, >->reset.flags); /* Wedged on init is non-recoverable */ @@ -1450,6 +1433,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt) void intel_gt_set_wedged_on_fini(struct intel_gt *gt) { intel_gt_set_wedged(gt); + i915_disable_error_state(gt->i915, -ENODEV); set_bit(I915_WEDGED_ON_FINI, >->reset.flags); intel_gt_retire_requests(gt); /* cleanup any wedged requests */ } diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 586dca1731ce..3e6fac0340ef 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -1357,7 +1357,7 @@ retry: err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww); if (!err && gen7_wa_vma) err = i915_gem_object_lock(gen7_wa_vma->obj, &ww); - if (!err && engine->legacy.ring->vma->obj) + if (!err) err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww); if (!err) err = intel_timeline_pin(timeline, &ww); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 5e275f8dda8c..07ff7ba7b2b7 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -936,8 +936,70 @@ void intel_rps_park(struct intel_rps *rps) GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); } +u32 intel_rps_get_boost_frequency(struct intel_rps *rps) +{ + struct intel_guc_slpc *slpc; + + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + return slpc->boost_freq; + } else { + return intel_gpu_freq(rps, rps->boost_freq); + } +} + +static int rps_set_boost_freq(struct intel_rps *rps, u32 val) +{ + bool boost = false; + + /* Validate against (static) hardware limits */ + val = intel_freq_opcode(rps, val); + if (val < rps->min_freq || val > rps->max_freq) + return -EINVAL; + + mutex_lock(&rps->lock); + if (val != rps->boost_freq) { + rps->boost_freq = val; + boost = atomic_read(&rps->num_waiters); + } + mutex_unlock(&rps->lock); + if (boost) + schedule_work(&rps->work); + + return 0; +} + +int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq) +{ + struct intel_guc_slpc *slpc; + + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + return intel_guc_slpc_set_boost_freq(slpc, freq); + } else { + return rps_set_boost_freq(rps, freq); + } +} + +void intel_rps_dec_waiters(struct intel_rps *rps) +{ + struct intel_guc_slpc *slpc; + + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + intel_guc_slpc_dec_waiters(slpc); + } else { + atomic_dec(&rps->num_waiters); + } +} + void intel_rps_boost(struct i915_request *rq) { + struct intel_guc_slpc *slpc; + if (i915_request_signaled(rq) || 
i915_request_has_waitboost(rq)) return; @@ -945,6 +1007,16 @@ void intel_rps_boost(struct i915_request *rq) if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) { struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + /* Return if old value is non zero */ + if (!atomic_fetch_inc(&slpc->num_waiters)) + schedule_work(&slpc->boost_work); + + return; + } + if (atomic_fetch_inc(&rps->num_waiters)) return; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h index 11960d64ca82..aee12f37d38a 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.h +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -23,6 +23,9 @@ void intel_rps_disable(struct intel_rps *rps); void intel_rps_park(struct intel_rps *rps); void intel_rps_unpark(struct intel_rps *rps); void intel_rps_boost(struct i915_request *rq); +void intel_rps_dec_waiters(struct intel_rps *rps); +u32 intel_rps_get_boost_frequency(struct intel_rps *rps); +int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq); int intel_rps_set(struct intel_rps *rps, u8 val); void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index e1f362530889..3113266c286e 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -482,7 +482,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine, gen9_ctx_workarounds_init(engine, wal); /* WaToEnableHwFixForPushConstHWBug:kbl */ - if (IS_KBL_GT_STEP(i915, STEP_C0, STEP_FOREVER)) + if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER)) wa_masked_en(wal, COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); @@ -560,6 +560,22 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, /* * These settings aren't actually workarounds, but general tuning settings that + * need to be programmed on dg2 platform. + */ +static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK, + REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)); + wa_add(wal, + FF_MODE2, + FF_MODE2_TDS_TIMER_MASK, + FF_MODE2_TDS_TIMER_128, + 0, false); +} + +/* + * These settings aren't actually workarounds, but general tuning settings that * need to be programmed on several platforms. 
*/ static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine, @@ -621,13 +637,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine, FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0, false); - - /* - * Wa_14012131227:dg1 - * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p - */ - wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1, - GEN9_RHWO_OPTIMIZATION_DISABLE); } static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine, @@ -644,6 +653,42 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine, DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE); } +static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + dg2_ctx_gt_tuning_init(engine, wal); + + /* Wa_16011186671:dg2_g11 */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) { + wa_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH); + wa_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) { + /* Wa_14010469329:dg2_g10 */ + wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3, + XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE); + + /* + * Wa_22010465075:dg2_g10 + * Wa_22010613112:dg2_g10 + * Wa_14010698770:dg2_g10 + */ + wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3, + GEN12_DISABLE_CPS_AWARE_COLOR_PIPE); + } + + /* Wa_16013271637:dg2 */ + wa_masked_en(wal, SLICE_COMMON_ECO_CHICKEN1, + MSC_MSAA_REODER_BUF_BYPASS_DISABLE); + + /* Wa_22012532006:dg2 */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) + wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7, + DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA); +} + static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal) { @@ -730,7 +775,11 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, if (engine->class != RENDER_CLASS) goto done; - if (IS_DG1(i915)) + if (IS_DG2(i915)) + dg2_ctx_workarounds_init(engine, wal); + else if (IS_XEHPSDV(i915)) + ; /* noop; none at this time */ + else if (IS_DG1(i915)) dg1_ctx_workarounds_init(engine, wal); else if (GRAPHICS_VER(i915) == 12) gen12_ctx_workarounds_init(engine, wal); @@ -878,10 +927,51 @@ hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) } static void +gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + const struct sseu_dev_info *sseu = &i915->gt.info.sseu; + unsigned int slice, subslice; + u32 mcr, mcr_mask; + + GEM_BUG_ON(GRAPHICS_VER(i915) != 9); + + /* + * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml + * Before any MMIO read into slice/subslice specific registers, MCR + * packet control register needs to be programmed to point to any + * enabled s/ss pair. Otherwise, incorrect values will be returned. + * This means each subsequent MMIO read will be forwarded to an + * specific s/ss combination, but this is OK since these registers + * are consistent across s/ss in almost all cases. In the rare + * occasions, such as INSTDONE, where this value is dependent + * on s/ss combo, the read should be done with read_subslice_reg. 
+ */ + slice = ffs(sseu->slice_mask) - 1; + GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); + subslice = ffs(intel_sseu_get_subslices(sseu, slice)); + GEM_BUG_ON(!subslice); + subslice--; + + /* + * We use GEN8_MCR..() macros to calculate the |mcr| value for + * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads + */ + mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice); + mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK; + + drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr); + + wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr); +} + +static void gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { struct drm_i915_private *i915 = gt->i915; + /* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */ + gen9_wa_init_mcr(i915, wal); + /* WaDisableKillLogic:bxt,skl,kbl */ if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915)) wa_write_or(wal, @@ -916,7 +1006,7 @@ skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); /* WaInPlaceDecompressionHang:skl */ - if (IS_SKL_GT_STEP(gt->i915, STEP_A0, STEP_H0)) + if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0)) wa_write_or(wal, GEN9_GAMT_ECO_REG_RW_IA, GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); @@ -928,7 +1018,7 @@ kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) gen9_gt_workarounds_init(gt, wal); /* WaDisableDynamicCreditSharing:kbl */ - if (IS_KBL_GT_STEP(gt->i915, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0)) wa_write_or(wal, GAMT_CHKN_BIT_REG, GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); @@ -1134,9 +1224,18 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) GAMT_CHKN_BIT_REG, GAMT_CHKN_DISABLE_L3_COH_PIPE); + /* Wa_1407352427:icl,ehl */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, + PSDUNIT_CLKGATE_DIS); + + /* Wa_1406680159:icl,ehl */ + wa_write_or(wal, + SUBSLICE_UNIT_LEVEL_CLKGATE, + GWUNIT_CLKGATE_DIS); + /* Wa_1607087056:icl,ehl,jsl */ if (IS_ICELAKE(i915) || - IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0)) + IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); @@ -1190,19 +1289,19 @@ tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) gen12_gt_workarounds_init(gt, wal); /* Wa_1409420604:tgl */ - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2, CPSSUNIT_CLKGATE_DIS); /* Wa_1607087056:tgl also know as BUG:1409180338 */ - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); /* Wa_1408615072:tgl[a0] */ - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL); } @@ -1215,7 +1314,7 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) gen12_gt_workarounds_init(gt, wal); /* Wa_1607087056:dg1 */ - if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0)) + if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); @@ -1236,7 +1335,179 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) static void xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { + struct drm_i915_private *i915 = gt->i915; 
+ + xehp_init_mcr(gt, wal); + + /* Wa_1409757795:xehpsdv */ + wa_write_or(wal, SCCGCTL94DC, CG3DDISURB); + + /* Wa_18011725039:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) { + wa_masked_dis(wal, MLTICTXCTL, TDONRENDER); + wa_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH); + } + + /* Wa_16011155590:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + TSGUNIT_CLKGATE_DIS); + + /* Wa_14011780169:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) { + wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS | + GAMTLBVDBOX7_CLKGATE_DIS | + GAMTLBVDBOX6_CLKGATE_DIS | + GAMTLBVDBOX5_CLKGATE_DIS | + GAMTLBVDBOX4_CLKGATE_DIS | + GAMTLBVDBOX3_CLKGATE_DIS | + GAMTLBVDBOX2_CLKGATE_DIS | + GAMTLBVDBOX1_CLKGATE_DIS | + GAMTLBVDBOX0_CLKGATE_DIS | + GAMTLBKCR_CLKGATE_DIS | + GAMTLBGUC_CLKGATE_DIS | + GAMTLBBLT_CLKGATE_DIS); + wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS | + GAMTLBGFXA1_CLKGATE_DIS | + GAMTLBCOMPA0_CLKGATE_DIS | + GAMTLBCOMPA1_CLKGATE_DIS | + GAMTLBCOMPB0_CLKGATE_DIS | + GAMTLBCOMPB1_CLKGATE_DIS | + GAMTLBCOMPC0_CLKGATE_DIS | + GAMTLBCOMPC1_CLKGATE_DIS | + GAMTLBCOMPD0_CLKGATE_DIS | + GAMTLBCOMPD1_CLKGATE_DIS | + GAMTLBMERT_CLKGATE_DIS | + GAMTLBVEBOX3_CLKGATE_DIS | + GAMTLBVEBOX2_CLKGATE_DIS | + GAMTLBVEBOX1_CLKGATE_DIS | + GAMTLBVEBOX0_CLKGATE_DIS); + } + + /* Wa_14012362059:xehpsdv */ + wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB); + + /* Wa_16012725990:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER)) + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS); + + /* Wa_14011060649:xehpsdv */ + wa_14011060649(gt, wal); + + /* Wa_14014368820:xehpsdv */ + wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS | + GLOBAL_INVALIDATION_MODE); +} + +static void +dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) +{ + struct intel_engine_cs *engine; + int id; + xehp_init_mcr(gt, wal); + + /* Wa_14011060649:dg2 */ + wa_14011060649(gt, wal); + + /* + * Although there are per-engine instances of these registers, + * they technically exist outside the engine itself and are not + * impacted by engine resets. Furthermore, they're part of the + * GuC blacklist so trying to treat them as engine workarounds + * will result in GuC initialization failure and a wedged GPU. 
+ */ + for_each_engine(engine, gt, id) { + if (engine->class != VIDEO_DECODE_CLASS) + continue; + + /* Wa_16010515920:dg2_g10 */ + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) + wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base), + ALNUNIT_CLKGATE_DIS); + } + + if (IS_DG2_G10(gt->i915)) { + /* Wa_22010523718:dg2 */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + CG3DDISCFEG_CLKGATE_DIS); + + /* Wa_14011006942:dg2 */ + wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE, + DSS_ROUTER_CLKGATE_DIS); + } + + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) { + /* Wa_14010680813:dg2_g10 */ + wa_write_or(wal, GEN12_GAMSTLB_CTRL, CONTROL_BLOCK_CLKGATE_DIS | + EGRESS_BLOCK_CLKGATE_DIS | TAG_BLOCK_CLKGATE_DIS); + + /* Wa_14010948348:dg2_g10 */ + wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS); + + /* Wa_14011037102:dg2_g10 */ + wa_write_or(wal, UNSLCGCTL9444, LTCDD_CLKGATE_DIS); + + /* Wa_14011371254:dg2_g10 */ + wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS); + + /* Wa_14011431319:dg2_g10 */ + wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS | + GAMTLBVDBOX7_CLKGATE_DIS | + GAMTLBVDBOX6_CLKGATE_DIS | + GAMTLBVDBOX5_CLKGATE_DIS | + GAMTLBVDBOX4_CLKGATE_DIS | + GAMTLBVDBOX3_CLKGATE_DIS | + GAMTLBVDBOX2_CLKGATE_DIS | + GAMTLBVDBOX1_CLKGATE_DIS | + GAMTLBVDBOX0_CLKGATE_DIS | + GAMTLBKCR_CLKGATE_DIS | + GAMTLBGUC_CLKGATE_DIS | + GAMTLBBLT_CLKGATE_DIS); + wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS | + GAMTLBGFXA1_CLKGATE_DIS | + GAMTLBCOMPA0_CLKGATE_DIS | + GAMTLBCOMPA1_CLKGATE_DIS | + GAMTLBCOMPB0_CLKGATE_DIS | + GAMTLBCOMPB1_CLKGATE_DIS | + GAMTLBCOMPC0_CLKGATE_DIS | + GAMTLBCOMPC1_CLKGATE_DIS | + GAMTLBCOMPD0_CLKGATE_DIS | + GAMTLBCOMPD1_CLKGATE_DIS | + GAMTLBMERT_CLKGATE_DIS | + GAMTLBVEBOX3_CLKGATE_DIS | + GAMTLBVEBOX2_CLKGATE_DIS | + GAMTLBVEBOX1_CLKGATE_DIS | + GAMTLBVEBOX0_CLKGATE_DIS); + + /* Wa_14010569222:dg2_g10 */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + GAMEDIA_CLKGATE_DIS); + + /* Wa_14011028019:dg2_g10 */ + wa_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS); + } + + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) || + IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) { + /* Wa_14012362059:dg2 */ + wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB); + } + + /* Wa_1509235366:dg2 */ + wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS | + GLOBAL_INVALIDATION_MODE); + + /* Wa_14014830051:dg2 */ + wa_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN); + + /* + * The following are not actually "workarounds" but rather + * recommended tuning settings documented in the bspec's + * performance guide section. 
+ */ + wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS); + wa_write_or(wal, GEN12_SQCM, EN_32B_ACCESS); } static void @@ -1244,7 +1515,9 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal) { struct drm_i915_private *i915 = gt->i915; - if (IS_XEHPSDV(i915)) + if (IS_DG2(i915)) + dg2_gt_workarounds_init(gt, wal); + else if (IS_XEHPSDV(i915)) xehpsdv_gt_workarounds_init(gt, wal); else if (IS_DG1(i915)) dg1_gt_workarounds_init(gt, wal); @@ -1518,7 +1791,7 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine) RING_FORCE_TO_NONPRIV_RANGE_4); } -static void cml_whitelist_build(struct intel_engine_cs *engine) +static void allow_read_ctx_timestamp(struct intel_engine_cs *engine) { struct i915_wa_list *w = &engine->whitelist; @@ -1526,6 +1799,11 @@ static void cml_whitelist_build(struct intel_engine_cs *engine) whitelist_reg_ext(w, RING_CTX_TIMESTAMP(engine->mmio_base), RING_FORCE_TO_NONPRIV_ACCESS_RD); +} + +static void cml_whitelist_build(struct intel_engine_cs *engine) +{ + allow_read_ctx_timestamp(engine); cfl_whitelist_build(engine); } @@ -1534,6 +1812,8 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) { struct i915_wa_list *w = &engine->whitelist; + allow_read_ctx_timestamp(engine); + switch (engine->class) { case RENDER_CLASS: /* WaAllowUMDToModifyHalfSliceChicken7:icl */ @@ -1569,15 +1849,9 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) /* hucStatus2RegOffset */ whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base), RING_FORCE_TO_NONPRIV_ACCESS_RD); - whitelist_reg_ext(w, - RING_CTX_TIMESTAMP(engine->mmio_base), - RING_FORCE_TO_NONPRIV_ACCESS_RD); break; default: - whitelist_reg_ext(w, - RING_CTX_TIMESTAMP(engine->mmio_base), - RING_FORCE_TO_NONPRIV_ACCESS_RD); break; } } @@ -1586,6 +1860,8 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine) { struct i915_wa_list *w = &engine->whitelist; + allow_read_ctx_timestamp(engine); + switch (engine->class) { case RENDER_CLASS: /* @@ -1602,16 +1878,17 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine) RING_FORCE_TO_NONPRIV_ACCESS_RD | RING_FORCE_TO_NONPRIV_RANGE_4); - /* Wa_1808121037:tgl */ + /* + * Wa_1808121037:tgl + * Wa_14012131227:dg1 + * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p + */ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1); /* Wa_1806527549:tgl */ whitelist_reg(w, HIZ_CHICKEN); break; default: - whitelist_reg_ext(w, - RING_CTX_TIMESTAMP(engine->mmio_base), - RING_FORCE_TO_NONPRIV_ACCESS_RD); break; } } @@ -1623,13 +1900,46 @@ static void dg1_whitelist_build(struct intel_engine_cs *engine) tgl_whitelist_build(engine); /* GEN:BUG:1409280441:dg1 */ - if (IS_DG1_GT_STEP(engine->i915, STEP_A0, STEP_B0) && + if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) && (engine->class == RENDER_CLASS || engine->class == COPY_ENGINE_CLASS)) whitelist_reg_ext(w, RING_ID(engine->mmio_base), RING_FORCE_TO_NONPRIV_ACCESS_RD); } +static void xehpsdv_whitelist_build(struct intel_engine_cs *engine) +{ + allow_read_ctx_timestamp(engine); +} + +static void dg2_whitelist_build(struct intel_engine_cs *engine) +{ + struct i915_wa_list *w = &engine->whitelist; + + allow_read_ctx_timestamp(engine); + + switch (engine->class) { + case RENDER_CLASS: + /* + * Wa_1507100340:dg2_g10 + * + * This covers 4 registers which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + 
RING_FORCE_TO_NONPRIV_ACCESS_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); + + break; + default: + break; + } +} + void intel_engine_init_whitelist(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; @@ -1637,7 +1947,11 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) wa_init_start(w, "whitelist", engine->name); - if (IS_DG1(i915)) + if (IS_DG2(i915)) + dg2_whitelist_build(engine); + else if (IS_XEHPSDV(i915)) + xehpsdv_whitelist_build(engine); + else if (IS_DG1(i915)) dg1_whitelist_build(engine); else if (GRAPHICS_VER(i915) == 12) tgl_whitelist_build(engine); @@ -1711,13 +2025,119 @@ engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) CMD_CCTL_MOCS_OVERRIDE(mocs, mocs)); } } + +static bool needs_wa_1308578152(struct intel_engine_cs *engine) +{ + u64 dss_mask = intel_sseu_get_subslices(&engine->gt->info.sseu, 0); + + return (dss_mask & GENMASK(GEN_DSS_PER_GSLICE - 1, 0)) == 0; +} + static void rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) || - IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) { + if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) { + /* Wa_14013392000:dg2_g11 */ + wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE); + + /* Wa_16011620976:dg2_g11 */ + wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) || + IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) { + /* Wa_14012419201:dg2 */ + wa_masked_en(wal, GEN9_ROW_CHICKEN4, + GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) || + IS_DG2_G11(engine->i915)) { + /* + * Wa_22012826095:dg2 + * Wa_22013059131:dg2 + */ + wa_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW, + MAXREQS_PER_BANK, + REG_FIELD_PREP(MAXREQS_PER_BANK, 2)); + + /* Wa_22013059131:dg2 */ + wa_write_or(wal, LSC_CHICKEN_BIT_0, + FORCE_1_SUB_MESSAGE_PER_FRAGMENT); + } + + /* Wa_1308578152:dg2_g10 when first gslice is fused off */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) && + needs_wa_1308578152(engine)) { + wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON, + GEN12_REPLAY_MODE_GRANULARITY); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) || + IS_DG2_G11(engine->i915)) { + /* Wa_22013037850:dg2 */ + wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, + DISABLE_128B_EVICTION_COMMAND_UDW); + + /* Wa_22012856258:dg2 */ + wa_masked_en(wal, GEN7_ROW_CHICKEN2, + GEN12_DISABLE_READ_SUPPRESSION); + + /* + * Wa_22010960976:dg2 + * Wa_14013347512:dg2 + */ + wa_masked_dis(wal, GEN12_HDC_CHICKEN0, + LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) { + /* + * Wa_1608949956:dg2_g10 + * Wa_14010198302:dg2_g10 + */ + wa_masked_en(wal, GEN8_ROW_CHICKEN, + MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE); + + /* + * Wa_14010918519:dg2_g10 + * + * LSC_CHICKEN_BIT_0 always reads back as 0 is this stepping, + * so ignoring verification. 
+ */ + wa_add(wal, LSC_CHICKEN_BIT_0_UDW, 0, + FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE, + 0, false); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) { + /* Wa_22010430635:dg2 */ + wa_masked_en(wal, + GEN9_ROW_CHICKEN4, + GEN12_DISABLE_GRF_CLEAR); + + /* Wa_14010648519:dg2 */ + wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_G11(engine->i915)) { + /* Wa_22012654132:dg2 */ + wa_add(wal, GEN10_CACHE_MODE_SS, 0, + _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC), + 0 /* write-only, so skip validation */, + true); + } + + /* Wa_14013202645:dg2 */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) + wa_write_or(wal, RT_CTRL, DIS_NULL_QUERY); + + if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || + IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { /* * Wa_1607138336:tgl[a0],dg1[a0] * Wa_1607063988:tgl[a0],dg1[a0] @@ -1727,7 +2147,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); } - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) { + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { /* * Wa_1606679103:tgl * (see also Wa_1606682166:icl) @@ -1762,7 +2182,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) } if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || - IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) || + IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */ wa_masked_en(wal, GEN7_ROW_CHICKEN2, @@ -1775,8 +2195,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); } - - if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) || + if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* * Wa_1607030317:tgl @@ -1859,15 +2278,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); - /* Wa_1407352427:icl,ehl */ - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, - PSDUNIT_CLKGATE_DIS); - - /* Wa_1406680159:icl,ehl */ - wa_write_or(wal, - SUBSLICE_UNIT_LEVEL_CLKGATE, - GWUNIT_CLKGATE_DIS); - /* * Wa_1408767742:icl[a2..forever],ehl[all] * Wa_1605460711:icl[a0..c0] @@ -2138,7 +2548,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) struct drm_i915_private *i915 = engine->i915; /* WaKBLVECSSemaphoreWaitPoll:kbl */ - if (IS_KBL_GT_STEP(i915, STEP_A0, STEP_F0)) { + if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) { wa_write(wal, RING_SEMA_WAIT_POLL(engine->mmio_base), 1); diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 8b89215afe46..bb99fc03f503 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -35,9 +35,31 @@ static void mock_timeline_unpin(struct intel_timeline *tl) atomic_dec(&tl->pin_count); } +static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) +{ + struct i915_address_space *vm = &ggtt->vm; + struct drm_i915_private *i915 = vm->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, vm, NULL); + if 
(IS_ERR(vma)) + goto err; + + return vma; + +err: + i915_gem_object_put(obj); + return vma; +} + static struct intel_ring *mock_ring(struct intel_engine_cs *engine) { - const unsigned long sz = PAGE_SIZE / 2; + const unsigned long sz = PAGE_SIZE; struct intel_ring *ring; ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); @@ -50,15 +72,11 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) ring->vaddr = (void *)(ring + 1); atomic_set(&ring->pin_count, 1); - ring->vma = i915_vma_alloc(); - if (!ring->vma) { + ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE); + if (IS_ERR(ring->vma)) { kfree(ring); return NULL; } - i915_active_init(&ring->vma->active, NULL, NULL, 0); - __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma)); - __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags); - ring->vma->node.size = sz; intel_ring_update_space(ring); @@ -67,8 +85,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) static void mock_ring_free(struct intel_ring *ring) { - i915_active_fini(&ring->vma->active); - i915_vma_free(ring->vma); + i915_vma_put(ring->vma); kfree(ring); } @@ -125,6 +142,7 @@ static void mock_context_unpin(struct intel_context *ce) static void mock_context_post_unpin(struct intel_context *ce) { + i915_vma_unpin(ce->ring->vma); } static void mock_context_destroy(struct kref *ref) @@ -169,7 +187,7 @@ static int mock_context_alloc(struct intel_context *ce) static int mock_context_pre_pin(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **unused) { - return 0; + return i915_vma_pin_ww(ce->ring->vma, ww, 0, 0, PIN_GLOBAL | PIN_HIGH); } static int mock_context_pin(struct intel_context *ce, void *unused) diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index 75569666105d..75f6efc9882f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -214,6 +214,31 @@ static int live_engine_timestamps(void *arg) return 0; } +static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness) +{ + ktime_t start, unused, dt; + + if (!intel_engine_uses_guc(engine)) + return 0; + + /* + * In GuC mode of submission, the busyness stats may get updated after + * the batch starts running. Poll for a change in busyness and timeout + * after 500 us. 
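+ * (the poll below compares intel_engine_get_busy_time() against the
+ * busyness value sampled just after the request was submitted)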
+ */ + start = ktime_get(); + while (intel_engine_get_busy_time(engine, &unused) == busyness) { + dt = ktime_get() - start; + if (dt > 500000) { + pr_err("active wait timed out %lld\n", dt); + ENGINE_TRACE(engine, "active wait time out %lld\n", dt); + return -ETIME; + } + } + + return 0; +} + static int live_engine_busy_stats(void *arg) { struct intel_gt *gt = arg; @@ -232,6 +257,7 @@ static int live_engine_busy_stats(void *arg) GEM_BUG_ON(intel_gt_pm_is_awake(gt)); for_each_engine(engine, gt, id) { struct i915_request *rq; + ktime_t busyness, dummy; ktime_t de, dt; ktime_t t[2]; @@ -274,16 +300,23 @@ static int live_engine_busy_stats(void *arg) } i915_request_add(rq); + busyness = intel_engine_get_busy_time(engine, &dummy); if (!igt_wait_for_spinner(&spin, rq)) { intel_gt_set_wedged(engine->gt); err = -ETIME; goto end; } + err = __spin_until_busier(engine, busyness); + if (err) { + GEM_TRACE_DUMP(); + goto end; + } + ENGINE_TRACE(engine, "measuring busy time\n"); preempt_disable(); de = intel_engine_get_busy_time(engine, &t[0]); - udelay(100); + mdelay(10); de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de); preempt_enable(); dt = ktime_sub(t[1], t[0]); diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 7e2d99dd012d..e5ad4d5a91c0 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -471,7 +471,8 @@ static int igt_reset_nop_engine(void *arg) count = 0; st_engine_heartbeat_disable(engine); - set_bit(I915_RESET_ENGINE + id, >->reset.flags); + GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id, + >->reset.flags)); do { int i; @@ -528,7 +529,7 @@ static int igt_reset_nop_engine(void *arg) break; } } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags); st_engine_heartbeat_enable(engine); pr_info("%s(%s): %d resets\n", __func__, engine->name, count); @@ -582,7 +583,8 @@ static int igt_reset_fail_engine(void *arg) } st_engine_heartbeat_disable(engine); - set_bit(I915_RESET_ENGINE + id, >->reset.flags); + GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id, + >->reset.flags)); force_reset_timeout(engine); err = intel_engine_reset(engine, NULL); @@ -679,7 +681,7 @@ static int igt_reset_fail_engine(void *arg) out: pr_info("%s(%s): %d resets\n", __func__, engine->name, count); skip: - clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags); st_engine_heartbeat_enable(engine); intel_context_put(ce); @@ -734,7 +736,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) reset_engine_count = i915_reset_engine_count(global, engine); st_engine_heartbeat_disable(engine); - set_bit(I915_RESET_ENGINE + id, >->reset.flags); + GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id, + >->reset.flags)); count = 0; do { struct i915_request *rq = NULL; @@ -824,7 +827,7 @@ restore: if (err) break; } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags); st_engine_heartbeat_enable(engine); pr_info("%s: Completed %lu %s resets\n", engine->name, count, active ? 
"active" : "idle"); @@ -1042,7 +1045,8 @@ static int __igt_reset_engines(struct intel_gt *gt, yield(); /* start all threads before we begin */ st_engine_heartbeat_disable_no_pm(engine); - set_bit(I915_RESET_ENGINE + id, >->reset.flags); + GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id, + >->reset.flags)); do { struct i915_request *rq = NULL; struct intel_selftest_saved_policy saved; @@ -1165,7 +1169,7 @@ restore: if (err) break; } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags); st_engine_heartbeat_enable_no_pm(engine); pr_info("i915_reset_engine(%s:%s): %lu resets\n", diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c index 12ef2837c89b..e21787301bbd 100644 --- a/drivers/gpu/drm/i915/gt/selftest_migrate.c +++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c @@ -49,6 +49,7 @@ static int copy(struct intel_migrate *migrate, if (IS_ERR(src)) return 0; + sz = src->base.size; dst = i915_gem_object_create_internal(i915, sz); if (IS_ERR(dst)) goto err_free_src; diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h index ba10bd374cee..fe5d7d261797 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h @@ -144,6 +144,7 @@ enum intel_guc_action { INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600, INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601, INTEL_GUC_ACTION_RESET_CLIENT = 0x5507, + INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A, INTEL_GUC_ACTION_LIMIT }; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 31cf9fb48c7e..1cb46098030d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -138,6 +138,8 @@ struct intel_guc { u32 ads_regset_size; /** @ads_golden_ctxt_size: size of the golden contexts in the ADS */ u32 ads_golden_ctxt_size; + /** @ads_engine_usage_size: size of engine usage in the ADS */ + u32 ads_engine_usage_size; /** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */ struct i915_vma *lrc_desc_pool; @@ -172,6 +174,34 @@ struct intel_guc { /** @send_mutex: used to serialize the intel_guc_send actions */ struct mutex send_mutex; + + /** + * @timestamp: GT timestamp object that stores a copy of the timestamp + * and adjusts it for overflow using a worker. + */ + struct { + /** + * @lock: Lock protecting the below fields and the engine stats. + */ + spinlock_t lock; + + /** + * @gt_stamp: 64 bit extended value of the GT timestamp. + */ + u64 gt_stamp; + + /** + * @ping_delay: Period for polling the GT timestamp for + * overflow. + */ + unsigned long ping_delay; + + /** + * @work: Periodic work to adjust GT timestamp, engine and + * context usage for overflows. 
+ */ + struct delayed_work work; + } timestamp; }; static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 621c893a009f..1a1edae67e4e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -26,6 +26,8 @@ * | guc_policies | * +---------------------------------------+ * | guc_gt_system_info | + * +---------------------------------------+ + * | guc_engine_usage | * +---------------------------------------+ <== static * | guc_mmio_reg[countA] (engine 0.0) | * | guc_mmio_reg[countB] (engine 0.1) | @@ -47,6 +49,7 @@ struct __guc_ads_blob { struct guc_ads ads; struct guc_policies policies; struct guc_gt_system_info system_info; + struct guc_engine_usage engine_usage; /* From here on, location is dynamic! Refer to above diagram. */ struct guc_mmio_reg regset[0]; } __packed; @@ -628,3 +631,21 @@ void intel_guc_ads_reset(struct intel_guc *guc) guc_ads_private_data_reset(guc); } + +u32 intel_guc_engine_usage_offset(struct intel_guc *guc) +{ + struct __guc_ads_blob *blob = guc->ads_blob; + u32 base = intel_guc_ggtt_offset(guc, guc->ads_vma); + u32 offset = base + ptr_offset(blob, engine_usage); + + return offset; +} + +struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine) +{ + struct intel_guc *guc = &engine->gt->uc.guc; + struct __guc_ads_blob *blob = guc->ads_blob; + u8 guc_class = engine_class_to_guc_class(engine->class); + + return &blob->engine_usage.engines[guc_class][ilog2(engine->logical_mask)]; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h index 3d85051d57e4..e74c110facff 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h @@ -6,8 +6,11 @@ #ifndef _INTEL_GUC_ADS_H_ #define _INTEL_GUC_ADS_H_ +#include <linux/types.h> + struct intel_guc; struct drm_printer; +struct intel_engine_cs; int intel_guc_ads_create(struct intel_guc *guc); void intel_guc_ads_destroy(struct intel_guc *guc); @@ -15,5 +18,7 @@ void intel_guc_ads_init_late(struct intel_guc *guc); void intel_guc_ads_reset(struct intel_guc *guc); void intel_guc_ads_print_policy_info(struct intel_guc *guc, struct drm_printer *p); +struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine); +u32 intel_guc_engine_usage_offset(struct intel_guc *guc); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 722933e26347..7072e30e99f4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -294,6 +294,19 @@ struct guc_ads { u32 reserved[15]; } __packed; +/* Engine usage stats */ +struct guc_engine_usage_record { + u32 current_context_index; + u32 last_switch_in_stamp; + u32 reserved0; + u32 total_runtime; + u32 reserved1[4]; +} __packed; + +struct guc_engine_usage { + struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; +} __packed; + /* GuC logging structures */ enum guc_log_buffer_type { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 65a3e7fdb2b2..22c1c12369f2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -79,29 +79,6 @@ static void slpc_mem_set_disabled(struct slpc_shared_data *data, slpc_mem_set_param(data, enable_id, 0); } -int 
intel_guc_slpc_init(struct intel_guc_slpc *slpc) -{ - struct intel_guc *guc = slpc_to_guc(slpc); - struct drm_i915_private *i915 = slpc_to_i915(slpc); - u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); - int err; - - GEM_BUG_ON(slpc->vma); - - err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr); - if (unlikely(err)) { - drm_err(&i915->drm, - "Failed to allocate SLPC struct (err=%pe)\n", - ERR_PTR(err)); - return err; - } - - slpc->max_freq_softlimit = 0; - slpc->min_freq_softlimit = 0; - - return err; -} - static u32 slpc_get_state(struct intel_guc_slpc *slpc) { struct slpc_shared_data *data; @@ -203,6 +180,86 @@ static int slpc_unset_param(struct intel_guc_slpc *slpc, return guc_action_slpc_unset_param(guc, id); } +static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq) +{ + struct drm_i915_private *i915 = slpc_to_i915(slpc); + struct intel_guc *guc = slpc_to_guc(slpc); + intel_wakeref_t wakeref; + int ret = 0; + + lockdep_assert_held(&slpc->lock); + + if (!intel_guc_is_ready(guc)) + return -ENODEV; + + /* + * This function is a little different from + * intel_guc_slpc_set_min_freq(). Softlimit will not be updated + * here since this is used to temporarily change min freq, + * for example, during a waitboost. Caller is responsible for + * checking bounds. + */ + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + ret = slpc_set_param(slpc, + SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, + freq); + if (ret) + drm_err(&i915->drm, "Unable to force min freq to %u: %d", + freq, ret); + } + + return ret; +} + +static void slpc_boost_work(struct work_struct *work) +{ + struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work); + + /* + * Raise min freq to boost. It's possible that + * this is greater than current max. But it will + * certainly be limited by RP0. An error setting + * the min param is not fatal.
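+ * (boost_freq itself is validated against [min_freq, rp0_freq] by
+ * intel_guc_slpc_set_boost_freq(), so no bounds check is needed here)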
+ */ + mutex_lock(&slpc->lock); + if (atomic_read(&slpc->num_waiters)) { + slpc_force_min_freq(slpc, slpc->boost_freq); + slpc->num_boosts++; + } + mutex_unlock(&slpc->lock); +} + +int intel_guc_slpc_init(struct intel_guc_slpc *slpc) +{ + struct intel_guc *guc = slpc_to_guc(slpc); + struct drm_i915_private *i915 = slpc_to_i915(slpc); + u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); + int err; + + GEM_BUG_ON(slpc->vma); + + err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr); + if (unlikely(err)) { + drm_err(&i915->drm, + "Failed to allocate SLPC struct (err=%pe)\n", + ERR_PTR(err)); + return err; + } + + slpc->max_freq_softlimit = 0; + slpc->min_freq_softlimit = 0; + + slpc->boost_freq = 0; + atomic_set(&slpc->num_waiters, 0); + slpc->num_boosts = 0; + + mutex_init(&slpc->lock); + INIT_WORK(&slpc->boost_work, slpc_boost_work); + + return err; +} + static const char *slpc_global_state_to_string(enum slpc_global_state state) { switch (state) { @@ -393,7 +450,11 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val) val > slpc->max_freq_softlimit) return -EINVAL; + /* Need a lock now since waitboost can be modifying min as well */ + mutex_lock(&slpc->lock); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + ret = slpc_set_param(slpc, SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, val); @@ -406,6 +467,8 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val) if (!ret) slpc->min_freq_softlimit = val; + mutex_unlock(&slpc->lock); + return ret; } @@ -522,6 +585,9 @@ static void slpc_get_rp_values(struct intel_guc_slpc *slpc) GT_FREQUENCY_MULTIPLIER; slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) * GT_FREQUENCY_MULTIPLIER; + + if (!slpc->boost_freq) + slpc->boost_freq = slpc->rp0_freq; } /* @@ -588,6 +654,47 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) return 0; } +int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val) +{ + int ret = 0; + + if (val < slpc->min_freq || val > slpc->rp0_freq) + return -EINVAL; + + mutex_lock(&slpc->lock); + + if (slpc->boost_freq != val) { + /* Apply only if there are active waiters */ + if (atomic_read(&slpc->num_waiters)) { + ret = slpc_force_min_freq(slpc, val); + if (ret) { + ret = -EIO; + goto done; + } + } + + slpc->boost_freq = val; + } + +done: + mutex_unlock(&slpc->lock); + return ret; +} + +void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc) +{ + /* + * Return min back to the softlimit. + * This is called during request retire, + * so we don't need to fail that if the + * set_param fails. 
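+ * Only the last waiter out actually restores the softlimit; see the
+ * atomic_dec_and_test() below.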
+ */ + mutex_lock(&slpc->lock); + if (atomic_dec_and_test(&slpc->num_waiters)) + slpc_force_min_freq(slpc, slpc->min_freq_softlimit); + mutex_unlock(&slpc->lock); +} + int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p) { struct drm_i915_private *i915 = slpc_to_i915(slpc); @@ -611,6 +718,8 @@ int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p slpc_decode_max_freq(slpc)); drm_printf(p, "\tMin freq: %u MHz\n", slpc_decode_min_freq(slpc)); + drm_printf(p, "\twaitboosts: %u\n", + slpc->num_boosts); } } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h index e45054d5b9b4..0caa8fee3c04 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h @@ -34,9 +34,12 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc); void intel_guc_slpc_fini(struct intel_guc_slpc *slpc); int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val); int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val); +int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val); int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val); int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val); int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p); void intel_guc_pm_intrmsk_enable(struct intel_gt *gt); +void intel_guc_slpc_boost(struct intel_guc_slpc *slpc); +void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h index 41d13527666f..bf5b9a563c09 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h @@ -6,6 +6,9 @@ #ifndef _INTEL_GUC_SLPC_TYPES_H_ #define _INTEL_GUC_SLPC_TYPES_H_ +#include <linux/atomic.h> +#include <linux/workqueue.h> +#include <linux/mutex.h> #include <linux/types.h> #define SLPC_RESET_TIMEOUT_MS 5 @@ -20,10 +23,20 @@ struct intel_guc_slpc { u32 min_freq; u32 rp0_freq; u32 rp1_freq; + u32 boost_freq; /* frequency softlimits */ u32 min_freq_softlimit; u32 max_freq_softlimit; + + /* Protects set/reset of boost freq + * and value of num_waiters + */ + struct mutex lock; + + struct work_struct boost_work; + atomic_t num_waiters; + u32 num_boosts; }; #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index c48557dfa04c..1f9d4fde421f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -13,6 +13,7 @@ #include "gt/intel_engine_heartbeat.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_clock_utils.h" #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm.h" #include "gt/intel_gt_requests.h" @@ -21,6 +22,7 @@ #include "gt/intel_mocs.h" #include "gt/intel_ring.h" +#include "intel_guc_ads.h" #include "intel_guc_submission.h" #include "i915_drv.h" @@ -1077,6 +1079,271 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) xa_unlock_irqrestore(&guc->context_lookup, flags); } +/* + * GuC stores busyness stats for each engine at context in/out boundaries. A + * context 'in' logs execution start time, 'out' adds in -> out delta to total. + * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with + * GuC. + * + * __i915_pmu_event_read samples engine busyness. 
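(sampling only reads the shared-memory record and a local timestamp register; no message is sent to GuC).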
When sampling, if context id + * is valid (!= ~0) and start is non-zero, the engine is considered to be + * active. For an active engine total busyness = total + (now - start), where + * 'now' is the time at which the busyness is sampled. For an inactive engine, + * total busyness = total. + * + * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain. + * + * The start and total values provided by GuC are 32 bits and wrap around in a + * few minutes. Since perf pmu provides busyness as 64 bit monotonically + * increasing ns values, there is a need for this implementation to account for + * overflows and extend the GuC provided values to 64 bits before returning + * busyness to the user. In order to do that, a worker runs periodically with a + * period of 1/8th the time it takes for the timestamp to wrap (i.e. once in + * 27 seconds for a gt clock frequency of 19.2 MHz). + */ + +#define WRAP_TIME_CLKS U32_MAX +#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3) + +static void +__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start) +{ + u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); + u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp); + + if (new_start == lower_32_bits(*prev_start)) + return; + + if (new_start < gt_stamp_last && + (new_start - gt_stamp_last) <= POLL_TIME_CLKS) + gt_stamp_hi++; + + if (new_start > gt_stamp_last && + (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi) + gt_stamp_hi--; + + *prev_start = ((u64)gt_stamp_hi << 32) | new_start; +} + +static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) +{ + struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine); + struct intel_engine_guc_stats *stats = &engine->stats.guc; + struct intel_guc *guc = &engine->gt->uc.guc; + u32 last_switch = rec->last_switch_in_stamp; + u32 ctx_id = rec->current_context_index; + u32 total = rec->total_runtime; + + lockdep_assert_held(&guc->timestamp.lock); + + stats->running = ctx_id != ~0U && last_switch; + if (stats->running) + __extend_last_switch(guc, &stats->start_gt_clk, last_switch); + + /* + * Instead of adjusting the total for overflow, just add the + * difference from the previous sample to stats->total_gt_clks. + */ + if (total && total != ~0U) { + stats->total_gt_clks += (u32)(total - stats->prev_total); + stats->prev_total = total; + } +} + +static void guc_update_pm_timestamp(struct intel_guc *guc, + struct intel_engine_cs *engine, + ktime_t *now) +{ + u32 gt_stamp_now, gt_stamp_hi; + + lockdep_assert_held(&guc->timestamp.lock); + + gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); + gt_stamp_now = intel_uncore_read(engine->uncore, + RING_TIMESTAMP(engine->mmio_base)); + *now = ktime_get(); + + if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp)) + gt_stamp_hi++; + + guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now; +} + +/* + * Unlike the execlist mode of submission, total and active times are in terms + * of gt clocks. The *now parameter is retained to return the cpu time at which the + * busyness was sampled.
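+ *
+ * As a worked illustration (numbers assumed here, not taken from the
+ * patch): with total_gt_clks = 1000 and a running context where
+ * gt_stamp - start_gt_clk = 500, a 19.2 MHz gt clock yields
+ * (1000 + 500) / 19.2e6 s ~= 78.1 us of busyness, converted to ns via
+ * intel_gt_clock_interval_to_ns().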
+ */ +static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) +{ + struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc; + struct i915_gpu_error *gpu_error = &engine->i915->gpu_error; + struct intel_gt *gt = engine->gt; + struct intel_guc *guc = >->uc.guc; + u64 total, gt_stamp_saved; + unsigned long flags; + u32 reset_count; + bool in_reset; + + spin_lock_irqsave(&guc->timestamp.lock, flags); + + /* + * If a reset happened, we risk reading partially updated engine + * busyness from GuC, so we just use the driver stored copy of busyness. + * Synchronize with gt reset using reset_count and the + * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count + * after I915_RESET_BACKOFF flag, so ensure that the reset_count is + * usable by checking the flag afterwards. + */ + reset_count = i915_reset_count(gpu_error); + in_reset = test_bit(I915_RESET_BACKOFF, >->reset.flags); + + *now = ktime_get(); + + /* + * The active busyness depends on start_gt_clk and gt_stamp. + * gt_stamp is updated by i915 only when gt is awake and the + * start_gt_clk is derived from GuC state. To get a consistent + * view of activity, we query the GuC state only if gt is awake. + */ + if (intel_gt_pm_get_if_awake(gt) && !in_reset) { + stats_saved = *stats; + gt_stamp_saved = guc->timestamp.gt_stamp; + guc_update_engine_gt_clks(engine); + guc_update_pm_timestamp(guc, engine, now); + intel_gt_pm_put_async(gt); + if (i915_reset_count(gpu_error) != reset_count) { + *stats = stats_saved; + guc->timestamp.gt_stamp = gt_stamp_saved; + } + } + + total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks); + if (stats->running) { + u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk; + + total += intel_gt_clock_interval_to_ns(gt, clk); + } + + spin_unlock_irqrestore(&guc->timestamp.lock, flags); + + return ns_to_ktime(total); +} + +static void __reset_guc_busyness_stats(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long flags; + ktime_t unused; + + cancel_delayed_work_sync(&guc->timestamp.work); + + spin_lock_irqsave(&guc->timestamp.lock, flags); + + for_each_engine(engine, gt, id) { + guc_update_pm_timestamp(guc, engine, &unused); + guc_update_engine_gt_clks(engine); + engine->stats.guc.prev_total = 0; + } + + spin_unlock_irqrestore(&guc->timestamp.lock, flags); +} + +static void __update_guc_busyness_stats(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long flags; + ktime_t unused; + + spin_lock_irqsave(&guc->timestamp.lock, flags); + for_each_engine(engine, gt, id) { + guc_update_pm_timestamp(guc, engine, &unused); + guc_update_engine_gt_clks(engine); + } + spin_unlock_irqrestore(&guc->timestamp.lock, flags); +} + +static void guc_timestamp_ping(struct work_struct *wrk) +{ + struct intel_guc *guc = container_of(wrk, typeof(*guc), + timestamp.work.work); + struct intel_uc *uc = container_of(guc, typeof(*uc), guc); + struct intel_gt *gt = guc_to_gt(guc); + intel_wakeref_t wakeref; + int srcu, ret; + + /* + * Synchronize with gt reset to make sure the worker does not + * corrupt the engine/guc stats. 
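+ * If a reset is already in flight, intel_gt_reset_trylock() fails and
+ * this update is simply skipped.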
+ */ + ret = intel_gt_reset_trylock(gt, &srcu); + if (ret) + return; + + with_intel_runtime_pm(>->i915->runtime_pm, wakeref) + __update_guc_busyness_stats(guc); + + intel_gt_reset_unlock(gt, srcu); + + mod_delayed_work(system_highpri_wq, &guc->timestamp.work, + guc->timestamp.ping_delay); +} + +static int guc_action_enable_usage_stats(struct intel_guc *guc) +{ + u32 offset = intel_guc_engine_usage_offset(guc); + u32 action[] = { + INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF, + offset, + 0, + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static void guc_init_engine_stats(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + intel_wakeref_t wakeref; + + mod_delayed_work(system_highpri_wq, &guc->timestamp.work, + guc->timestamp.ping_delay); + + with_intel_runtime_pm(>->i915->runtime_pm, wakeref) { + int ret = guc_action_enable_usage_stats(guc); + + if (ret) + drm_err(>->i915->drm, + "Failed to enable usage stats: %d!\n", ret); + } +} + +void intel_guc_busyness_park(struct intel_gt *gt) +{ + struct intel_guc *guc = >->uc.guc; + + if (!guc_submission_initialized(guc)) + return; + + cancel_delayed_work(&guc->timestamp.work); + __update_guc_busyness_stats(guc); +} + +void intel_guc_busyness_unpark(struct intel_gt *gt) +{ + struct intel_guc *guc = >->uc.guc; + + if (!guc_submission_initialized(guc)) + return; + + mod_delayed_work(system_highpri_wq, &guc->timestamp.work, + guc->timestamp.ping_delay); +} + static inline bool submission_disabled(struct intel_guc *guc) { @@ -1138,6 +1405,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc) intel_gt_park_heartbeats(guc_to_gt(guc)); disable_submission(guc); guc->interrupts.disable(guc); + __reset_guc_busyness_stats(guc); /* Flush IRQ handler */ spin_lock_irq(&guc_to_gt(guc)->irq_lock); @@ -1484,6 +1752,7 @@ static void destroyed_worker_func(struct work_struct *w); */ int intel_guc_submission_init(struct intel_guc *guc) { + struct intel_gt *gt = guc_to_gt(guc); int ret; if (guc->lrc_desc_pool) @@ -1512,6 +1781,10 @@ int intel_guc_submission_init(struct intel_guc *guc) if (!guc->submission_state.guc_ids_bitmap) return -ENOMEM; + spin_lock_init(&guc->timestamp.lock); + INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping); + guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ; + return 0; } @@ -3369,7 +3642,9 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) engine->emit_flush = gen12_emit_flush_xcs; } engine->set_default_submission = guc_set_default_submission; + engine->busyness = guc_engine_busyness; + engine->flags |= I915_ENGINE_SUPPORTS_STATS; engine->flags |= I915_ENGINE_HAS_PREEMPTION; engine->flags |= I915_ENGINE_HAS_TIMESLICES; @@ -3468,6 +3743,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine) void intel_guc_submission_enable(struct intel_guc *guc) { guc_init_lrc_mapping(guc); + guc_init_engine_stats(guc); } void intel_guc_submission_disable(struct intel_guc *guc) @@ -3695,6 +3971,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc, const u32 *msg, u32 len) { struct intel_context *ce; + unsigned long flags; int desc_idx; if (unlikely(len != 1)) { @@ -3703,11 +3980,24 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc, } desc_idx = msg[0]; + + /* + * The context lookup uses the xarray but lookups only require an RCU lock + * not the full spinlock. So take the lock explicitly and keep it until the + * context has been reference count locked to ensure it can't be destroyed + * asynchronously until the reset is done. 
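+ * (the reference taken under the xa_lock below is dropped with
+ * intel_context_put() once guc_handle_context_reset() returns)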
+ */ + xa_lock_irqsave(&guc->context_lookup, flags); ce = g2h_context_lookup(guc, desc_idx); + if (ce) + intel_context_get(ce); + xa_unlock_irqrestore(&guc->context_lookup, flags); + if (unlikely(!ce)) return -EPROTO; guc_handle_context_reset(guc, ce); + intel_context_put(ce); return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index c7ef44fa0c36..5a95a9f0a8e3 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -28,6 +28,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, void intel_guc_dump_active_requests(struct intel_engine_cs *engine, struct i915_request *hung_rq, struct drm_printer *m); +void intel_guc_busyness_park(struct intel_gt *gt); +void intel_guc_busyness_unpark(struct intel_gt *gt); bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve);