author    | Francois Dugast <francois.dugast@intel.com> | 2023-07-27 16:55:29 +0200
committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2023-12-21 17:39:17 +0100
commit    | 99fea6828879381405dba598627aea79fa6edd78 (patch)
tree      | 1ae501df46eb3a5274eec4f4a5006eae1ddf08b9 /drivers/gpu/drm/xe
parent    | drm/xe/macro: Remove unused constant (diff)
drm/xe: Prefer WARN() over BUG() to avoid crashing the kernel
Replace calls to XE_BUG_ON() with calls to XE_WARN_ON(), which in turn calls
WARN() instead of BUG(). BUG() crashes the kernel and should be reserved for
catastrophic, unrecoverable failures where continuing is impossible, which is
not the case here.
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
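As context for the change (not part of the patch), the difference between the two assertion styles comes down to the stock <linux/bug.h> semantics: BUG()/BUG_ON() oops and halt the kernel when the check fails, while WARN()/WARN_ON() print a stack trace and evaluate to the condition, so the caller can bail out and keep running. Below is a minimal sketch assuming those standard semantics; the helper name and argument are hypothetical, and XE_WARN_ON() simply maps to WARN_ON() per the xe_macros.h hunk in the diff.

/*
 * Minimal sketch, not taken from the patch: it contrasts BUG_ON() and
 * WARN_ON() from <linux/bug.h>.  BUG_ON() crashes the kernel when the
 * condition is true; WARN_ON() prints a stack trace and evaluates to the
 * condition, so callers can recover.  XE_WARN_ON() is defined as WARN_ON()
 * in xe_macros.h.  The helper below is purely illustrative.
 */
#include <linux/bug.h>
#include "xe_macros.h"

static int xe_example_validate(void *obj)
{
	/* Old style: BUG_ON(!obj) would crash the whole kernel here. */

	/* New style: warn loudly with a backtrace, then fail the call. */
	if (XE_WARN_ON(!obj))
		return -EINVAL;

	return 0;
}

The tree-wide replacement in the diff below is mechanical; the only definition change is the removal of XE_BUG_ON from xe_macros.h.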
Diffstat (limited to 'drivers/gpu/drm/xe')
33 files changed, 218 insertions, 219 deletions
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c index f9b6b7adf99f..b15a7cb7db4c 100644 --- a/drivers/gpu/drm/xe/xe_bb.c +++ b/drivers/gpu/drm/xe/xe_bb.c @@ -78,7 +78,7 @@ struct xe_sched_job *xe_bb_create_wa_job(struct xe_engine *wa_eng, { u64 addr = batch_base_ofs + drm_suballoc_soffset(bb->bo); - XE_BUG_ON(!(wa_eng->vm->flags & XE_VM_FLAG_MIGRATION)); + XE_WARN_ON(!(wa_eng->vm->flags & XE_VM_FLAG_MIGRATION)); return __xe_bb_create_job(wa_eng, bb, &addr); } @@ -94,8 +94,8 @@ struct xe_sched_job *xe_bb_create_migration_job(struct xe_engine *kernel_eng, 4 * second_idx, }; - BUG_ON(second_idx > bb->len); - BUG_ON(!(kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION)); + XE_WARN_ON(second_idx > bb->len); + XE_WARN_ON(!(kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION)); return __xe_bb_create_job(kernel_eng, bb, addr); } @@ -105,7 +105,7 @@ struct xe_sched_job *xe_bb_create_job(struct xe_engine *kernel_eng, { u64 addr = xe_sa_bo_gpu_addr(bb->bo); - BUG_ON(kernel_eng->vm && kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION); + XE_WARN_ON(kernel_eng->vm && kernel_eng->vm->flags & XE_VM_FLAG_MIGRATION); return __xe_bb_create_job(kernel_eng, bb, &addr); } diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 65b56e7a2fde..cf0faaefd03d 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -103,7 +103,7 @@ static bool xe_bo_is_user(struct xe_bo *bo) static struct xe_tile * mem_type_to_tile(struct xe_device *xe, u32 mem_type) { - XE_BUG_ON(mem_type != XE_PL_STOLEN && !mem_type_is_vram(mem_type)); + XE_WARN_ON(mem_type != XE_PL_STOLEN && !mem_type_is_vram(mem_type)); return &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; } @@ -142,7 +142,7 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo, struct ttm_place place = { .mem_type = mem_type }; u64 io_size = tile->mem.vram.io_size; - XE_BUG_ON(!tile->mem.vram.usable_size); + XE_WARN_ON(!tile->mem.vram.usable_size); /* * For eviction / restore on suspend / resume objects @@ -285,7 +285,7 @@ static int xe_tt_map_sg(struct ttm_tt *tt) unsigned long num_pages = tt->num_pages; int ret; - XE_BUG_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL); + XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL); if (xe_tt->sg) return 0; @@ -544,8 +544,8 @@ static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo, ttm); struct sg_table *sg; - XE_BUG_ON(!attach); - XE_BUG_ON(!ttm_bo->ttm); + XE_WARN_ON(!attach); + XE_WARN_ON(!ttm_bo->ttm); if (new_res->mem_type == XE_PL_SYSTEM) goto out; @@ -707,8 +707,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, else if (mem_type_is_vram(old_mem_type)) tile = mem_type_to_tile(xe, old_mem_type); - XE_BUG_ON(!tile); - XE_BUG_ON(!tile->migrate); + XE_WARN_ON(!tile); + XE_WARN_ON(!tile->migrate); trace_xe_bo_move(bo); xe_device_mem_access_get(xe); @@ -738,7 +738,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, goto out; } - XE_BUG_ON(new_mem->start != + XE_WARN_ON(new_mem->start != bo->placements->fpfn); iosys_map_set_vaddr_iomem(&bo->vmap, new_addr); @@ -1198,7 +1198,7 @@ struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, int err; /* Only kernel objects should set GT */ - XE_BUG_ON(tile && type != ttm_bo_type_kernel); + XE_WARN_ON(tile && type != ttm_bo_type_kernel); if (XE_WARN_ON(!size)) return ERR_PTR(-EINVAL); @@ -1350,7 +1350,7 @@ xe_bo_create_locked_range(struct xe_device *xe, if (!tile && flags & XE_BO_CREATE_STOLEN_BIT) tile = xe_device_get_root_tile(xe); - XE_BUG_ON(!tile); + 
XE_WARN_ON(!tile); if (flags & XE_BO_CREATE_STOLEN_BIT && flags & XE_BO_FIXED_PLACEMENT_BIT) { @@ -1481,8 +1481,8 @@ int xe_bo_pin_external(struct xe_bo *bo) struct xe_device *xe = xe_bo_device(bo); int err; - XE_BUG_ON(bo->vm); - XE_BUG_ON(!xe_bo_is_user(bo)); + XE_WARN_ON(bo->vm); + XE_WARN_ON(!xe_bo_is_user(bo)); if (!xe_bo_is_pinned(bo)) { err = xe_bo_validate(bo, NULL, false); @@ -1514,20 +1514,20 @@ int xe_bo_pin(struct xe_bo *bo) int err; /* We currently don't expect user BO to be pinned */ - XE_BUG_ON(xe_bo_is_user(bo)); + XE_WARN_ON(xe_bo_is_user(bo)); /* Pinned object must be in GGTT or have pinned flag */ - XE_BUG_ON(!(bo->flags & (XE_BO_CREATE_PINNED_BIT | + XE_WARN_ON(!(bo->flags & (XE_BO_CREATE_PINNED_BIT | XE_BO_CREATE_GGTT_BIT))); /* * No reason we can't support pinning imported dma-bufs we just don't * expect to pin an imported dma-buf. */ - XE_BUG_ON(bo->ttm.base.import_attach); + XE_WARN_ON(bo->ttm.base.import_attach); /* We only expect at most 1 pin */ - XE_BUG_ON(xe_bo_is_pinned(bo)); + XE_WARN_ON(xe_bo_is_pinned(bo)); err = xe_bo_validate(bo, NULL, false); if (err) @@ -1543,7 +1543,7 @@ int xe_bo_pin(struct xe_bo *bo) struct ttm_place *place = &(bo->placements[0]); if (mem_type_is_vram(place->mem_type)) { - XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS)); + XE_WARN_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS)); place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) - vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT; @@ -1580,9 +1580,9 @@ void xe_bo_unpin_external(struct xe_bo *bo) { struct xe_device *xe = xe_bo_device(bo); - XE_BUG_ON(bo->vm); - XE_BUG_ON(!xe_bo_is_pinned(bo)); - XE_BUG_ON(!xe_bo_is_user(bo)); + XE_WARN_ON(bo->vm); + XE_WARN_ON(!xe_bo_is_pinned(bo)); + XE_WARN_ON(!xe_bo_is_user(bo)); if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) { spin_lock(&xe->pinned.lock); @@ -1603,15 +1603,15 @@ void xe_bo_unpin(struct xe_bo *bo) { struct xe_device *xe = xe_bo_device(bo); - XE_BUG_ON(bo->ttm.base.import_attach); - XE_BUG_ON(!xe_bo_is_pinned(bo)); + XE_WARN_ON(bo->ttm.base.import_attach); + XE_WARN_ON(!xe_bo_is_pinned(bo)); if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) && bo->flags & XE_BO_INTERNAL_TEST)) { struct ttm_place *place = &(bo->placements[0]); if (mem_type_is_vram(place->mem_type)) { - XE_BUG_ON(list_empty(&bo->pinned_link)); + XE_WARN_ON(list_empty(&bo->pinned_link)); spin_lock(&xe->pinned.lock); list_del_init(&bo->pinned_link); @@ -1675,12 +1675,12 @@ dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size) struct xe_res_cursor cur; u64 page; - XE_BUG_ON(page_size > PAGE_SIZE); + XE_WARN_ON(page_size > PAGE_SIZE); page = offset >> PAGE_SHIFT; offset &= (PAGE_SIZE - 1); if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) { - XE_BUG_ON(!bo->ttm.ttm); + XE_WARN_ON(!bo->ttm.ttm); xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT, page_size, &cur); @@ -1874,7 +1874,7 @@ int xe_bo_lock(struct xe_bo *bo, struct ww_acquire_ctx *ww, LIST_HEAD(objs); LIST_HEAD(dups); - XE_BUG_ON(!ww); + XE_WARN_ON(!ww); tv_bo.num_shared = num_resv; tv_bo.bo = &bo->ttm; diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index a9a32d680208..76b8c836deb7 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -166,7 +166,7 @@ void xe_bo_unlock(struct xe_bo *bo, struct ww_acquire_ctx *ww); static inline void xe_bo_unlock_vm_held(struct xe_bo *bo) { if (bo) { - XE_BUG_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm)); + XE_WARN_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm)); if (bo->vm) 
xe_vm_assert_held(bo->vm); else @@ -178,8 +178,8 @@ static inline void xe_bo_lock_no_vm(struct xe_bo *bo, struct ww_acquire_ctx *ctx) { if (bo) { - XE_BUG_ON(bo->vm || (bo->ttm.type != ttm_bo_type_sg && - bo->ttm.base.resv != &bo->ttm.base._resv)); + XE_WARN_ON(bo->vm || (bo->ttm.type != ttm_bo_type_sg && + bo->ttm.base.resv != &bo->ttm.base._resv)); dma_resv_lock(bo->ttm.base.resv, ctx); } } @@ -187,8 +187,8 @@ static inline void xe_bo_lock_no_vm(struct xe_bo *bo, static inline void xe_bo_unlock_no_vm(struct xe_bo *bo) { if (bo) { - XE_BUG_ON(bo->vm || (bo->ttm.type != ttm_bo_type_sg && - bo->ttm.base.resv != &bo->ttm.base._resv)); + XE_WARN_ON(bo->vm || (bo->ttm.type != ttm_bo_type_sg && + bo->ttm.base.resv != &bo->ttm.base._resv)); dma_resv_unlock(bo->ttm.base.resv); } } @@ -228,8 +228,8 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size) static inline u32 xe_bo_ggtt_addr(struct xe_bo *bo) { - XE_BUG_ON(bo->ggtt_node.size > bo->size); - XE_BUG_ON(bo->ggtt_node.start + bo->ggtt_node.size > (1ull << 32)); + XE_WARN_ON(bo->ggtt_node.size > bo->size); + XE_WARN_ON(bo->ggtt_node.start + bo->ggtt_node.size > (1ull << 32)); return bo->ggtt_node.start; } diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c index f559a7f3eb3e..0d5c3a208ab4 100644 --- a/drivers/gpu/drm/xe/xe_bo_evict.c +++ b/drivers/gpu/drm/xe/xe_bo_evict.c @@ -160,8 +160,8 @@ int xe_bo_restore_kernel(struct xe_device *xe) * We expect validate to trigger a move VRAM and our move code * should setup the iosys map. */ - XE_BUG_ON(iosys_map_is_null(&bo->vmap)); - XE_BUG_ON(!xe_bo_is_vram(bo)); + XE_WARN_ON(iosys_map_is_null(&bo->vmap)); + XE_WARN_ON(!xe_bo_is_vram(bo)); xe_bo_put(bo); diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c index b15d095b395b..a4d9531e3516 100644 --- a/drivers/gpu/drm/xe/xe_execlist.c +++ b/drivers/gpu/drm/xe/xe_execlist.c @@ -50,10 +50,10 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc, lrc_desc = xe_lrc_descriptor(lrc); if (GRAPHICS_VERx100(xe) >= 1250) { - XE_BUG_ON(!FIELD_FIT(XEHP_SW_CTX_ID, ctx_id)); + XE_WARN_ON(!FIELD_FIT(XEHP_SW_CTX_ID, ctx_id)); lrc_desc |= FIELD_PREP(XEHP_SW_CTX_ID, ctx_id); } else { - XE_BUG_ON(!FIELD_FIT(GEN11_SW_CTX_ID, ctx_id)); + XE_WARN_ON(!FIELD_FIT(GEN11_SW_CTX_ID, ctx_id)); lrc_desc |= FIELD_PREP(GEN11_SW_CTX_ID, ctx_id); } @@ -213,9 +213,9 @@ static void xe_execlist_make_active(struct xe_execlist_engine *exl) struct xe_execlist_port *port = exl->port; enum xe_engine_priority priority = exl->active_priority; - XE_BUG_ON(priority == XE_ENGINE_PRIORITY_UNSET); - XE_BUG_ON(priority < 0); - XE_BUG_ON(priority >= ARRAY_SIZE(exl->port->active)); + XE_WARN_ON(priority == XE_ENGINE_PRIORITY_UNSET); + XE_WARN_ON(priority < 0); + XE_WARN_ON(priority >= ARRAY_SIZE(exl->port->active)); spin_lock_irq(&port->lock); @@ -321,7 +321,7 @@ static int execlist_engine_init(struct xe_engine *e) struct xe_device *xe = gt_to_xe(e->gt); int err; - XE_BUG_ON(xe_device_guc_submission_enabled(xe)); + XE_WARN_ON(xe_device_guc_submission_enabled(xe)); drm_info(&xe->drm, "Enabling execlist submission (GuC submission disabled)\n"); @@ -387,7 +387,7 @@ static void execlist_engine_fini_async(struct work_struct *w) struct xe_execlist_engine *exl = e->execlist; unsigned long flags; - XE_BUG_ON(xe_device_guc_submission_enabled(gt_to_xe(e->gt))); + XE_WARN_ON(xe_device_guc_submission_enabled(gt_to_xe(e->gt))); spin_lock_irqsave(&exl->port->lock, flags); if (WARN_ON(exl->active_priority != XE_ENGINE_PRIORITY_UNSET)) diff 
--git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c index aba0784b608e..e563de862581 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.c +++ b/drivers/gpu/drm/xe/xe_force_wake.c @@ -45,7 +45,7 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw) mutex_init(&fw->lock); /* Assuming gen11+ so assert this assumption is correct */ - XE_BUG_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11); + XE_WARN_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11); if (xe->info.graphics_verx100 >= 1270) { domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT], @@ -67,7 +67,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) int i, j; /* Assuming gen11+ so assert this assumption is correct */ - XE_BUG_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11); + XE_WARN_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11); if (!xe_gt_is_media_type(gt)) domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER], diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h index 7c534cdd5fe9..7f304704190e 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.h +++ b/drivers/gpu/drm/xe/xe_force_wake.h @@ -24,7 +24,7 @@ static inline int xe_force_wake_ref(struct xe_force_wake *fw, enum xe_force_wake_domains domain) { - XE_BUG_ON(!domain); + XE_WARN_ON(!domain); return fw->domains[ffs(domain) - 1].ref; } @@ -32,7 +32,7 @@ static inline void xe_force_wake_assert_held(struct xe_force_wake *fw, enum xe_force_wake_domains domain) { - XE_BUG_ON(!(fw->awake_domains & domain)); + XE_WARN_ON(!(fw->awake_domains & domain)); } #endif diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index bf46b90a76ad..286f36b9e229 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -58,8 +58,8 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev) void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte) { - XE_BUG_ON(addr & XE_PTE_MASK); - XE_BUG_ON(addr >= ggtt->size); + XE_WARN_ON(addr & XE_PTE_MASK); + XE_WARN_ON(addr >= ggtt->size); writeq(pte, &ggtt->gsm[addr >> XE_PTE_SHIFT]); } @@ -69,7 +69,7 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size) u64 end = start + size - 1; u64 scratch_pte; - XE_BUG_ON(start >= end); + XE_WARN_ON(start >= end); if (ggtt->scratch) scratch_pte = xe_ggtt_pte_encode(ggtt->scratch, 0); @@ -266,7 +266,7 @@ void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix) for (addr = 0; addr < ggtt->size; addr += XE_PAGE_SIZE) { unsigned int i = addr / XE_PAGE_SIZE; - XE_BUG_ON(addr > U32_MAX); + XE_WARN_ON(addr > U32_MAX); if (ggtt->gsm[i] == scratch_pte) continue; @@ -315,7 +315,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, if (XE_WARN_ON(bo->ggtt_node.size)) { /* Someone's already inserted this BO in the GGTT */ - XE_BUG_ON(bo->ggtt_node.size != bo->size); + XE_WARN_ON(bo->ggtt_node.size != bo->size); return 0; } @@ -378,7 +378,7 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) return; /* This BO is not currently in the GGTT */ - XE_BUG_ON(bo->ggtt_node.size != bo->size); + XE_WARN_ON(bo->ggtt_node.size != bo->size); xe_ggtt_remove_node(ggtt, &bo->ggtt_node); } diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c index 932b61e0cf67..2f77b8bbcf53 100644 --- a/drivers/gpu/drm/xe/xe_gt_clock.c +++ b/drivers/gpu/drm/xe/xe_gt_clock.c @@ -47,7 +47,7 @@ static u32 get_crystal_clock_freq(u32 rpm_config_reg) case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ: return f25_mhz; default: - XE_BUG_ON("NOT_POSSIBLE"); + XE_WARN_ON("NOT_POSSIBLE"); return 0; } } @@ -58,7 +58,7 
@@ int xe_gt_clock_init(struct xe_gt *gt) u32 freq = 0; /* Assuming gen11+ so assert this assumption is correct */ - XE_BUG_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11); + XE_WARN_ON(GRAPHICS_VER(gt_to_xe(gt)) < 11); if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) { freq = read_reference_ts_freq(gt); diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c index e622174a866d..b871e45af813 100644 --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c @@ -157,7 +157,7 @@ void xe_gt_debugfs_register(struct xe_gt *gt) char name[8]; int i; - XE_BUG_ON(!minor->debugfs_root); + XE_WARN_ON(!minor->debugfs_root); sprintf(name, "gt%d", gt->info.id); root = debugfs_create_dir(name, minor->debugfs_root); diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index cad0ade595ec..bcbeea62d510 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -250,7 +250,7 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, u32 action[MAX_TLB_INVALIDATION_LEN]; int len = 0; - XE_BUG_ON(!vma); + XE_WARN_ON(!vma); action[len++] = XE_GUC_ACTION_TLB_INVALIDATION; action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */ @@ -288,10 +288,10 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, start = ALIGN_DOWN(xe_vma_start(vma), length); } - XE_BUG_ON(length < SZ_4K); - XE_BUG_ON(!is_power_of_2(length)); - XE_BUG_ON(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1)); - XE_BUG_ON(!IS_ALIGNED(start, length)); + XE_WARN_ON(length < SZ_4K); + XE_WARN_ON(!is_power_of_2(length)); + XE_WARN_ON(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1)); + XE_WARN_ON(!IS_ALIGNED(start, length)); action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE); action[len++] = xe_vma_vm(vma)->usm.asid; @@ -300,7 +300,7 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt, action[len++] = ilog2(length) - ilog2(SZ_4K); } - XE_BUG_ON(len > MAX_TLB_INVALIDATION_LEN); + XE_WARN_ON(len > MAX_TLB_INVALIDATION_LEN); return send_tlb_invalidation(>->uc.guc, fence, action, len); } diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 2530b6243661..2493c5859948 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -43,9 +43,9 @@ static u32 guc_bo_ggtt_addr(struct xe_guc *guc, { u32 addr = xe_bo_ggtt_addr(bo); - XE_BUG_ON(addr < xe_wopcm_size(guc_to_xe(guc))); - XE_BUG_ON(addr >= GUC_GGTT_TOP); - XE_BUG_ON(bo->size > GUC_GGTT_TOP - addr); + XE_WARN_ON(addr < xe_wopcm_size(guc_to_xe(guc))); + XE_WARN_ON(addr >= GUC_GGTT_TOP); + XE_WARN_ON(bo->size > GUC_GGTT_TOP - addr); return addr; } @@ -612,13 +612,13 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request, BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT); - XE_BUG_ON(guc->ct.enabled); - XE_BUG_ON(!len); - XE_BUG_ON(len > VF_SW_FLAG_COUNT); - XE_BUG_ON(len > MED_VF_SW_FLAG_COUNT); - XE_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != + XE_WARN_ON(guc->ct.enabled); + XE_WARN_ON(!len); + XE_WARN_ON(len > VF_SW_FLAG_COUNT); + XE_WARN_ON(len > MED_VF_SW_FLAG_COUNT); + XE_WARN_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST); - XE_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != + XE_WARN_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST); retry: @@ -724,8 +724,8 @@ static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val) }; int ret; - XE_BUG_ON(len > 2); - XE_BUG_ON(len == 1 && upper_32_bits(val)); + XE_WARN_ON(len > 2); + 
XE_WARN_ON(len == 1 && upper_32_bits(val)); /* Self config must go over MMIO */ ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request)); diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c index d4c3a5ce3252..a7da29be2e51 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads.c +++ b/drivers/gpu/drm/xe/xe_guc_ads.c @@ -118,7 +118,7 @@ struct __guc_ads_blob { static size_t guc_ads_regset_size(struct xe_guc_ads *ads) { - XE_BUG_ON(!ads->regset_size); + XE_WARN_ON(!ads->regset_size); return ads->regset_size; } @@ -312,7 +312,7 @@ int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads) struct xe_gt *gt = ads_to_gt(ads); u32 prev_regset_size = ads->regset_size; - XE_BUG_ON(!ads->bo); + XE_WARN_ON(!ads->bo); ads->golden_lrc_size = calculate_golden_lrc_size(ads); ads->regset_size = calculate_regset_size(gt); @@ -518,7 +518,7 @@ static void guc_mmio_reg_state_init(struct xe_guc_ads *ads) regset_used += count * sizeof(struct guc_mmio_reg); } - XE_BUG_ON(regset_used > ads->regset_size); + XE_WARN_ON(regset_used > ads->regset_size); } static void guc_um_init_params(struct xe_guc_ads *ads) @@ -573,7 +573,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads) offsetof(struct __guc_ads_blob, system_info)); u32 base = xe_bo_ggtt_addr(ads->bo); - XE_BUG_ON(!ads->bo); + XE_WARN_ON(!ads->bo); xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size); guc_policies_init(ads); @@ -597,7 +597,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads) offsetof(struct __guc_ads_blob, system_info)); u32 base = xe_bo_ggtt_addr(ads->bo); - XE_BUG_ON(!ads->bo); + XE_WARN_ON(!ads->bo); xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size); guc_policies_init(ads); @@ -647,7 +647,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads) engine_enabled_masks[guc_class])) continue; - XE_BUG_ON(!gt->default_lrc[class]); + XE_WARN_ON(!gt->default_lrc[class]); real_size = xe_lrc_size(xe, class); alloc_size = PAGE_ALIGN(real_size); @@ -676,7 +676,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads) offset += alloc_size; } - XE_BUG_ON(total_size != ads->golden_lrc_size); + XE_WARN_ON(total_size != ads->golden_lrc_size); } void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads) diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index d322eadbe75a..7fb2690425f8 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -135,7 +135,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct) struct xe_bo *bo; int err; - XE_BUG_ON(guc_ct_size() % PAGE_SIZE); + XE_WARN_ON(guc_ct_size() % PAGE_SIZE); mutex_init(&ct->lock); spin_lock_init(&ct->fast_lock); @@ -283,7 +283,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct) struct xe_device *xe = ct_to_xe(ct); int err; - XE_BUG_ON(ct->enabled); + XE_WARN_ON(ct->enabled); guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap); guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap); @@ -376,7 +376,7 @@ static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len) static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h) { - XE_BUG_ON(g2h_len > ct->ctbs.g2h.info.space); + XE_WARN_ON(g2h_len > ct->ctbs.g2h.info.space); if (g2h_len) { lockdep_assert_held(&ct->fast_lock); @@ -419,8 +419,8 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, full_len = len + GUC_CTB_HDR_LEN; lockdep_assert_held(&ct->lock); - XE_BUG_ON(full_len > (GUC_CTB_MSG_MAX_LEN - GUC_CTB_HDR_LEN)); - XE_BUG_ON(tail > h2g->info.size); + XE_WARN_ON(full_len > (GUC_CTB_MSG_MAX_LEN - 
GUC_CTB_HDR_LEN)); + XE_WARN_ON(tail > h2g->info.size); /* Command will wrap, zero fill (NOPs), return and check credits again */ if (tail + full_len > h2g->info.size) { @@ -478,10 +478,10 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, { int ret; - XE_BUG_ON(g2h_len && g2h_fence); - XE_BUG_ON(num_g2h && g2h_fence); - XE_BUG_ON(g2h_len && !num_g2h); - XE_BUG_ON(!g2h_len && num_g2h); + XE_WARN_ON(g2h_len && g2h_fence); + XE_WARN_ON(num_g2h && g2h_fence); + XE_WARN_ON(g2h_len && !num_g2h); + XE_WARN_ON(!g2h_len && num_g2h); lockdep_assert_held(&ct->lock); if (unlikely(ct->ctbs.h2g.info.broken)) { @@ -552,7 +552,7 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, unsigned int sleep_period_ms = 1; int ret; - XE_BUG_ON(g2h_len && g2h_fence); + XE_WARN_ON(g2h_len && g2h_fence); lockdep_assert_held(&ct->lock); xe_device_assert_mem_access(ct_to_xe(ct)); @@ -622,7 +622,7 @@ static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len, { int ret; - XE_BUG_ON(g2h_len && g2h_fence); + XE_WARN_ON(g2h_len && g2h_fence); mutex_lock(&ct->lock); ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence); diff --git a/drivers/gpu/drm/xe/xe_guc_hwconfig.c b/drivers/gpu/drm/xe/xe_guc_hwconfig.c index c8f875e970ab..76aed9c348ab 100644 --- a/drivers/gpu/drm/xe/xe_guc_hwconfig.c +++ b/drivers/gpu/drm/xe/xe_guc_hwconfig.c @@ -120,7 +120,7 @@ void xe_guc_hwconfig_copy(struct xe_guc *guc, void *dst) { struct xe_device *xe = guc_to_xe(guc); - XE_BUG_ON(!guc->hwconfig.bo); + XE_WARN_ON(!guc->hwconfig.bo); xe_map_memcpy_from(xe, dst, &guc->hwconfig.bo->vmap, 0, guc->hwconfig.size); diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c index 403aaafcaba6..63904007af0a 100644 --- a/drivers/gpu/drm/xe/xe_guc_log.c +++ b/drivers/gpu/drm/xe/xe_guc_log.c @@ -55,12 +55,12 @@ void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p) size_t size; int i, j; - XE_BUG_ON(!log->bo); + XE_WARN_ON(!log->bo); size = log->bo->size; #define DW_PER_READ 128 - XE_BUG_ON(size % (DW_PER_READ * sizeof(u32))); + XE_WARN_ON(size % (DW_PER_READ * sizeof(u32))); for (i = 0; i < size / sizeof(u32); i += DW_PER_READ) { u32 read[DW_PER_READ]; diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index a2eeb3ffe548..9a4c96cb3f42 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -329,7 +329,7 @@ static void __guc_engine_policy_start_klv(struct engine_policy *policy, static void __guc_engine_policy_add_##func(struct engine_policy *policy, \ u32 data) \ { \ - XE_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \ + XE_WARN_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \ \ policy->h2g.klv[policy->count].kl = \ FIELD_PREP(GUC_KLV_0_KEY, \ @@ -358,7 +358,7 @@ static void init_policies(struct xe_guc *guc, struct xe_engine *e) u32 timeslice_us = e->sched_props.timeslice_us; u32 preempt_timeout_us = e->sched_props.preempt_timeout_us; - XE_BUG_ON(!engine_registered(e)); + XE_WARN_ON(!engine_registered(e)); __guc_engine_policy_start_klv(&policy, e->guc->id); __guc_engine_policy_add_priority(&policy, xe_engine_prio_to_guc[prio]); @@ -396,7 +396,7 @@ static void __register_mlrc_engine(struct xe_guc *guc, int len = 0; int i; - XE_BUG_ON(!xe_engine_is_parallel(e)); + XE_WARN_ON(!xe_engine_is_parallel(e)); action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC; action[len++] = info->flags; @@ -419,7 +419,7 @@ static void __register_mlrc_engine(struct 
xe_guc *guc, action[len++] = upper_32_bits(xe_lrc_descriptor(lrc)); } - XE_BUG_ON(len > MAX_MLRC_REG_SIZE); + XE_WARN_ON(len > MAX_MLRC_REG_SIZE); #undef MAX_MLRC_REG_SIZE xe_guc_ct_send(&guc->ct, action, len, 0, 0); @@ -453,7 +453,7 @@ static void register_engine(struct xe_engine *e) struct xe_lrc *lrc = e->lrc; struct guc_ctxt_registration_info info; - XE_BUG_ON(engine_registered(e)); + XE_WARN_ON(engine_registered(e)); memset(&info, 0, sizeof(info)); info.context_idx = e->guc->id; @@ -543,7 +543,7 @@ static int wq_noop_append(struct xe_engine *e) if (wq_wait_for_space(e, wq_space_until_wrap(e))) return -ENODEV; - XE_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw)); + XE_WARN_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw)); parallel_write(xe, map, wq[e->guc->wqi_tail / sizeof(u32)], FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) | @@ -583,13 +583,13 @@ static void wq_item_append(struct xe_engine *e) wqi[i++] = lrc->ring.tail / sizeof(u64); } - XE_BUG_ON(i != wqi_size / sizeof(u32)); + XE_WARN_ON(i != wqi_size / sizeof(u32)); iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch, wq[e->guc->wqi_tail / sizeof(u32)])); xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size); e->guc->wqi_tail += wqi_size; - XE_BUG_ON(e->guc->wqi_tail > WQ_SIZE); + XE_WARN_ON(e->guc->wqi_tail > WQ_SIZE); xe_device_wmb(xe); @@ -608,7 +608,7 @@ static void submit_engine(struct xe_engine *e) int len = 0; bool extra_submit = false; - XE_BUG_ON(!engine_registered(e)); + XE_WARN_ON(!engine_registered(e)); if (xe_engine_is_parallel(e)) wq_item_append(e); @@ -656,8 +656,8 @@ guc_engine_run_job(struct drm_sched_job *drm_job) struct xe_engine *e = job->engine; bool lr = xe_engine_is_lr(e); - XE_BUG_ON((engine_destroyed(e) || engine_pending_disable(e)) && - !engine_banned(e) && !engine_suspended(e)); + XE_WARN_ON((engine_destroyed(e) || engine_pending_disable(e)) && + !engine_banned(e) && !engine_suspended(e)); trace_xe_sched_job_run(job); @@ -984,7 +984,7 @@ static void __guc_engine_process_msg_cleanup(struct xe_sched_msg *msg) struct xe_engine *e = msg->private_data; struct xe_guc *guc = engine_to_guc(e); - XE_BUG_ON(e->flags & ENGINE_FLAG_KERNEL); + XE_WARN_ON(e->flags & ENGINE_FLAG_KERNEL); trace_xe_engine_cleanup_entity(e); if (engine_registered(e)) @@ -1012,9 +1012,9 @@ static void suspend_fence_signal(struct xe_engine *e) { struct xe_guc *guc = engine_to_guc(e); - XE_BUG_ON(!engine_suspended(e) && !engine_killed(e) && - !guc_read_stopped(guc)); - XE_BUG_ON(!e->guc->suspend_pending); + XE_WARN_ON(!engine_suspended(e) && !engine_killed(e) && + !guc_read_stopped(guc)); + XE_WARN_ON(!e->guc->suspend_pending); e->guc->suspend_pending = false; smp_wmb(); @@ -1100,7 +1100,7 @@ static void guc_engine_process_msg(struct xe_sched_msg *msg) __guc_engine_process_msg_resume(msg); break; default: - XE_BUG_ON("Unknown message type"); + XE_WARN_ON("Unknown message type"); } } @@ -1122,7 +1122,7 @@ static int guc_engine_init(struct xe_engine *e) long timeout; int err; - XE_BUG_ON(!xe_device_guc_submission_enabled(guc_to_xe(guc))); + XE_WARN_ON(!xe_device_guc_submission_enabled(guc_to_xe(guc))); ge = kzalloc(sizeof(*ge), GFP_KERNEL); if (!ge) @@ -1286,9 +1286,9 @@ static int guc_engine_set_job_timeout(struct xe_engine *e, u32 job_timeout_ms) { struct xe_gpu_scheduler *sched = &e->guc->sched; - XE_BUG_ON(engine_registered(e)); - XE_BUG_ON(engine_banned(e)); - XE_BUG_ON(engine_killed(e)); + XE_WARN_ON(engine_registered(e)); + XE_WARN_ON(engine_banned(e)); + XE_WARN_ON(engine_killed(e)); sched->base.timeout = job_timeout_ms; @@ -1320,7 +1320,7 @@ static void 
guc_engine_resume(struct xe_engine *e) { struct xe_sched_msg *msg = e->guc->static_msgs + STATIC_MSG_RESUME; - XE_BUG_ON(e->guc->suspend_pending); + XE_WARN_ON(e->guc->suspend_pending); guc_engine_add_msg(e, msg, RESUME); } @@ -1416,7 +1416,7 @@ int xe_guc_submit_stop(struct xe_guc *guc) struct xe_engine *e; unsigned long index; - XE_BUG_ON(guc_read_stopped(guc) != 1); + XE_WARN_ON(guc_read_stopped(guc) != 1); mutex_lock(&guc->submission_state.lock); @@ -1454,7 +1454,7 @@ int xe_guc_submit_start(struct xe_guc *guc) struct xe_engine *e; unsigned long index; - XE_BUG_ON(guc_read_stopped(guc) != 1); + XE_WARN_ON(guc_read_stopped(guc) != 1); mutex_lock(&guc->submission_state.lock); atomic_dec(&guc->submission_state.stopped); @@ -1484,7 +1484,7 @@ g2h_engine_lookup(struct xe_guc *guc, u32 guc_id) return NULL; } - XE_BUG_ON(e->guc->id != guc_id); + XE_WARN_ON(e->guc->id != guc_id); return e; } diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c index dc1708b4e94a..177cda14864e 100644 --- a/drivers/gpu/drm/xe/xe_huc.c +++ b/drivers/gpu/drm/xe/xe_huc.c @@ -72,7 +72,7 @@ int xe_huc_auth(struct xe_huc *huc) if (xe_uc_fw_is_disabled(&huc->fw)) return 0; - XE_BUG_ON(xe_uc_fw_is_running(&huc->fw)); + XE_WARN_ON(xe_uc_fw_is_running(&huc->fw)); if (!xe_uc_fw_is_loaded(&huc->fw)) return -ENOEXEC; diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index 1af5cccd1142..ead5aa285619 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -237,7 +237,7 @@ static void hw_engine_fini(struct drm_device *drm, void *arg) static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg, u32 val) { - XE_BUG_ON(reg.addr & hwe->mmio_base); + XE_WARN_ON(reg.addr & hwe->mmio_base); xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain); reg.addr += hwe->mmio_base; @@ -247,7 +247,7 @@ static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg, static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg) { - XE_BUG_ON(reg.addr & hwe->mmio_base); + XE_WARN_ON(reg.addr & hwe->mmio_base); xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain); reg.addr += hwe->mmio_base; @@ -351,7 +351,7 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe, info = &engine_infos[id]; - XE_BUG_ON(hwe->gt); + XE_WARN_ON(hwe->gt); hwe->gt = gt; hwe->class = info->class; @@ -377,8 +377,8 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe, struct xe_tile *tile = gt_to_tile(gt); int err; - XE_BUG_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name); - XE_BUG_ON(!(gt->info.engine_mask & BIT(id))); + XE_WARN_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name); + XE_WARN_ON(!(gt->info.engine_mask & BIT(id))); xe_reg_sr_apply_mmio(&hwe->reg_sr, gt); xe_reg_sr_apply_whitelist(hwe); diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c index ffe1a3992ef5..a6094c81f2ad 100644 --- a/drivers/gpu/drm/xe/xe_hw_fence.c +++ b/drivers/gpu/drm/xe/xe_hw_fence.c @@ -188,7 +188,7 @@ static void xe_hw_fence_release(struct dma_fence *dma_fence) struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence); trace_xe_hw_fence_free(fence); - XE_BUG_ON(!list_empty(&fence->irq_link)); + XE_WARN_ON(!list_empty(&fence->irq_link)); call_rcu(&dma_fence->rcu, fence_free); } diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index b726599f6228..05f3d8d68379 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ 
-108,7 +108,7 @@ static void set_offsets(u32 *regs, *regs |= MI_LRI_LRM_CS_MMIO; regs++; - XE_BUG_ON(!count); + XE_WARN_ON(!count); do { u32 offset = 0; u8 v; @@ -528,7 +528,7 @@ static inline struct iosys_map __xe_lrc_##elem##_map(struct xe_lrc *lrc) \ { \ struct iosys_map map = lrc->bo->vmap; \ \ - XE_BUG_ON(iosys_map_is_null(&map)); \ + XE_WARN_ON(iosys_map_is_null(&map)); \ iosys_map_incr(&map, __xe_lrc_##elem##_offset(lrc)); \ return map; \ } \ @@ -759,12 +759,12 @@ void xe_lrc_write_ring(struct xe_lrc *lrc, const void *data, size_t size) u32 rhs; size_t aligned_size; - XE_BUG_ON(!IS_ALIGNED(size, 4)); + XE_WARN_ON(!IS_ALIGNED(size, 4)); aligned_size = ALIGN(size, 8); ring = __xe_lrc_ring_map(lrc); - XE_BUG_ON(lrc->ring.tail >= lrc->ring.size); + XE_WARN_ON(lrc->ring.tail >= lrc->ring.size); rhs = lrc->ring.size - lrc->ring.tail; if (size > rhs) { __xe_lrc_write_ring(lrc, ring, data, rhs); diff --git a/drivers/gpu/drm/xe/xe_macros.h b/drivers/gpu/drm/xe/xe_macros.h index a7105050bce0..daf56c846d03 100644 --- a/drivers/gpu/drm/xe/xe_macros.h +++ b/drivers/gpu/drm/xe/xe_macros.h @@ -9,7 +9,6 @@ #include <linux/bug.h> #define XE_WARN_ON WARN_ON -#define XE_BUG_ON BUG_ON #define XE_IOCTL_DBG(xe, cond) \ ((cond) && (drm_dbg(&(xe)->drm, \ diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 0405136bc0b1..9c4b432d496f 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -106,7 +106,7 @@ static void xe_migrate_fini(struct drm_device *dev, void *arg) static u64 xe_migrate_vm_addr(u64 slot, u32 level) { - XE_BUG_ON(slot >= NUM_PT_SLOTS); + XE_WARN_ON(slot >= NUM_PT_SLOTS); /* First slot is reserved for mapping of PT bo and bb, start from 1 */ return (slot + 1ULL) << xe_pt_shift(level + 1); @@ -171,7 +171,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1)); /* Need to be sure everything fits in the first PT, or create more */ - XE_BUG_ON(m->batch_base_ofs + batch->size >= SZ_2M); + XE_WARN_ON(m->batch_base_ofs + batch->size >= SZ_2M); bo = xe_bo_create_pin_map(vm->xe, tile, vm, num_entries * XE_PAGE_SIZE, @@ -205,7 +205,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, } if (!IS_DGFX(xe)) { - XE_BUG_ON(xe->info.supports_usm); + XE_WARN_ON(xe->info.supports_usm); /* Write out batch too */ m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE; @@ -487,7 +487,7 @@ static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb, num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size), NUM_CCS_BYTES_PER_BLOCK); - XE_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER); + XE_WARN_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER); *cs++ = XY_CTRL_SURF_COPY_BLT | (src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT | (dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT | @@ -507,9 +507,9 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, u64 dst_ofs, unsigned int size, unsigned int pitch) { - XE_BUG_ON(size / pitch > S16_MAX); - XE_BUG_ON(pitch / 4 > S16_MAX); - XE_BUG_ON(pitch > U16_MAX); + XE_WARN_ON(size / pitch > S16_MAX); + XE_WARN_ON(pitch / 4 > S16_MAX); + XE_WARN_ON(pitch > U16_MAX); bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2); bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch; @@ -569,7 +569,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m, * At the moment, we don't support copying CCS metadata from * system to system. 
*/ - XE_BUG_ON(!src_is_vram && !dst_is_vram); + XE_WARN_ON(!src_is_vram && !dst_is_vram); emit_copy_ccs(gt, bb, dst_ofs, dst_is_vram, src_ofs, src_is_vram, dst_size); @@ -781,7 +781,7 @@ static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs *cs++ = upper_32_bits(src_ofs); *cs++ = FIELD_PREP(PVC_MS_MOCS_INDEX_MASK, mocs); - XE_BUG_ON(cs - bb->cs != len + bb->len); + XE_WARN_ON(cs - bb->cs != len + bb->len); bb->len += len; } @@ -819,7 +819,7 @@ static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb, *cs++ = 0; } - XE_BUG_ON(cs - bb->cs != len + bb->len); + XE_WARN_ON(cs - bb->cs != len + bb->len); bb->len += len; } @@ -992,9 +992,9 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, * PDE. This requires a BO that is almost vm->size big. * * This shouldn't be possible in practice.. might change when 16K - * pages are used. Hence the BUG_ON. + * pages are used. Hence the XE_WARN_ON. */ - XE_BUG_ON(update->qwords > 0x1ff); + XE_WARN_ON(update->qwords > 0x1ff); if (!ppgtt_ofs) { ppgtt_ofs = xe_migrate_vram_ofs(xe_bo_addr(update->pt_bo, 0, XE_PAGE_SIZE)); @@ -1184,7 +1184,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m, * Worst case: Sum(2 * (each lower level page size) + (top level page size)) * Should be reasonably bound.. */ - XE_BUG_ON(batch_size >= SZ_128K); + XE_WARN_ON(batch_size >= SZ_128K); bb = xe_bb_new(gt, batch_size, !eng && xe->info.supports_usm); if (IS_ERR(bb)) @@ -1194,7 +1194,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m, if (!IS_DGFX(xe)) { ppgtt_ofs = NUM_KERNEL_PDE - 1; if (eng) { - XE_BUG_ON(num_updates > NUM_VMUSA_WRITES_PER_UNIT); + XE_WARN_ON(num_updates > NUM_VMUSA_WRITES_PER_UNIT); sa_bo = drm_suballoc_new(&m->vm_update_sa, 1, GFP_KERNEL, true, 0); @@ -1223,7 +1223,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m, for (i = 0; i < num_updates; i++) { struct xe_bo *pt_bo = updates[i].pt_bo; - BUG_ON(pt_bo->size != SZ_4K); + XE_WARN_ON(pt_bo->size != SZ_4K); addr = xe_pte_encode(pt_bo, 0, XE_CACHE_WB, 0); bb->cs[bb->len++] = lower_32_bits(addr); diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index d9192bf50362..b82ce01cc4cb 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -106,7 +106,7 @@ static u64 __pte_encode(u64 pte, enum xe_cache_level cache, pte |= XE_PDPE_PS_1G; /* XXX: Does hw support 1 GiB pages? */ - XE_BUG_ON(pt_level > 2); + XE_WARN_ON(pt_level > 2); return pte; } @@ -196,7 +196,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, pt->level = level; pt->base.dir = level ? 
&as_xe_pt_dir(pt)->dir : NULL; - XE_BUG_ON(level > XE_VM_MAX_LEVEL); + XE_WARN_ON(level > XE_VM_MAX_LEVEL); return pt; @@ -265,7 +265,7 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred) if (!pt) return; - XE_BUG_ON(!list_empty(&pt->bo->vmas)); + XE_WARN_ON(!list_empty(&pt->bo->vmas)); xe_bo_unpin(pt->bo); xe_bo_put_deferred(pt->bo, deferred); @@ -849,8 +849,8 @@ static int xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset, struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base); pgoff_t end_offset; - XE_BUG_ON(!*child); - XE_BUG_ON(!level && xe_child->is_compact); + XE_WARN_ON(!*child); + XE_WARN_ON(!level && xe_child->is_compact); /* * Note that we're called from an entry callback, and we're dealing @@ -1004,7 +1004,7 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma, *num_entries = 0; err = xe_pt_stage_bind(tile, vma, entries, num_entries); if (!err) - BUG_ON(!*num_entries); + XE_WARN_ON(!*num_entries); else /* abort! */ xe_pt_abort_bind(vma, entries, *num_entries); @@ -1026,7 +1026,7 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe, u64 end; u64 start; - XE_BUG_ON(entry->pt->is_compact); + XE_WARN_ON(entry->pt->is_compact); start = entry->ofs * page_size; end = start + page_size * entry->qwords; vm_dbg(&xe->drm, @@ -1356,7 +1356,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e, err = xe_pt_prepare_bind(tile, vma, entries, &num_entries, rebind); if (err) goto err; - XE_BUG_ON(num_entries > ARRAY_SIZE(entries)); + XE_WARN_ON(num_entries > ARRAY_SIZE(entries)); xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries); xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries, @@ -1515,8 +1515,8 @@ static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset, { struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base); - XE_BUG_ON(!*child); - XE_BUG_ON(!level && xe_child->is_compact); + XE_WARN_ON(!*child); + XE_WARN_ON(!level && xe_child->is_compact); xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk); @@ -1707,7 +1707,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e xe_vma_start(vma), xe_vma_end(vma) - 1, e); num_entries = xe_pt_stage_unbind(tile, vma, entries); - XE_BUG_ON(num_entries > ARRAY_SIZE(entries)); + XE_WARN_ON(num_entries > ARRAY_SIZE(entries)); xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries); xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries, diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h index dda963fe3300..5cb4b66a5d74 100644 --- a/drivers/gpu/drm/xe/xe_res_cursor.h +++ b/drivers/gpu/drm/xe/xe_res_cursor.h @@ -79,7 +79,7 @@ static inline void xe_res_first(struct ttm_resource *res, if (!res) goto fallback; - XE_BUG_ON(start + size > res->size); + XE_WARN_ON(start + size > res->size); cur->mem_type = res->mem_type; @@ -139,7 +139,7 @@ static inline void __xe_res_sg_next(struct xe_res_cursor *cur) while (start >= sg_dma_len(sgl)) { start -= sg_dma_len(sgl); sgl = sg_next(sgl); - XE_BUG_ON(!sgl); + XE_WARN_ON(!sgl); } cur->start = start; @@ -161,9 +161,9 @@ static inline void xe_res_first_sg(const struct sg_table *sg, u64 start, u64 size, struct xe_res_cursor *cur) { - XE_BUG_ON(!sg); - XE_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE) || - !IS_ALIGNED(size, PAGE_SIZE)); + XE_WARN_ON(!sg); + XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) || + !IS_ALIGNED(size, PAGE_SIZE)); cur->node = NULL; cur->start = start; cur->remaining = size; @@ 
-187,7 +187,7 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size) struct list_head *next; u64 start; - XE_BUG_ON(size > cur->remaining); + XE_WARN_ON(size > cur->remaining); cur->remaining -= size; if (!cur->remaining) diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index 4cfd78e1ffa5..2d0d392cd691 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -233,7 +233,7 @@ static void __emit_job_gen12_copy(struct xe_sched_job *job, struct xe_lrc *lrc, i = emit_user_interrupt(dw, i); - XE_BUG_ON(i > MAX_JOB_SIZE_DW); + XE_WARN_ON(i > MAX_JOB_SIZE_DW); xe_lrc_write_ring(lrc, dw, i * sizeof(*dw)); } @@ -291,7 +291,7 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, i = emit_user_interrupt(dw, i); - XE_BUG_ON(i > MAX_JOB_SIZE_DW); + XE_WARN_ON(i > MAX_JOB_SIZE_DW); xe_lrc_write_ring(lrc, dw, i * sizeof(*dw)); } @@ -339,7 +339,7 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job, i = emit_user_interrupt(dw, i); - XE_BUG_ON(i > MAX_JOB_SIZE_DW); + XE_WARN_ON(i > MAX_JOB_SIZE_DW); xe_lrc_write_ring(lrc, dw, i * sizeof(*dw)); } @@ -369,7 +369,7 @@ static void emit_migration_job_gen12(struct xe_sched_job *job, i = emit_user_interrupt(dw, i); - XE_BUG_ON(i > MAX_JOB_SIZE_DW); + XE_WARN_ON(i > MAX_JOB_SIZE_DW); xe_lrc_write_ring(lrc, dw, i * sizeof(*dw)); } diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c index c87f65c98b3d..85fd5980191c 100644 --- a/drivers/gpu/drm/xe/xe_sched_job.c +++ b/drivers/gpu/drm/xe/xe_sched_job.c @@ -142,7 +142,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_engine *e, /* Sanity check */ for (j = 0; j < e->width; ++j) - XE_BUG_ON(cf->base.seqno != fences[j]->seqno); + XE_WARN_ON(cf->base.seqno != fences[j]->seqno); job->fence = &cf->base; } diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c index 7bba8fff5a5d..be0a25e23929 100644 --- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c @@ -205,7 +205,7 @@ u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset) struct xe_ttm_stolen_mgr *mgr = to_stolen_mgr(ttm_mgr); struct xe_res_cursor cur; - XE_BUG_ON(!mgr->io_base); + XE_WARN_ON(!mgr->io_base); if (xe_ttm_stolen_cpu_access_needs_ggtt(xe)) return mgr->io_base + xe_bo_ggtt_addr(bo) + offset; @@ -245,7 +245,7 @@ static int __xe_ttm_stolen_io_mem_reserve_stolen(struct xe_device *xe, #ifdef CONFIG_X86 struct xe_bo *bo = ttm_to_xe_bo(mem->bo); - XE_BUG_ON(IS_DGFX(xe)); + XE_WARN_ON(IS_DGFX(xe)); /* XXX: Require BO to be mapped to GGTT? 
*/ if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_CREATE_GGTT_BIT))) diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index 5801c10f3ccc..4b04f6e5388d 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -158,7 +158,7 @@ __uc_fw_to_gt(struct xe_uc_fw *uc_fw, enum xe_uc_fw_type type) if (type == XE_UC_FW_TYPE_GUC) return container_of(uc_fw, struct xe_gt, uc.guc.fw); - XE_BUG_ON(type != XE_UC_FW_TYPE_HUC); + XE_WARN_ON(type != XE_UC_FW_TYPE_HUC); return container_of(uc_fw, struct xe_gt, uc.huc.fw); } @@ -194,7 +194,7 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw) u32 count; int i; - XE_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all)); + XE_WARN_ON(uc_fw->type >= ARRAY_SIZE(blobs_all)); entries = blobs_all[uc_fw->type].entries; count = blobs_all[uc_fw->type].count; @@ -223,8 +223,8 @@ size_t xe_uc_fw_copy_rsa(struct xe_uc_fw *uc_fw, void *dst, u32 max_len) struct xe_device *xe = uc_fw_to_xe(uc_fw); u32 size = min_t(u32, uc_fw->rsa_size, max_len); - XE_BUG_ON(size % 4); - XE_BUG_ON(!xe_uc_fw_is_available(uc_fw)); + XE_WARN_ON(size % 4); + XE_WARN_ON(!xe_uc_fw_is_available(uc_fw)); xe_map_memcpy_from(xe, dst, &uc_fw->bo->vmap, xe_uc_fw_rsa_offset(uc_fw), size); @@ -248,7 +248,7 @@ static void guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css) struct xe_gt *gt = uc_fw_to_gt(uc_fw); struct xe_guc *guc = >->uc.guc; - XE_BUG_ON(uc_fw->type != XE_UC_FW_TYPE_GUC); + XE_WARN_ON(uc_fw->type != XE_UC_FW_TYPE_GUC); XE_WARN_ON(uc_fw->major_ver_found < 70); if (uc_fw->major_ver_found > 70 || uc_fw->minor_ver_found >= 6) { @@ -335,8 +335,8 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw) * before we're looked at the HW caps to see if we have uc support */ BUILD_BUG_ON(XE_UC_FIRMWARE_UNINITIALIZED); - XE_BUG_ON(uc_fw->status); - XE_BUG_ON(uc_fw->path); + XE_WARN_ON(uc_fw->status); + XE_WARN_ON(uc_fw->path); uc_fw_auto_select(xe, uc_fw); xe_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ? 
@@ -502,7 +502,7 @@ int xe_uc_fw_upload(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags) int err; /* make sure the status was cleared the last time we reset the uc */ - XE_BUG_ON(xe_uc_fw_is_loaded(uc_fw)); + XE_WARN_ON(xe_uc_fw_is_loaded(uc_fw)); if (!xe_uc_fw_is_loadable(uc_fw)) return -ENOEXEC; diff --git a/drivers/gpu/drm/xe/xe_uc_fw.h b/drivers/gpu/drm/xe/xe_uc_fw.h index e16267e71280..a519c77d4962 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.h +++ b/drivers/gpu/drm/xe/xe_uc_fw.h @@ -104,7 +104,7 @@ static inline enum xe_uc_fw_status __xe_uc_fw_status(struct xe_uc_fw *uc_fw) { /* shouldn't call this before checking hw/blob availability */ - XE_BUG_ON(uc_fw->status == XE_UC_FIRMWARE_UNINITIALIZED); + XE_WARN_ON(uc_fw->status == XE_UC_FIRMWARE_UNINITIALIZED); return uc_fw->status; } diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 205795823555..0bebdac2287c 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -63,7 +63,7 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma) bool read_only = xe_vma_read_only(vma); lockdep_assert_held(&vm->lock); - XE_BUG_ON(!xe_vma_is_userptr(vma)); + XE_WARN_ON(!xe_vma_is_userptr(vma)); retry: if (vma->gpuva.flags & XE_VMA_DESTROYED) return 0; @@ -252,7 +252,7 @@ static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list) struct dma_fence *fence; link = list->next; - XE_BUG_ON(link == list); + XE_WARN_ON(link == list); fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link), e, e->compute.context, @@ -329,7 +329,7 @@ int xe_vm_add_compute_engine(struct xe_vm *vm, struct xe_engine *e) int err; bool wait; - XE_BUG_ON(!xe_vm_in_compute_mode(vm)); + XE_WARN_ON(!xe_vm_in_compute_mode(vm)); down_write(&vm->lock); @@ -549,7 +549,7 @@ static void preempt_rebind_work_func(struct work_struct *w) long wait; int __maybe_unused tries = 0; - XE_BUG_ON(!xe_vm_in_compute_mode(vm)); + XE_WARN_ON(!xe_vm_in_compute_mode(vm)); trace_xe_vm_rebind_worker_enter(vm); down_write(&vm->lock); @@ -708,7 +708,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, struct dma_fence *fence; long err; - XE_BUG_ON(!xe_vma_is_userptr(vma)); + XE_WARN_ON(!xe_vma_is_userptr(vma)); trace_xe_vma_userptr_invalidate(vma); if (!mmu_notifier_range_blockable(range)) @@ -877,8 +877,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, struct xe_tile *tile; u8 id; - XE_BUG_ON(start >= end); - XE_BUG_ON(end >= vm->size); + XE_WARN_ON(start >= end); + XE_WARN_ON(end >= vm->size); if (!bo && !is_null) /* userptr */ vma = kzalloc(sizeof(*vma), GFP_KERNEL); @@ -1075,7 +1075,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence) struct xe_vm *vm = xe_vma_vm(vma); lockdep_assert_held_write(&vm->lock); - XE_BUG_ON(!list_empty(&vma->combined_links.destroy)); + XE_WARN_ON(!list_empty(&vma->combined_links.destroy)); if (xe_vma_is_userptr(vma)) { XE_WARN_ON(!(vma->gpuva.flags & XE_VMA_DESTROYED)); @@ -1153,7 +1153,7 @@ xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range) if (xe_vm_is_closed_or_banned(vm)) return NULL; - XE_BUG_ON(start + range > vm->size); + XE_WARN_ON(start + range > vm->size); gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range); @@ -1164,7 +1164,7 @@ static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) { int err; - XE_BUG_ON(xe_vma_vm(vma) != vm); + XE_WARN_ON(xe_vma_vm(vma) != vm); lockdep_assert_held(&vm->lock); err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva); @@ -1175,7 +1175,7 @@ static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) 
static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) { - XE_BUG_ON(xe_vma_vm(vma) != vm); + XE_WARN_ON(xe_vma_vm(vma) != vm); lockdep_assert_held(&vm->lock); drm_gpuva_remove(&vma->gpuva); @@ -1422,7 +1422,7 @@ void xe_vm_close_and_put(struct xe_vm *vm) struct drm_gpuva *gpuva, *next; u8 id; - XE_BUG_ON(vm->preempt.num_engines); + XE_WARN_ON(vm->preempt.num_engines); xe_vm_close(vm); flush_async_ops(vm); @@ -1795,7 +1795,7 @@ int xe_vm_async_fence_wait_start(struct dma_fence *fence) struct async_op_fence *afence = container_of(fence, struct async_op_fence, fence); - XE_BUG_ON(xe_vm_no_dma_fences(afence->vm)); + XE_WARN_ON(xe_vm_no_dma_fences(afence->vm)); smp_rmb(); return wait_event_interruptible(afence->wq, afence->started); @@ -1821,7 +1821,7 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, } else { int i; - XE_BUG_ON(!xe_vm_in_fault_mode(vm)); + XE_WARN_ON(!xe_vm_in_fault_mode(vm)); fence = dma_fence_get_stub(); if (last_op) { @@ -2100,7 +2100,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, { int err; - XE_BUG_ON(region > ARRAY_SIZE(region_to_mem_type)); + XE_WARN_ON(region > ARRAY_SIZE(region_to_mem_type)); if (!xe_vma_has_no_bo(vma)) { err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]); @@ -2181,7 +2181,7 @@ static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo, return -ENODATA; break; default: - XE_BUG_ON("NOT POSSIBLE"); + XE_WARN_ON("NOT POSSIBLE"); return -EINVAL; } @@ -2239,7 +2239,7 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op) (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma)); break; default: - XE_BUG_ON("NOT POSSIBLE"); + XE_WARN_ON("NOT POSSIBLE"); } } #else @@ -2315,7 +2315,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, } break; case XE_VM_BIND_OP_UNMAP_ALL: - XE_BUG_ON(!bo); + XE_WARN_ON(!bo); err = xe_bo_lock(bo, &ww, 0, true); if (err) @@ -2338,7 +2338,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, } break; default: - XE_BUG_ON("NOT POSSIBLE"); + XE_WARN_ON("NOT POSSIBLE"); ops = ERR_PTR(-EINVAL); } @@ -2425,7 +2425,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e, int err, i; lockdep_assert_held_write(&vm->lock); - XE_BUG_ON(num_ops_list > 1 && !async); + XE_WARN_ON(num_ops_list > 1 && !async); if (num_syncs && async) { u64 seqno; @@ -2454,7 +2454,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e, struct xe_vma_op *op = gpuva_op_to_vma_op(__op); bool first = !async_list; - XE_BUG_ON(!first && !async); + XE_WARN_ON(!first && !async); INIT_LIST_HEAD(&op->link); if (first) @@ -2566,7 +2566,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_engine *e, /* Nothing to do */ break; default: - XE_BUG_ON("NOT POSSIBLE"); + XE_WARN_ON("NOT POSSIBLE"); } last_op = op; @@ -2628,7 +2628,7 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op) /* Nothing to do */ break; default: - XE_BUG_ON("NOT POSSIBLE"); + XE_WARN_ON("NOT POSSIBLE"); } op->flags |= XE_VMA_OP_COMMITTED; @@ -2746,7 +2746,7 @@ again: op->flags & XE_VMA_OP_LAST); break; default: - XE_BUG_ON("NOT POSSIBLE"); + XE_WARN_ON("NOT POSSIBLE"); } ttm_eu_backoff_reservation(&ww, &objs); @@ -2805,7 +2805,7 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op) op); break; default: - XE_BUG_ON("NOT POSSIBLE"); + XE_WARN_ON("NOT POSSIBLE"); } return ret; @@ -2881,7 +2881,7 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, /* Nothing to do */ break; default: - 
XE_BUG_ON("NOT POSSIBLE"); + XE_WARN_ON("NOT POSSIBLE"); } } @@ -3413,7 +3413,7 @@ int xe_vm_lock(struct xe_vm *vm, struct ww_acquire_ctx *ww, LIST_HEAD(objs); LIST_HEAD(dups); - XE_BUG_ON(!ww); + XE_WARN_ON(!ww); tv_vm.num_shared = num_resv; tv_vm.bo = xe_vm_ttm_bo(vm); @@ -3447,7 +3447,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma) u8 id; int ret; - XE_BUG_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma))); + XE_WARN_ON(!xe_vm_in_fault_mode(xe_vma_vm(vma))); XE_WARN_ON(xe_vma_is_null(vma)); trace_xe_vma_usm_invalidate(vma); diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c index c4202df1d4f0..761eed3a022f 100644 --- a/drivers/gpu/drm/xe/xe_wait_user_fence.c +++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c @@ -45,7 +45,7 @@ static int do_compare(u64 addr, u64 value, u64 mask, u16 op) passed = (rvalue & mask) <= (value & mask); break; default: - XE_BUG_ON("Not possible"); + XE_WARN_ON("Not possible"); } return passed ? 0 : 1; diff --git a/drivers/gpu/drm/xe/xe_wopcm.c b/drivers/gpu/drm/xe/xe_wopcm.c index d9acf8783b83..9a85bcc18830 100644 --- a/drivers/gpu/drm/xe/xe_wopcm.c +++ b/drivers/gpu/drm/xe/xe_wopcm.c @@ -144,10 +144,10 @@ static int __wopcm_init_regs(struct xe_device *xe, struct xe_gt *gt, u32 mask; int err; - XE_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK)); - XE_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK); - XE_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK)); - XE_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK); + XE_WARN_ON(!(base & GUC_WOPCM_OFFSET_MASK)); + XE_WARN_ON(base & ~GUC_WOPCM_OFFSET_MASK); + XE_WARN_ON(!(size & GUC_WOPCM_SIZE_MASK)); + XE_WARN_ON(size & ~GUC_WOPCM_SIZE_MASK); mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED; err = xe_mmio_write32_and_verify(gt, GUC_WOPCM_SIZE, size, mask, @@ -213,9 +213,9 @@ int xe_wopcm_init(struct xe_wopcm *wopcm) drm_dbg(&xe->drm, "WOPCM: %uK\n", wopcm->size / SZ_1K); xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); - XE_BUG_ON(guc_fw_size >= wopcm->size); - XE_BUG_ON(huc_fw_size >= wopcm->size); - XE_BUG_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm->size); + XE_WARN_ON(guc_fw_size >= wopcm->size); + XE_WARN_ON(huc_fw_size >= wopcm->size); + XE_WARN_ON(ctx_rsvd + WOPCM_RESERVED_SIZE >= wopcm->size); locked = __wopcm_regs_locked(gt, &guc_wopcm_base, &guc_wopcm_size); if (locked) { @@ -256,8 +256,8 @@ check: guc_fw_size, huc_fw_size)) { wopcm->guc.base = guc_wopcm_base; wopcm->guc.size = guc_wopcm_size; - XE_BUG_ON(!wopcm->guc.base); - XE_BUG_ON(!wopcm->guc.size); + XE_WARN_ON(!wopcm->guc.base); + XE_WARN_ON(!wopcm->guc.size); } else { drm_notice(&xe->drm, "Unsuccessful WOPCM partitioning\n"); return -E2BIG; |