author    | Dave Airlie <airlied@redhat.com> | 2019-05-28 00:25:46 +0200
committer | Dave Airlie <airlied@redhat.com> | 2019-05-28 00:59:11 +0200
commit    | 88cd7a2c1b29f61a2a3fab76216a43f3b779e0cd (patch)
tree      | b8a4311c8f7edfc92c1055b6a5a86b1a5552878e /drivers/gpu/drm/v3d
parent    | Linux 5.2-rc2 (diff)
parent    | dt-bindings: fix up for vendor prefixes file conversion (diff)
Merge tag 'drm-misc-next-2019-05-24' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for v5.3, try #2:
UAPI Changes:
- Add HDR source metadata property (see the userspace sketch after this list).
- Make drm.h compile on GNU/kFreeBSD by including stdint.h
- Clarify how the userspace reviewer has to review new kernel UAPI.
- Clarify that for using new UAPI, merging to drm-next or drm-misc-next should be enough.
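For context, the new connector property carries a blob of struct hdr_output_metadata (the property is named "HDR_OUTPUT_METADATA"). A minimal userspace sketch of setting it through the atomic API is below; the connector/property lookup, the EOTF code, the example light levels, and the header path are assumptions for illustration, not part of this pull request.

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_mode.h>	/* struct hdr_output_metadata; path assumes libdrm's include dir */

/* Hypothetical helper: connector_id and the "HDR_OUTPUT_METADATA" property id
 * are assumed to have been looked up via drmModeObjectGetProperties().
 */
static int set_hdr_metadata(int fd, uint32_t connector_id, uint32_t prop_id)
{
	struct hdr_output_metadata meta;
	drmModeAtomicReq *req;
	uint32_t blob_id;
	int ret;

	memset(&meta, 0, sizeof(meta));
	/* Example values only: PQ (ST 2084) EOTF and some content light levels.
	 * Real code must also fill the metadata descriptor type and the
	 * mastering-display primaries per CTA-861-G.
	 */
	meta.hdmi_metadata_type1.eotf = 2;	/* 2 = SMPTE ST 2084 (PQ) */
	meta.hdmi_metadata_type1.max_cll = 1000;
	meta.hdmi_metadata_type1.max_fall = 400;

	/* The property takes the metadata as a property blob. */
	ret = drmModeCreatePropertyBlob(fd, &meta, sizeof(meta), &blob_id);
	if (ret)
		return ret;

	req = drmModeAtomicAlloc();
	drmModeAtomicAddProperty(req, connector_id, prop_id, blob_id);
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);
	return ret;
}
```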
Cross-subsystem Changes:
- video/hdmi: Add unpack function for DRM infoframes.
- Device tree bindings:
* Updating a property for Mali Midgard GPUs
* Updating a property for STM32 DSI panel
* Adding support for FriendlyELEC HD702E 800x1280 panel
* Adding support for Evervision VGG804821 800x480 5.0" WVGA TFT panel
* Adding support for the EDT ET035012DM6 3.5" 320x240 QVGA 24-bit RGB TFT.
* Adding support for Three Five displays TFC S9700RTWV43TR-01B 800x480 panel
with resistive touch found on TI's AM335X-EVM.
* Adding support for EDT ETM0430G0DH6 480x272 panel.
- Add OSD101T2587-53TS driver with DT bindings.
- Add Samsung S6E63M0 panel driver with DT bindings.
- Add VXT VL050-8048NT-C01 800x480 panel with DT bindings.
- Dma-buf:
- Make mmap callback actually optional.
- Documentation updates.
- Fix debugfs refcount imbalance.
- Remove unused sync_dump function.
- Fix device tree bindings in drm-misc-next after a botched merge.
Core Changes:
- Add support for HDR infoframes and related EDID parsing.
- Remove prime sg_table caching, now done inside dma-buf.
- Add shiny new drm_gem_vram helpers for simple VRAM drivers;
with some fixes to the new API on top.
- Small fix to job cleanup when no timeout handler is set.
- Documentation fixes to drm_fourcc.
- Replace lookups of drm_format with struct drm_format_info;
remove functions that become obsolete by this conversion.
- Remove double include in bridge/panel.c and some drivers.
- Remove drmP.h include from drm/edid and drm/dp.
- Fix null pointer deref in drm_fb_helper_hotplug_event().
- Remove most members from drm_fb_helper_crtc, only mode_set is kept.
- Remove race of fb helpers with userspace; only restore mode
when userspace is not master.
- Move legacy setup from drm_file.c to drm_legacy_misc.c
- Rework scheduler job destruction.
- drm/bus was removed, remove from TODO.
- Add __drm_atomic_helper_crtc_reset() to subclass crtc_state,
and convert some drivers to use it (conversion is not complete yet); see the sketch after this list.
- Bump vblank timeout wait to 100 ms for atomic.
- Docbook fix for drm_hdmi_infoframe_set_hdr_metadata.
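The __drm_atomic_helper_crtc_reset() item above is aimed at drivers that wrap struct drm_crtc_state in a larger structure; a minimal .reset callback built on it might look like the sketch below. The foo_* names and the private member are hypothetical, and the exact helper semantics should be checked against the version that lands in the atomic state helpers.

```c
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
#include <linux/slab.h>

/* Hypothetical driver-private CRTC state wrapping drm_crtc_state. */
struct foo_crtc_state {
	struct drm_crtc_state base;
	bool needs_flush;	/* illustrative driver-specific member */
};

static void foo_crtc_reset(struct drm_crtc *crtc)
{
	struct foo_crtc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	/* Drop any previous state; the base state is the first member, so the
	 * generic destroy helper frees the whole subclass here.
	 */
	if (crtc->state)
		drm_atomic_helper_crtc_destroy_state(crtc, crtc->state);

	/* Links the fresh base state to the CRTC; passing NULL on allocation
	 * failure simply leaves crtc->state NULL.
	 */
	__drm_atomic_helper_crtc_reset(crtc, state ? &state->base : NULL);
}
```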
Driver Changes:
- sun4i: Use DRM_GEM_CMA_VMAP_DRIVER_OPS instead of defining manually.
- v3d: Small cleanups, support for compute shaders,
reservation/synchronization fixes, job management refactoring,
and MMU and debugfs fixes (see the probe sketch after this list).
- lima: Fix null pointer in irq handler on startup, set default timeout for scheduled jobs.
- stm/ltdc: Assorted fixes and adding FB modifier support.
- amdgpu: Avoid hw reset if guilty job was already signaled.
- virtio: Add seqno to fences, add trace events, use correct flags for fence allocation.
- Convert AST, bochs, mgag200, vboxvideo, hisilicon to the new drm_gem_vram API.
- sun6i_mipi_dsi: Support DSI GENERIC_SHORT_WRITE_2 transfers.
- bochs: Small fixes: use PTR_ERR_OR_ZERO, and fix driver unload.
- gma500: Header fixes.
- cirrus: Remove unused files.
- mediatek: Fix compiler warning after merging the HDR series.
- vc4: Rework binner bo handling.
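For the v3d compute-shader support mentioned above, the new DRM_V3D_PARAM_SUPPORTS_CSD parameter lets userspace detect whether the kernel and hardware accept DRM_IOCTL_V3D_SUBMIT_CSD jobs. A rough probe, assuming a render node at /dev/dri/renderD128 and installed UAPI headers, could look like this:

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>
#include <drm/v3d_drm.h>	/* path depends on where the UAPI headers are installed */

int main(void)
{
	/* Render node path is an assumption; pick the right node for the board. */
	int fd = open("/dev/dri/renderD128", O_RDWR);
	struct drm_v3d_get_param get = {
		.param = DRM_V3D_PARAM_SUPPORTS_CSD,
	};

	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_V3D_GET_PARAM, &get) != 0) {
		/* Kernels without this series reject the unknown param with -EINVAL. */
		printf("CSD support query failed\n");
		if (fd >= 0)
			close(fd);
		return 1;
	}

	printf("compute shader dispatch supported: %s\n", get.value ? "yes" : "no");
	close(fd);
	return 0;
}
```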
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/052875a5-27ba-3832-60c2-193d950afdff@linux.intel.com
Diffstat (limited to 'drivers/gpu/drm/v3d')
-rw-r--r-- | drivers/gpu/drm/v3d/v3d_debugfs.c |  35
-rw-r--r-- | drivers/gpu/drm/v3d/v3d_drv.c     |  17
-rw-r--r-- | drivers/gpu/drm/v3d/v3d_drv.h     | 106
-rw-r--r-- | drivers/gpu/drm/v3d/v3d_fence.c   |   2
-rw-r--r-- | drivers/gpu/drm/v3d/v3d_gem.c     | 552
-rw-r--r-- | drivers/gpu/drm/v3d/v3d_irq.c     |  55
-rw-r--r-- | drivers/gpu/drm/v3d/v3d_mmu.c     |   7
-rw-r--r-- | drivers/gpu/drm/v3d/v3d_regs.h    | 122
-rw-r--r-- | drivers/gpu/drm/v3d/v3d_sched.c   | 382
-rw-r--r-- | drivers/gpu/drm/v3d/v3d_trace.h   |  94
10 files changed, 982 insertions, 390 deletions
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index a24af2d2f574..78a78938e81f 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -26,6 +26,11 @@ static const struct v3d_reg_def v3d_hub_reg_defs[] = { REGDEF(V3D_HUB_IDENT3), REGDEF(V3D_HUB_INT_STS), REGDEF(V3D_HUB_INT_MSK_STS), + + REGDEF(V3D_MMU_CTL), + REGDEF(V3D_MMU_VIO_ADDR), + REGDEF(V3D_MMU_VIO_ID), + REGDEF(V3D_MMU_DEBUG_INFO), }; static const struct v3d_reg_def v3d_gca_reg_defs[] = { @@ -50,12 +55,25 @@ static const struct v3d_reg_def v3d_core_reg_defs[] = { REGDEF(V3D_PTB_BPCA), REGDEF(V3D_PTB_BPCS), - REGDEF(V3D_MMU_CTL), - REGDEF(V3D_MMU_VIO_ADDR), - REGDEF(V3D_GMP_STATUS), REGDEF(V3D_GMP_CFG), REGDEF(V3D_GMP_VIO_ADDR), + + REGDEF(V3D_ERR_FDBGO), + REGDEF(V3D_ERR_FDBGB), + REGDEF(V3D_ERR_FDBGS), + REGDEF(V3D_ERR_STAT), +}; + +static const struct v3d_reg_def v3d_csd_reg_defs[] = { + REGDEF(V3D_CSD_STATUS), + REGDEF(V3D_CSD_CURRENT_CFG0), + REGDEF(V3D_CSD_CURRENT_CFG1), + REGDEF(V3D_CSD_CURRENT_CFG2), + REGDEF(V3D_CSD_CURRENT_CFG3), + REGDEF(V3D_CSD_CURRENT_CFG4), + REGDEF(V3D_CSD_CURRENT_CFG5), + REGDEF(V3D_CSD_CURRENT_CFG6), }; static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) @@ -89,6 +107,17 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) V3D_CORE_READ(core, v3d_core_reg_defs[i].reg)); } + + if (v3d_has_csd(v3d)) { + for (i = 0; i < ARRAY_SIZE(v3d_csd_reg_defs); i++) { + seq_printf(m, "core %d %s (0x%04x): 0x%08x\n", + core, + v3d_csd_reg_defs[i].name, + v3d_csd_reg_defs[i].reg, + V3D_CORE_READ(core, + v3d_csd_reg_defs[i].reg)); + } + } } return 0; diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index a06b05f714a5..fea597f4db8a 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -7,9 +7,9 @@ * This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs. * For V3D 2.x support, see the VC4 driver. * - * Currently only single-core rendering using the binner and renderer, - * along with TFU (texture formatting unit) rendering is supported. - * V3D 4.x's CSD (compute shader dispatch) is not yet supported. + * The V3D GPU includes a tiled render (composed of a bin and render + * pipelines), the TFU (texture formatting unit), and the CSD (compute + * shader dispatch). 
*/ #include <linux/clk.h> @@ -120,6 +120,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, case DRM_V3D_PARAM_SUPPORTS_TFU: args->value = 1; return 0; + case DRM_V3D_PARAM_SUPPORTS_CSD: + args->value = v3d_has_csd(v3d); + return 0; default: DRM_DEBUG("Unknown parameter %d\n", args->param); return -EINVAL; @@ -179,6 +182,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), + DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CSD, v3d_submit_csd_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), }; static struct drm_driver v3d_drm_driver = { @@ -235,9 +239,9 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) struct drm_device *drm; struct v3d_dev *v3d; int ret; + u32 mmu_debug; u32 ident1; - dev->coherent_dma_mask = DMA_BIT_MASK(36); v3d = kzalloc(sizeof(*v3d), GFP_KERNEL); if (!v3d) @@ -254,6 +258,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) if (ret) goto dev_free; + mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO); + dev->coherent_dma_mask = + DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)); + v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH); + ident1 = V3D_READ(V3D_HUB_IDENT1); v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV)); diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index e9d4a2fdcf44..9aad9da1eb11 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -16,9 +16,11 @@ enum v3d_queue { V3D_BIN, V3D_RENDER, V3D_TFU, + V3D_CSD, + V3D_CACHE_CLEAN, }; -#define V3D_MAX_QUEUES (V3D_TFU + 1) +#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1) struct v3d_queue_state { struct drm_gpu_scheduler sched; @@ -55,6 +57,8 @@ struct v3d_dev { */ void *mmu_scratch; dma_addr_t mmu_scratch_paddr; + /* virtual address bits from V3D to the MMU. */ + int va_width; /* Number of V3D cores. */ u32 cores; @@ -67,9 +71,10 @@ struct v3d_dev { struct work_struct overflow_mem_work; - struct v3d_exec_info *bin_job; - struct v3d_exec_info *render_job; + struct v3d_bin_job *bin_job; + struct v3d_render_job *render_job; struct v3d_tfu_job *tfu_job; + struct v3d_csd_job *csd_job; struct v3d_queue_state queue[V3D_MAX_QUEUES]; @@ -92,6 +97,12 @@ struct v3d_dev { */ struct mutex sched_lock; + /* Lock taken during a cache clean and when initiating an L2 + * flush, to keep L2 flushes from interfering with the + * synchronous L2 cleans. + */ + struct mutex cache_clean_lock; + struct { u32 num_allocated; u32 pages_allocated; @@ -104,6 +115,12 @@ to_v3d_dev(struct drm_device *dev) return (struct v3d_dev *)dev->dev_private; } +static inline bool +v3d_has_csd(struct v3d_dev *v3d) +{ + return v3d->ver >= 41; +} + /* The per-fd struct, which tracks the MMU mappings. */ struct v3d_file_priv { struct v3d_dev *v3d; @@ -117,7 +134,7 @@ struct v3d_bo { struct drm_mm_node node; /* List entry for the BO's position in - * v3d_exec_info->unref_list + * v3d_render_job->unref_list */ struct list_head unref_head; }; @@ -157,67 +174,74 @@ to_v3d_fence(struct dma_fence *fence) struct v3d_job { struct drm_sched_job base; - struct v3d_exec_info *exec; + struct kref refcount; - /* An optional fence userspace can pass in for the job to depend on. 
*/ - struct dma_fence *in_fence; + struct v3d_dev *v3d; + + /* This is the array of BOs that were looked up at the start + * of submission. + */ + struct drm_gem_object **bo; + u32 bo_count; + + /* Array of struct dma_fence * to block on before submitting this job. + */ + struct xarray deps; + unsigned long last_dep; /* v3d fence to be signaled by IRQ handler when the job is complete. */ struct dma_fence *irq_fence; + /* scheduler fence for when the job is considered complete and + * the BO reservations can be released. + */ + struct dma_fence *done_fence; + + /* Callback for the freeing of the job on refcount going to 0. */ + void (*free)(struct kref *ref); +}; + +struct v3d_bin_job { + struct v3d_job base; + /* GPU virtual addresses of the start/end of the CL job. */ u32 start, end; u32 timedout_ctca, timedout_ctra; -}; -struct v3d_exec_info { - struct v3d_dev *v3d; + /* Corresponding render job, for attaching our overflow memory. */ + struct v3d_render_job *render; - struct v3d_job bin, render; - - /* Fence for when the scheduler considers the binner to be - * done, for render to depend on. - */ - struct dma_fence *bin_done_fence; + /* Submitted tile memory allocation start/size, tile state. */ + u32 qma, qms, qts; +}; - /* Fence for when the scheduler considers the render to be - * done, for when the BOs reservations should be complete. - */ - struct dma_fence *render_done_fence; +struct v3d_render_job { + struct v3d_job base; - struct kref refcount; + /* GPU virtual addresses of the start/end of the CL job. */ + u32 start, end; - /* This is the array of BOs that were looked up at the start of exec. */ - struct v3d_bo **bo; - u32 bo_count; + u32 timedout_ctca, timedout_ctra; /* List of overflow BOs used in the job that need to be * released once the job is complete. */ struct list_head unref_list; - - /* Submitted tile memory allocation start/size, tile state. */ - u32 qma, qms, qts; }; struct v3d_tfu_job { - struct drm_sched_job base; + struct v3d_job base; struct drm_v3d_submit_tfu args; +}; - /* An optional fence userspace can pass in for the job to depend on. */ - struct dma_fence *in_fence; - - /* v3d fence to be signaled by IRQ handler when the job is complete. */ - struct dma_fence *irq_fence; - - struct v3d_dev *v3d; +struct v3d_csd_job { + struct v3d_job base; - struct kref refcount; + u32 timedout_batches; - /* This is the array of BOs that were looked up at the start of exec. 
*/ - struct v3d_bo *bo[4]; + struct drm_v3d_submit_csd args; }; /** @@ -281,12 +305,14 @@ int v3d_submit_cl_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int v3d_submit_csd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void v3d_exec_put(struct v3d_exec_info *exec); -void v3d_tfu_job_put(struct v3d_tfu_job *exec); +void v3d_job_put(struct v3d_job *job); void v3d_reset(struct v3d_dev *v3d); void v3d_invalidate_caches(struct v3d_dev *v3d); +void v3d_clean_caches(struct v3d_dev *v3d); /* v3d_irq.c */ int v3d_irq_init(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c index b0a2a1ae2eb1..89840ed212c0 100644 --- a/drivers/gpu/drm/v3d/v3d_fence.c +++ b/drivers/gpu/drm/v3d/v3d_fence.c @@ -36,6 +36,8 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence) return "v3d-render"; case V3D_TFU: return "v3d-tfu"; + case V3D_CSD: + return "v3d-csd"; default: return NULL; } diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 93ff8fcbe475..27e0f87075d9 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -109,7 +109,9 @@ v3d_reset(struct v3d_dev *v3d) { struct drm_device *dev = &v3d->drm; - DRM_ERROR("Resetting GPU.\n"); + DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n"); + DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n", + V3D_CORE_READ(0, V3D_ERR_STAT)); trace_v3d_reset_begin(dev); /* XXX: only needed for safe powerdown, not reset. */ @@ -162,10 +164,52 @@ v3d_flush_l2t(struct v3d_dev *v3d, int core) /* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't * need to wait for completion before dispatching the job -- * L2T accesses will be stalled until the flush has completed. + * However, we do need to make sure we don't try to trigger a + * new flush while the L2_CLEAN queue is trying to + * synchronously clean after a job. */ + mutex_lock(&v3d->cache_clean_lock); V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_L2TFLS | V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM)); + mutex_unlock(&v3d->cache_clean_lock); +} + +/* Cleans texture L1 and L2 cachelines (writing back dirty data). + * + * For cleaning, which happens from the CACHE_CLEAN queue after CSD has + * executed, we need to make sure that the clean is done before + * signaling job completion. So, we synchronously wait before + * returning, and we make sure that L2 invalidates don't happen in the + * meantime to confuse our are-we-done checks. + */ +void +v3d_clean_caches(struct v3d_dev *v3d) +{ + struct drm_device *dev = &v3d->drm; + int core = 0; + + trace_v3d_cache_clean_begin(dev); + + V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF); + if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & + V3D_L2TCACTL_L2TFLS), 100)) { + DRM_ERROR("Timeout waiting for L1T write combiner flush\n"); + } + + mutex_lock(&v3d->cache_clean_lock); + V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, + V3D_L2TCACTL_L2TFLS | + V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM)); + + if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) & + V3D_L2TCACTL_L2TFLS), 100)) { + DRM_ERROR("Timeout waiting for L2T clean\n"); + } + + mutex_unlock(&v3d->cache_clean_lock); + + trace_v3d_cache_clean_end(dev); } /* Invalidates the slice caches. These are read-only caches. 
*/ @@ -193,28 +237,6 @@ v3d_invalidate_caches(struct v3d_dev *v3d) v3d_invalidate_slices(v3d, 0); } -static void -v3d_attach_object_fences(struct v3d_bo **bos, int bo_count, - struct dma_fence *fence) -{ - int i; - - for (i = 0; i < bo_count; i++) { - /* XXX: Use shared fences for read-only objects. */ - reservation_object_add_excl_fence(bos[i]->base.base.resv, - fence); - } -} - -static void -v3d_unlock_bo_reservations(struct v3d_bo **bos, - int bo_count, - struct ww_acquire_ctx *acquire_ctx) -{ - drm_gem_unlock_reservations((struct drm_gem_object **)bos, bo_count, - acquire_ctx); -} - /* Takes the reservation lock on all the BOs being referenced, so that * at queue submit time we can update the reservations. * @@ -223,26 +245,21 @@ v3d_unlock_bo_reservations(struct v3d_bo **bos, * to v3d, so we don't attach dma-buf fences to them. */ static int -v3d_lock_bo_reservations(struct v3d_bo **bos, - int bo_count, +v3d_lock_bo_reservations(struct v3d_job *job, struct ww_acquire_ctx *acquire_ctx) { int i, ret; - ret = drm_gem_lock_reservations((struct drm_gem_object **)bos, - bo_count, acquire_ctx); + ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); if (ret) return ret; - /* Reserve space for our shared (read-only) fence references, - * before we commit the CL to the hardware. - */ - for (i = 0; i < bo_count; i++) { - ret = reservation_object_reserve_shared(bos[i]->base.base.resv, - 1); + for (i = 0; i < job->bo_count; i++) { + ret = drm_gem_fence_array_add_implicit(&job->deps, + job->bo[i], true); if (ret) { - v3d_unlock_bo_reservations(bos, bo_count, - acquire_ctx); + drm_gem_unlock_reservations(job->bo, job->bo_count, + acquire_ctx); return ret; } } @@ -251,11 +268,11 @@ v3d_lock_bo_reservations(struct v3d_bo **bos, } /** - * v3d_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects + * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects * referenced by the job. * @dev: DRM device * @file_priv: DRM file for this fd - * @exec: V3D job being set up + * @job: V3D job being set up * * The command validator needs to reference BOs by their index within * the submitted job's BO list. This does the validation of the job's @@ -265,18 +282,19 @@ v3d_lock_bo_reservations(struct v3d_bo **bos, * failure, because that will happen at v3d_exec_cleanup() time. */ static int -v3d_cl_lookup_bos(struct drm_device *dev, - struct drm_file *file_priv, - struct drm_v3d_submit_cl *args, - struct v3d_exec_info *exec) +v3d_lookup_bos(struct drm_device *dev, + struct drm_file *file_priv, + struct v3d_job *job, + u64 bo_handles, + u32 bo_count) { u32 *handles; int ret = 0; int i; - exec->bo_count = args->bo_handle_count; + job->bo_count = bo_count; - if (!exec->bo_count) { + if (!job->bo_count) { /* See comment on bo_index for why we have to check * this. 
*/ @@ -284,15 +302,15 @@ v3d_cl_lookup_bos(struct drm_device *dev, return -EINVAL; } - exec->bo = kvmalloc_array(exec->bo_count, - sizeof(struct drm_gem_cma_object *), - GFP_KERNEL | __GFP_ZERO); - if (!exec->bo) { + job->bo = kvmalloc_array(job->bo_count, + sizeof(struct drm_gem_cma_object *), + GFP_KERNEL | __GFP_ZERO); + if (!job->bo) { DRM_DEBUG("Failed to allocate validated BO pointers\n"); return -ENOMEM; } - handles = kvmalloc_array(exec->bo_count, sizeof(u32), GFP_KERNEL); + handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL); if (!handles) { ret = -ENOMEM; DRM_DEBUG("Failed to allocate incoming GEM handles\n"); @@ -300,15 +318,15 @@ v3d_cl_lookup_bos(struct drm_device *dev, } if (copy_from_user(handles, - (void __user *)(uintptr_t)args->bo_handles, - exec->bo_count * sizeof(u32))) { + (void __user *)(uintptr_t)bo_handles, + job->bo_count * sizeof(u32))) { ret = -EFAULT; DRM_DEBUG("Failed to copy in GEM handles\n"); goto fail; } spin_lock(&file_priv->table_lock); - for (i = 0; i < exec->bo_count; i++) { + for (i = 0; i < job->bo_count; i++) { struct drm_gem_object *bo = idr_find(&file_priv->object_idr, handles[i]); if (!bo) { @@ -319,7 +337,7 @@ v3d_cl_lookup_bos(struct drm_device *dev, goto fail; } drm_gem_object_get(bo); - exec->bo[i] = to_v3d_bo(bo); + job->bo[i] = bo; } spin_unlock(&file_priv->table_lock); @@ -329,67 +347,50 @@ fail: } static void -v3d_exec_cleanup(struct kref *ref) +v3d_job_free(struct kref *ref) { - struct v3d_exec_info *exec = container_of(ref, struct v3d_exec_info, - refcount); - struct v3d_dev *v3d = exec->v3d; - unsigned int i; - struct v3d_bo *bo, *save; - - dma_fence_put(exec->bin.in_fence); - dma_fence_put(exec->render.in_fence); - - dma_fence_put(exec->bin.irq_fence); - dma_fence_put(exec->render.irq_fence); - - dma_fence_put(exec->bin_done_fence); - dma_fence_put(exec->render_done_fence); + struct v3d_job *job = container_of(ref, struct v3d_job, refcount); + unsigned long index; + struct dma_fence *fence; + int i; - for (i = 0; i < exec->bo_count; i++) - drm_gem_object_put_unlocked(&exec->bo[i]->base.base); - kvfree(exec->bo); + for (i = 0; i < job->bo_count; i++) { + if (job->bo[i]) + drm_gem_object_put_unlocked(job->bo[i]); + } + kvfree(job->bo); - list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) { - drm_gem_object_put_unlocked(&bo->base.base); + xa_for_each(&job->deps, index, fence) { + dma_fence_put(fence); } + xa_destroy(&job->deps); - pm_runtime_mark_last_busy(v3d->dev); - pm_runtime_put_autosuspend(v3d->dev); + dma_fence_put(job->irq_fence); + dma_fence_put(job->done_fence); - kfree(exec); -} + pm_runtime_mark_last_busy(job->v3d->dev); + pm_runtime_put_autosuspend(job->v3d->dev); -void v3d_exec_put(struct v3d_exec_info *exec) -{ - kref_put(&exec->refcount, v3d_exec_cleanup); + kfree(job); } static void -v3d_tfu_job_cleanup(struct kref *ref) +v3d_render_job_free(struct kref *ref) { - struct v3d_tfu_job *job = container_of(ref, struct v3d_tfu_job, - refcount); - struct v3d_dev *v3d = job->v3d; - unsigned int i; - - dma_fence_put(job->in_fence); - dma_fence_put(job->irq_fence); + struct v3d_render_job *job = container_of(ref, struct v3d_render_job, + base.refcount); + struct v3d_bo *bo, *save; - for (i = 0; i < ARRAY_SIZE(job->bo); i++) { - if (job->bo[i]) - drm_gem_object_put_unlocked(&job->bo[i]->base.base); + list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) { + drm_gem_object_put_unlocked(&bo->base.base); } - pm_runtime_mark_last_busy(v3d->dev); - pm_runtime_put_autosuspend(v3d->dev); - - 
kfree(job); + v3d_job_free(ref); } -void v3d_tfu_job_put(struct v3d_tfu_job *job) +void v3d_job_put(struct v3d_job *job) { - kref_put(&job->refcount, v3d_tfu_job_cleanup); + kref_put(&job->refcount, job->free); } int @@ -425,6 +426,87 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data, return ret; } +static int +v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, + struct v3d_job *job, void (*free)(struct kref *ref), + u32 in_sync) +{ + struct dma_fence *in_fence = NULL; + int ret; + + job->v3d = v3d; + job->free = free; + + ret = pm_runtime_get_sync(v3d->dev); + if (ret < 0) + return ret; + + xa_init_flags(&job->deps, XA_FLAGS_ALLOC); + + ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence); + if (ret == -EINVAL) + goto fail; + + ret = drm_gem_fence_array_add(&job->deps, in_fence); + if (ret) + goto fail; + + kref_init(&job->refcount); + + return 0; +fail: + xa_destroy(&job->deps); + pm_runtime_put_autosuspend(v3d->dev); + return ret; +} + +static int +v3d_push_job(struct v3d_file_priv *v3d_priv, + struct v3d_job *job, enum v3d_queue queue) +{ + int ret; + + ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], + v3d_priv); + if (ret) + return ret; + + job->done_fence = dma_fence_get(&job->base.s_fence->finished); + + /* put by scheduler job completion */ + kref_get(&job->refcount); + + drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]); + + return 0; +} + +static void +v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, + struct v3d_job *job, + struct ww_acquire_ctx *acquire_ctx, + u32 out_sync, + struct dma_fence *done_fence) +{ + struct drm_syncobj *sync_out; + int i; + + for (i = 0; i < job->bo_count; i++) { + /* XXX: Use shared fences for read-only objects. */ + reservation_object_add_excl_fence(job->bo[i]->resv, + job->done_fence); + } + + drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx); + + /* Update the return sync object for the job */ + sync_out = drm_syncobj_find(file_priv, out_sync); + if (sync_out) { + drm_syncobj_replace_fence(sync_out, done_fence); + drm_syncobj_put(sync_out); + } +} + /** * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D. 
* @dev: DRM device @@ -444,9 +526,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct drm_v3d_submit_cl *args = data; - struct v3d_exec_info *exec; + struct v3d_bin_job *bin = NULL; + struct v3d_render_job *render; struct ww_acquire_ctx acquire_ctx; - struct drm_syncobj *sync_out; int ret = 0; trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); @@ -456,100 +538,87 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - exec = kcalloc(1, sizeof(*exec), GFP_KERNEL); - if (!exec) + render = kcalloc(1, sizeof(*render), GFP_KERNEL); + if (!render) return -ENOMEM; - ret = pm_runtime_get_sync(v3d->dev); - if (ret < 0) { - kfree(exec); + render->start = args->rcl_start; + render->end = args->rcl_end; + INIT_LIST_HEAD(&render->unref_list); + + ret = v3d_job_init(v3d, file_priv, &render->base, + v3d_render_job_free, args->in_sync_rcl); + if (ret) { + kfree(render); return ret; } - kref_init(&exec->refcount); + if (args->bcl_start != args->bcl_end) { + bin = kcalloc(1, sizeof(*bin), GFP_KERNEL); + if (!bin) + return -ENOMEM; - ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl, - 0, 0, &exec->bin.in_fence); - if (ret == -EINVAL) - goto fail; + ret = v3d_job_init(v3d, file_priv, &bin->base, + v3d_job_free, args->in_sync_bcl); + if (ret) { + v3d_job_put(&render->base); + return ret; + } - ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl, - 0, 0, &exec->render.in_fence); - if (ret == -EINVAL) - goto fail; + bin->start = args->bcl_start; + bin->end = args->bcl_end; + bin->qma = args->qma; + bin->qms = args->qms; + bin->qts = args->qts; + bin->render = render; + } - exec->qma = args->qma; - exec->qms = args->qms; - exec->qts = args->qts; - exec->bin.exec = exec; - exec->bin.start = args->bcl_start; - exec->bin.end = args->bcl_end; - exec->render.exec = exec; - exec->render.start = args->rcl_start; - exec->render.end = args->rcl_end; - exec->v3d = v3d; - INIT_LIST_HEAD(&exec->unref_list); - - ret = v3d_cl_lookup_bos(dev, file_priv, args, exec); + ret = v3d_lookup_bos(dev, file_priv, &render->base, + args->bo_handles, args->bo_handle_count); if (ret) goto fail; - ret = v3d_lock_bo_reservations(exec->bo, exec->bo_count, - &acquire_ctx); + ret = v3d_lock_bo_reservations(&render->base, &acquire_ctx); if (ret) goto fail; mutex_lock(&v3d->sched_lock); - if (exec->bin.start != exec->bin.end) { - ret = drm_sched_job_init(&exec->bin.base, - &v3d_priv->sched_entity[V3D_BIN], - v3d_priv); + if (bin) { + ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN); if (ret) goto fail_unreserve; - exec->bin_done_fence = - dma_fence_get(&exec->bin.base.s_fence->finished); - - kref_get(&exec->refcount); /* put by scheduler job completion */ - drm_sched_entity_push_job(&exec->bin.base, - &v3d_priv->sched_entity[V3D_BIN]); + ret = drm_gem_fence_array_add(&render->base.deps, + dma_fence_get(bin->base.done_fence)); + if (ret) + goto fail_unreserve; } - ret = drm_sched_job_init(&exec->render.base, - &v3d_priv->sched_entity[V3D_RENDER], - v3d_priv); + ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER); if (ret) goto fail_unreserve; - - exec->render_done_fence = - dma_fence_get(&exec->render.base.s_fence->finished); - - kref_get(&exec->refcount); /* put by scheduler job completion */ - drm_sched_entity_push_job(&exec->render.base, - &v3d_priv->sched_entity[V3D_RENDER]); mutex_unlock(&v3d->sched_lock); - v3d_attach_object_fences(exec->bo, exec->bo_count, - 
exec->render_done_fence); - - v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx); - - /* Update the return sync object for the */ - sync_out = drm_syncobj_find(file_priv, args->out_sync); - if (sync_out) { - drm_syncobj_replace_fence(sync_out, exec->render_done_fence); - drm_syncobj_put(sync_out); - } + v3d_attach_fences_and_unlock_reservation(file_priv, + &render->base, + &acquire_ctx, + args->out_sync, + render->base.done_fence); - v3d_exec_put(exec); + if (bin) + v3d_job_put(&bin->base); + v3d_job_put(&render->base); return 0; fail_unreserve: mutex_unlock(&v3d->sched_lock); - v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx); + drm_gem_unlock_reservations(render->base.bo, + render->base.bo_count, &acquire_ctx); fail: - v3d_exec_put(exec); + if (bin) + v3d_job_put(&bin->base); + v3d_job_put(&render->base); return ret; } @@ -572,10 +641,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, struct drm_v3d_submit_tfu *args = data; struct v3d_tfu_job *job; struct ww_acquire_ctx acquire_ctx; - struct drm_syncobj *sync_out; - struct dma_fence *sched_done_fence; int ret = 0; - int bo_count; trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia); @@ -583,81 +649,172 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data, if (!job) return -ENOMEM; - ret = pm_runtime_get_sync(v3d->dev); - if (ret < 0) { + ret = v3d_job_init(v3d, file_priv, &job->base, + v3d_job_free, args->in_sync); + if (ret) { kfree(job); return ret; } - kref_init(&job->refcount); - - ret = drm_syncobj_find_fence(file_priv, args->in_sync, - 0, 0, &job->in_fence); - if (ret == -EINVAL) - goto fail; + job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles), + sizeof(*job->base.bo), GFP_KERNEL); + if (!job->base.bo) { + v3d_job_put(&job->base); + return -ENOMEM; + } job->args = *args; - job->v3d = v3d; spin_lock(&file_priv->table_lock); - for (bo_count = 0; bo_count < ARRAY_SIZE(job->bo); bo_count++) { + for (job->base.bo_count = 0; + job->base.bo_count < ARRAY_SIZE(args->bo_handles); + job->base.bo_count++) { struct drm_gem_object *bo; - if (!args->bo_handles[bo_count]) + if (!args->bo_handles[job->base.bo_count]) break; bo = idr_find(&file_priv->object_idr, - args->bo_handles[bo_count]); + args->bo_handles[job->base.bo_count]); if (!bo) { DRM_DEBUG("Failed to look up GEM BO %d: %d\n", - bo_count, args->bo_handles[bo_count]); + job->base.bo_count, + args->bo_handles[job->base.bo_count]); ret = -ENOENT; spin_unlock(&file_priv->table_lock); goto fail; } drm_gem_object_get(bo); - job->bo[bo_count] = to_v3d_bo(bo); + job->base.bo[job->base.bo_count] = bo; } spin_unlock(&file_priv->table_lock); - ret = v3d_lock_bo_reservations(job->bo, bo_count, &acquire_ctx); + ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx); if (ret) goto fail; mutex_lock(&v3d->sched_lock); - ret = drm_sched_job_init(&job->base, - &v3d_priv->sched_entity[V3D_TFU], - v3d_priv); + ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU); if (ret) goto fail_unreserve; + mutex_unlock(&v3d->sched_lock); - sched_done_fence = dma_fence_get(&job->base.s_fence->finished); + v3d_attach_fences_and_unlock_reservation(file_priv, + &job->base, &acquire_ctx, + args->out_sync, + job->base.done_fence); - kref_get(&job->refcount); /* put by scheduler job completion */ - drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[V3D_TFU]); + v3d_job_put(&job->base); + + return 0; + +fail_unreserve: mutex_unlock(&v3d->sched_lock); + drm_gem_unlock_reservations(job->base.bo, job->base.bo_count, + &acquire_ctx); +fail: + v3d_job_put(&job->base); - 
v3d_attach_object_fences(job->bo, bo_count, sched_done_fence); + return ret; +} - v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx); +/** + * v3d_submit_csd_ioctl() - Submits a CSD (texture formatting) job to the V3D. + * @dev: DRM device + * @data: ioctl argument + * @file_priv: DRM file for this fd + * + * Userspace provides the register setup for the CSD, which we don't + * need to validate since the CSD is behind the MMU. + */ +int +v3d_submit_csd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_submit_csd *args = data; + struct v3d_csd_job *job; + struct v3d_job *clean_job; + struct ww_acquire_ctx acquire_ctx; + int ret; - /* Update the return sync object */ - sync_out = drm_syncobj_find(file_priv, args->out_sync); - if (sync_out) { - drm_syncobj_replace_fence(sync_out, sched_done_fence); - drm_syncobj_put(sync_out); + trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]); + + if (!v3d_has_csd(v3d)) { + DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n"); + return -EINVAL; + } + + job = kcalloc(1, sizeof(*job), GFP_KERNEL); + if (!job) + return -ENOMEM; + + ret = v3d_job_init(v3d, file_priv, &job->base, + v3d_job_free, args->in_sync); + if (ret) { + kfree(job); + return ret; + } + + clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL); + if (!clean_job) { + v3d_job_put(&job->base); + kfree(job); + return -ENOMEM; } - dma_fence_put(sched_done_fence); - v3d_tfu_job_put(job); + ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0); + if (ret) { + v3d_job_put(&job->base); + kfree(clean_job); + return ret; + } + + job->args = *args; + + ret = v3d_lookup_bos(dev, file_priv, clean_job, + args->bo_handles, args->bo_handle_count); + if (ret) + goto fail; + + ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx); + if (ret) + goto fail; + + mutex_lock(&v3d->sched_lock); + ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD); + if (ret) + goto fail_unreserve; + + ret = drm_gem_fence_array_add(&clean_job->deps, + dma_fence_get(job->base.done_fence)); + if (ret) + goto fail_unreserve; + + ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN); + if (ret) + goto fail_unreserve; + mutex_unlock(&v3d->sched_lock); + + v3d_attach_fences_and_unlock_reservation(file_priv, + clean_job, + &acquire_ctx, + args->out_sync, + clean_job->done_fence); + + v3d_job_put(&job->base); + v3d_job_put(clean_job); return 0; fail_unreserve: mutex_unlock(&v3d->sched_lock); - v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx); + drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count, + &acquire_ctx); fail: - v3d_tfu_job_put(job); + v3d_job_put(&job->base); + v3d_job_put(clean_job); return ret; } @@ -677,6 +834,7 @@ v3d_gem_init(struct drm_device *dev) mutex_init(&v3d->bo_lock); mutex_init(&v3d->reset_lock); mutex_init(&v3d->sched_lock); + mutex_init(&v3d->cache_clean_lock); /* Note: We don't allocate address 0. Various bits of HW * treat 0 as special, such as the occlusion query counters @@ -715,7 +873,7 @@ v3d_gem_destroy(struct drm_device *dev) v3d_sched_fini(v3d); - /* Waiting for exec to finish would need to be done before + /* Waiting for jobs to finish would need to be done before * unregistering V3D. 
*/ WARN_ON(v3d->bin_job); diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index aa0a180ae700..268d8a889ac5 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -4,9 +4,9 @@ /** * DOC: Interrupt management for the V3D engine * - * When we take a bin, render, or TFU done interrupt, we need to - * signal the fence for that job so that the scheduler can queue up - * the next one and unblock any waiters. + * When we take a bin, render, TFU done, or CSD done interrupt, we + * need to signal the fence for that job so that the scheduler can + * queue up the next one and unblock any waiters. * * When we take the binner out of memory interrupt, we need to * allocate some new memory and pass it to the binner so that the @@ -20,6 +20,7 @@ #define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \ V3D_INT_FLDONE | \ V3D_INT_FRDONE | \ + V3D_INT_CSDDONE | \ V3D_INT_GMPV)) #define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \ @@ -62,7 +63,7 @@ v3d_overflow_mem_work(struct work_struct *work) } drm_gem_object_get(obj); - list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list); + list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list); spin_unlock_irqrestore(&v3d->job_lock, irqflags); V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT); @@ -96,7 +97,7 @@ v3d_irq(int irq, void *arg) if (intsts & V3D_INT_FLDONE) { struct v3d_fence *fence = - to_v3d_fence(v3d->bin_job->bin.irq_fence); + to_v3d_fence(v3d->bin_job->base.irq_fence); trace_v3d_bcl_irq(&v3d->drm, fence->seqno); dma_fence_signal(&fence->base); @@ -105,13 +106,22 @@ v3d_irq(int irq, void *arg) if (intsts & V3D_INT_FRDONE) { struct v3d_fence *fence = - to_v3d_fence(v3d->render_job->render.irq_fence); + to_v3d_fence(v3d->render_job->base.irq_fence); trace_v3d_rcl_irq(&v3d->drm, fence->seqno); dma_fence_signal(&fence->base); status = IRQ_HANDLED; } + if (intsts & V3D_INT_CSDDONE) { + struct v3d_fence *fence = + to_v3d_fence(v3d->csd_job->base.irq_fence); + + trace_v3d_csd_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); + status = IRQ_HANDLED; + } + /* We shouldn't be triggering these if we have GMP in * always-allowed mode. */ @@ -141,7 +151,7 @@ v3d_hub_irq(int irq, void *arg) if (intsts & V3D_HUB_INT_TFUC) { struct v3d_fence *fence = - to_v3d_fence(v3d->tfu_job->irq_fence); + to_v3d_fence(v3d->tfu_job->base.irq_fence); trace_v3d_tfu_irq(&v3d->drm, fence->seqno); dma_fence_signal(&fence->base); @@ -152,10 +162,33 @@ v3d_hub_irq(int irq, void *arg) V3D_HUB_INT_MMU_PTI | V3D_HUB_INT_MMU_CAP)) { u32 axi_id = V3D_READ(V3D_MMU_VIO_ID); - u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8; - - dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n", - axi_id, (long long)vio_addr, + u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) << + (v3d->va_width - 32)); + static const char *const v3d41_axi_ids[] = { + "L2T", + "PTB", + "PSE", + "TLB", + "CLE", + "TFU", + "MMU", + "GMP", + }; + const char *client = "?"; + + V3D_WRITE(V3D_MMU_CTL, + V3D_READ(V3D_MMU_CTL) & (V3D_MMU_CTL_CAP_EXCEEDED | + V3D_MMU_CTL_PT_INVALID | + V3D_MMU_CTL_WRITE_VIOLATION)); + + if (v3d->ver >= 41) { + axi_id = axi_id >> 5; + if (axi_id < ARRAY_SIZE(v3d41_axi_ids)) + client = v3d41_axi_ids[axi_id]; + } + + dev_err(v3d->dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n", + client, axi_id, (long long)vio_addr, ((intsts & V3D_HUB_INT_MMU_WRV) ? ", write violation" : ""), ((intsts & V3D_HUB_INT_MMU_PTI) ? 
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c index 7a21f1787ab1..395e81d97163 100644 --- a/drivers/gpu/drm/v3d/v3d_mmu.c +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -69,10 +69,13 @@ int v3d_mmu_set_page_table(struct v3d_dev *v3d) V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT); V3D_WRITE(V3D_MMU_CTL, V3D_MMU_CTL_ENABLE | - V3D_MMU_CTL_PT_INVALID | + V3D_MMU_CTL_PT_INVALID_ENABLE | V3D_MMU_CTL_PT_INVALID_ABORT | + V3D_MMU_CTL_PT_INVALID_INT | V3D_MMU_CTL_WRITE_VIOLATION_ABORT | - V3D_MMU_CTL_CAP_EXCEEDED_ABORT); + V3D_MMU_CTL_WRITE_VIOLATION_INT | + V3D_MMU_CTL_CAP_EXCEEDED_ABORT | + V3D_MMU_CTL_CAP_EXCEEDED_INT); V3D_WRITE(V3D_MMU_ILLEGAL_ADDR, (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) | V3D_MMU_ILLEGAL_ADDR_ENABLE); diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h index 8e88af237610..9bcb57781d31 100644 --- a/drivers/gpu/drm/v3d/v3d_regs.h +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -152,7 +152,8 @@ # define V3D_MMU_CTL_PT_INVALID_ABORT BIT(19) # define V3D_MMU_CTL_PT_INVALID_INT BIT(18) # define V3D_MMU_CTL_PT_INVALID_EXCEPTION BIT(17) -# define V3D_MMU_CTL_WRITE_VIOLATION BIT(16) +# define V3D_MMU_CTL_PT_INVALID_ENABLE BIT(16) +# define V3D_MMU_CTL_WRITE_VIOLATION BIT(12) # define V3D_MMU_CTL_WRITE_VIOLATION_ABORT BIT(11) # define V3D_MMU_CTL_WRITE_VIOLATION_INT BIT(10) # define V3D_MMU_CTL_WRITE_VIOLATION_EXCEPTION BIT(9) @@ -191,6 +192,14 @@ /* Address that faulted */ #define V3D_MMU_VIO_ADDR 0x01234 +#define V3D_MMU_DEBUG_INFO 0x01238 +# define V3D_MMU_PA_WIDTH_MASK V3D_MASK(11, 8) +# define V3D_MMU_PA_WIDTH_SHIFT 8 +# define V3D_MMU_VA_WIDTH_MASK V3D_MASK(7, 4) +# define V3D_MMU_VA_WIDTH_SHIFT 4 +# define V3D_MMU_VERSION_MASK V3D_MASK(3, 0) +# define V3D_MMU_VERSION_SHIFT 0 + /* Per-V3D-core registers */ #define V3D_CTL_IDENT0 0x00000 @@ -238,8 +247,11 @@ #define V3D_CTL_L2TCACTL 0x00030 # define V3D_L2TCACTL_TMUWCF BIT(8) # define V3D_L2TCACTL_L2T_NO_WM BIT(4) +/* Invalidates cache lines. */ # define V3D_L2TCACTL_FLM_FLUSH 0 +/* Removes cachelines without writing dirty lines back. */ # define V3D_L2TCACTL_FLM_CLEAR 1 +/* Writes out dirty cachelines and marks them clean, but doesn't invalidate. 
*/ # define V3D_L2TCACTL_FLM_CLEAN 2 # define V3D_L2TCACTL_FLM_MASK V3D_MASK(2, 1) # define V3D_L2TCACTL_FLM_SHIFT 1 @@ -255,6 +267,8 @@ #define V3D_CTL_INT_MSK_CLR 0x00064 # define V3D_INT_QPU_MASK V3D_MASK(27, 16) # define V3D_INT_QPU_SHIFT 16 +# define V3D_INT_CSDDONE BIT(7) +# define V3D_INT_PCTR BIT(6) # define V3D_INT_GMPV BIT(5) # define V3D_INT_TRFB BIT(4) # define V3D_INT_SPILLUSE BIT(3) @@ -374,4 +388,110 @@ #define V3D_GMP_PRESERVE_LOAD 0x00818 #define V3D_GMP_VALID_LINES 0x00820 +#define V3D_CSD_STATUS 0x00900 +# define V3D_CSD_STATUS_NUM_COMPLETED_MASK V3D_MASK(11, 4) +# define V3D_CSD_STATUS_NUM_COMPLETED_SHIFT 4 +# define V3D_CSD_STATUS_NUM_ACTIVE_MASK V3D_MASK(3, 2) +# define V3D_CSD_STATUS_NUM_ACTIVE_SHIFT 2 +# define V3D_CSD_STATUS_HAVE_CURRENT_DISPATCH BIT(1) +# define V3D_CSD_STATUS_HAVE_QUEUED_DISPATCH BIT(0) + +#define V3D_CSD_QUEUED_CFG0 0x00904 +# define V3D_CSD_QUEUED_CFG0_NUM_WGS_X_MASK V3D_MASK(31, 16) +# define V3D_CSD_QUEUED_CFG0_NUM_WGS_X_SHIFT 16 +# define V3D_CSD_QUEUED_CFG0_WG_X_OFFSET_MASK V3D_MASK(15, 0) +# define V3D_CSD_QUEUED_CFG0_WG_X_OFFSET_SHIFT 0 + +#define V3D_CSD_QUEUED_CFG1 0x00908 +# define V3D_CSD_QUEUED_CFG1_NUM_WGS_Y_MASK V3D_MASK(31, 16) +# define V3D_CSD_QUEUED_CFG1_NUM_WGS_Y_SHIFT 16 +# define V3D_CSD_QUEUED_CFG1_WG_Y_OFFSET_MASK V3D_MASK(15, 0) +# define V3D_CSD_QUEUED_CFG1_WG_Y_OFFSET_SHIFT 0 + +#define V3D_CSD_QUEUED_CFG2 0x0090c +# define V3D_CSD_QUEUED_CFG2_NUM_WGS_Z_MASK V3D_MASK(31, 16) +# define V3D_CSD_QUEUED_CFG2_NUM_WGS_Z_SHIFT 16 +# define V3D_CSD_QUEUED_CFG2_WG_Z_OFFSET_MASK V3D_MASK(15, 0) +# define V3D_CSD_QUEUED_CFG2_WG_Z_OFFSET_SHIFT 0 + +#define V3D_CSD_QUEUED_CFG3 0x00910 +# define V3D_CSD_QUEUED_CFG3_OVERLAP_WITH_PREV BIT(26) +# define V3D_CSD_QUEUED_CFG3_MAX_SG_ID_MASK V3D_MASK(25, 20) +# define V3D_CSD_QUEUED_CFG3_MAX_SG_ID_SHIFT 20 +# define V3D_CSD_QUEUED_CFG3_BATCHES_PER_SG_M1_MASK V3D_MASK(19, 12) +# define V3D_CSD_QUEUED_CFG3_BATCHES_PER_SG_M1_SHIFT 12 +# define V3D_CSD_QUEUED_CFG3_WGS_PER_SG_MASK V3D_MASK(11, 8) +# define V3D_CSD_QUEUED_CFG3_WGS_PER_SG_SHIFT 8 +# define V3D_CSD_QUEUED_CFG3_WG_SIZE_MASK V3D_MASK(7, 0) +# define V3D_CSD_QUEUED_CFG3_WG_SIZE_SHIFT 0 + +/* Number of batches, minus 1 */ +#define V3D_CSD_QUEUED_CFG4 0x00914 + +/* Shader address, pnan, singleseg, threading, like a shader record. 
*/ +#define V3D_CSD_QUEUED_CFG5 0x00918 + +/* Uniforms address (4 byte aligned) */ +#define V3D_CSD_QUEUED_CFG6 0x0091c + +#define V3D_CSD_CURRENT_CFG0 0x00920 +#define V3D_CSD_CURRENT_CFG1 0x00924 +#define V3D_CSD_CURRENT_CFG2 0x00928 +#define V3D_CSD_CURRENT_CFG3 0x0092c +#define V3D_CSD_CURRENT_CFG4 0x00930 +#define V3D_CSD_CURRENT_CFG5 0x00934 +#define V3D_CSD_CURRENT_CFG6 0x00938 + +#define V3D_CSD_CURRENT_ID0 0x0093c +# define V3D_CSD_CURRENT_ID0_WG_X_MASK V3D_MASK(31, 16) +# define V3D_CSD_CURRENT_ID0_WG_X_SHIFT 16 +# define V3D_CSD_CURRENT_ID0_WG_IN_SG_MASK V3D_MASK(11, 8) +# define V3D_CSD_CURRENT_ID0_WG_IN_SG_SHIFT 8 +# define V3D_CSD_CURRENT_ID0_L_IDX_MASK V3D_MASK(7, 0) +# define V3D_CSD_CURRENT_ID0_L_IDX_SHIFT 0 + +#define V3D_CSD_CURRENT_ID1 0x00940 +# define V3D_CSD_CURRENT_ID0_WG_Z_MASK V3D_MASK(31, 16) +# define V3D_CSD_CURRENT_ID0_WG_Z_SHIFT 16 +# define V3D_CSD_CURRENT_ID0_WG_Y_MASK V3D_MASK(15, 0) +# define V3D_CSD_CURRENT_ID0_WG_Y_SHIFT 0 + +#define V3D_ERR_FDBGO 0x00f04 +#define V3D_ERR_FDBGB 0x00f08 +#define V3D_ERR_FDBGR 0x00f0c + +#define V3D_ERR_FDBGS 0x00f10 +# define V3D_ERR_FDBGS_INTERPZ_IP_STALL BIT(17) +# define V3D_ERR_FDBGS_DEPTHO_FIFO_IP_STALL BIT(16) +# define V3D_ERR_FDBGS_XYNRM_IP_STALL BIT(14) +# define V3D_ERR_FDBGS_EZREQ_FIFO_OP_VALID BIT(13) +# define V3D_ERR_FDBGS_QXYF_FIFO_OP_VALID BIT(12) +# define V3D_ERR_FDBGS_QXYF_FIFO_OP_LAST BIT(11) +# define V3D_ERR_FDBGS_EZTEST_ANYQVALID BIT(7) +# define V3D_ERR_FDBGS_EZTEST_PASS BIT(6) +# define V3D_ERR_FDBGS_EZTEST_QREADY BIT(5) +# define V3D_ERR_FDBGS_EZTEST_VLF_OKNOVALID BIT(4) +# define V3D_ERR_FDBGS_EZTEST_QSTALL BIT(3) +# define V3D_ERR_FDBGS_EZTEST_IP_VLFSTALL BIT(2) +# define V3D_ERR_FDBGS_EZTEST_IP_PRSTALL BIT(1) +# define V3D_ERR_FDBGS_EZTEST_IP_QSTALL BIT(0) + +#define V3D_ERR_STAT 0x00f20 +# define V3D_ERR_L2CARE BIT(15) +# define V3D_ERR_VCMBE BIT(14) +# define V3D_ERR_VCMRE BIT(13) +# define V3D_ERR_VCDI BIT(12) +# define V3D_ERR_VCDE BIT(11) +# define V3D_ERR_VDWE BIT(10) +# define V3D_ERR_VPMEAS BIT(9) +# define V3D_ERR_VPMEFNA BIT(8) +# define V3D_ERR_VPMEWNA BIT(7) +# define V3D_ERR_VPMERNA BIT(6) +# define V3D_ERR_VPMERR BIT(5) +# define V3D_ERR_VPMEWR BIT(4) +# define V3D_ERR_VPAERRGL BIT(3) +# define V3D_ERR_VPAEBRGL BIT(2) +# define V3D_ERR_VPAERGS BIT(1) +# define V3D_ERR_VPAEABB BIT(0) + #endif /* V3D_REGS_H */ diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index e740f3b99aa5..8c2df6d95283 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -30,158 +30,152 @@ to_v3d_job(struct drm_sched_job *sched_job) return container_of(sched_job, struct v3d_job, base); } -static struct v3d_tfu_job * -to_tfu_job(struct drm_sched_job *sched_job) +static struct v3d_bin_job * +to_bin_job(struct drm_sched_job *sched_job) { - return container_of(sched_job, struct v3d_tfu_job, base); + return container_of(sched_job, struct v3d_bin_job, base.base); } -static void -v3d_job_free(struct drm_sched_job *sched_job) +static struct v3d_render_job * +to_render_job(struct drm_sched_job *sched_job) { - struct v3d_job *job = to_v3d_job(sched_job); + return container_of(sched_job, struct v3d_render_job, base.base); +} - drm_sched_job_cleanup(sched_job); +static struct v3d_tfu_job * +to_tfu_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct v3d_tfu_job, base.base); +} - v3d_exec_put(job->exec); +static struct v3d_csd_job * +to_csd_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct v3d_csd_job, 
base.base); } static void -v3d_tfu_job_free(struct drm_sched_job *sched_job) +v3d_job_free(struct drm_sched_job *sched_job) { - struct v3d_tfu_job *job = to_tfu_job(sched_job); + struct v3d_job *job = to_v3d_job(sched_job); drm_sched_job_cleanup(sched_job); - - v3d_tfu_job_put(job); + v3d_job_put(job); } /** - * Returns the fences that the bin or render job depends on, one by one. - * v3d_job_run() won't be called until all of them have been signaled. + * Returns the fences that the job depends on, one by one. + * + * If placed in the scheduler's .dependency method, the corresponding + * .run_job won't be called until all of them have been signaled. */ static struct dma_fence * v3d_job_dependency(struct drm_sched_job *sched_job, struct drm_sched_entity *s_entity) { struct v3d_job *job = to_v3d_job(sched_job); - struct v3d_exec_info *exec = job->exec; - enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER; - struct dma_fence *fence; - - fence = job->in_fence; - if (fence) { - job->in_fence = NULL; - return fence; - } - - if (q == V3D_RENDER) { - /* If we had a bin job, the render job definitely depends on - * it. We first have to wait for bin to be scheduled, so that - * its done_fence is created. - */ - fence = exec->bin_done_fence; - if (fence) { - exec->bin_done_fence = NULL; - return fence; - } - } /* XXX: Wait on a fence for switching the GMP if necessary, * and then do so. */ - return fence; -} - -/** - * Returns the fences that the TFU job depends on, one by one. - * v3d_tfu_job_run() won't be called until all of them have been - * signaled. - */ -static struct dma_fence * -v3d_tfu_job_dependency(struct drm_sched_job *sched_job, - struct drm_sched_entity *s_entity) -{ - struct v3d_tfu_job *job = to_tfu_job(sched_job); - struct dma_fence *fence; - - fence = job->in_fence; - if (fence) { - job->in_fence = NULL; - return fence; - } + if (!xa_empty(&job->deps)) + return xa_erase(&job->deps, job->last_dep++); return NULL; } -static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job) +static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) { - struct v3d_job *job = to_v3d_job(sched_job); - struct v3d_exec_info *exec = job->exec; - enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER; - struct v3d_dev *v3d = exec->v3d; + struct v3d_bin_job *job = to_bin_job(sched_job); + struct v3d_dev *v3d = job->base.v3d; struct drm_device *dev = &v3d->drm; struct dma_fence *fence; unsigned long irqflags; - if (unlikely(job->base.s_fence->finished.error)) + if (unlikely(job->base.base.s_fence->finished.error)) return NULL; /* Lock required around bin_job update vs * v3d_overflow_mem_work(). */ spin_lock_irqsave(&v3d->job_lock, irqflags); - if (q == V3D_BIN) { - v3d->bin_job = job->exec; - - /* Clear out the overflow allocation, so we don't - * reuse the overflow attached to a previous job. - */ - V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0); - } else { - v3d->render_job = job->exec; - } + v3d->bin_job = job; + /* Clear out the overflow allocation, so we don't + * reuse the overflow attached to a previous job. + */ + V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0); spin_unlock_irqrestore(&v3d->job_lock, irqflags); - /* Can we avoid this flush when q==RENDER? 
We need to be - * careful of scheduling, though -- imagine job0 rendering to - * texture and job1 reading, and them being executed as bin0, - * bin1, render0, render1, so that render1's flush at bin time + v3d_invalidate_caches(v3d); + + fence = v3d_fence_create(v3d, V3D_BIN); + if (IS_ERR(fence)) + return NULL; + + if (job->base.irq_fence) + dma_fence_put(job->base.irq_fence); + job->base.irq_fence = dma_fence_get(fence); + + trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno, + job->start, job->end); + + /* Set the current and end address of the control list. + * Writing the end register is what starts the job. + */ + if (job->qma) { + V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma); + V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms); + } + if (job->qts) { + V3D_CORE_WRITE(0, V3D_CLE_CT0QTS, + V3D_CLE_CT0QTS_ENABLE | + job->qts); + } + V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start); + V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end); + + return fence; +} + +static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job) +{ + struct v3d_render_job *job = to_render_job(sched_job); + struct v3d_dev *v3d = job->base.v3d; + struct drm_device *dev = &v3d->drm; + struct dma_fence *fence; + + if (unlikely(job->base.base.s_fence->finished.error)) + return NULL; + + v3d->render_job = job; + + /* Can we avoid this flush? We need to be careful of + * scheduling, though -- imagine job0 rendering to texture and + * job1 reading, and them being executed as bin0, bin1, + * render0, render1, so that render1's flush at bin time * wasn't enough. */ v3d_invalidate_caches(v3d); - fence = v3d_fence_create(v3d, q); + fence = v3d_fence_create(v3d, V3D_RENDER); if (IS_ERR(fence)) return NULL; - if (job->irq_fence) - dma_fence_put(job->irq_fence); - job->irq_fence = dma_fence_get(fence); + if (job->base.irq_fence) + dma_fence_put(job->base.irq_fence); + job->base.irq_fence = dma_fence_get(fence); - trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno, + trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno, job->start, job->end); - if (q == V3D_BIN) { - if (exec->qma) { - V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, exec->qma); - V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, exec->qms); - } - if (exec->qts) { - V3D_CORE_WRITE(0, V3D_CLE_CT0QTS, - V3D_CLE_CT0QTS_ENABLE | - exec->qts); - } - } else { - /* XXX: Set the QCFG */ - } + /* XXX: Set the QCFG */ /* Set the current and end address of the control list. * Writing the end register is what starts the job. 
*/ - V3D_CORE_WRITE(0, V3D_CLE_CTNQBA(q), job->start); - V3D_CORE_WRITE(0, V3D_CLE_CTNQEA(q), job->end); + V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start); + V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end); return fence; } @@ -190,7 +184,7 @@ static struct dma_fence * v3d_tfu_job_run(struct drm_sched_job *sched_job) { struct v3d_tfu_job *job = to_tfu_job(sched_job); - struct v3d_dev *v3d = job->v3d; + struct v3d_dev *v3d = job->base.v3d; struct drm_device *dev = &v3d->drm; struct dma_fence *fence; @@ -199,9 +193,9 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job) return NULL; v3d->tfu_job = job; - if (job->irq_fence) - dma_fence_put(job->irq_fence); - job->irq_fence = dma_fence_get(fence); + if (job->base.irq_fence) + dma_fence_put(job->base.irq_fence); + job->base.irq_fence = dma_fence_get(fence); trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno); @@ -223,6 +217,48 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job) return fence; } +static struct dma_fence * +v3d_csd_job_run(struct drm_sched_job *sched_job) +{ + struct v3d_csd_job *job = to_csd_job(sched_job); + struct v3d_dev *v3d = job->base.v3d; + struct drm_device *dev = &v3d->drm; + struct dma_fence *fence; + int i; + + v3d->csd_job = job; + + v3d_invalidate_caches(v3d); + + fence = v3d_fence_create(v3d, V3D_CSD); + if (IS_ERR(fence)) + return NULL; + + if (job->base.irq_fence) + dma_fence_put(job->base.irq_fence); + job->base.irq_fence = dma_fence_get(fence); + + trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno); + + for (i = 1; i <= 6; i++) + V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]); + /* CFG0 write kicks off the job. */ + V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]); + + return fence; +} + +static struct dma_fence * +v3d_cache_clean_job_run(struct drm_sched_job *sched_job) +{ + struct v3d_job *job = to_v3d_job(sched_job); + struct v3d_dev *v3d = job->v3d; + + v3d_clean_caches(v3d); + + return NULL; +} + static void v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) { @@ -232,7 +268,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) /* block scheduler */ for (q = 0; q < V3D_MAX_QUEUES; q++) - drm_sched_stop(&v3d->queue[q].sched); + drm_sched_stop(&v3d->queue[q].sched, sched_job); if (sched_job) drm_sched_increase_karma(sched_job); @@ -251,25 +287,23 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) mutex_unlock(&v3d->reset_lock); } +/* If the current address or return address have changed, then the GPU + * has probably made progress and we should delay the reset. This + * could fail if the GPU got in an infinite loop in the CL, but that + * is pretty unlikely outside of an i-g-t testcase. + */ static void -v3d_job_timedout(struct drm_sched_job *sched_job) +v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, + u32 *timedout_ctca, u32 *timedout_ctra) { struct v3d_job *job = to_v3d_job(sched_job); - struct v3d_exec_info *exec = job->exec; - struct v3d_dev *v3d = exec->v3d; - enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER; - u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q)); - u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q)); - - /* If the current address or return address have changed, then - * the GPU has probably made progress and we should delay the - * reset. This could fail if the GPU got in an infinite loop - * in the CL, but that is pretty unlikely outside of an i-g-t - * testcase. 
- */ - if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) { - job->timedout_ctca = ctca; - job->timedout_ctra = ctra; + struct v3d_dev *v3d = job->v3d; + u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q)); + u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q)); + + if (*timedout_ctca != ctca || *timedout_ctra != ctra) { + *timedout_ctca = ctca; + *timedout_ctra = ctra; return; } @@ -277,25 +311,82 @@ v3d_job_timedout(struct drm_sched_job *sched_job) } static void -v3d_tfu_job_timedout(struct drm_sched_job *sched_job) +v3d_bin_job_timedout(struct drm_sched_job *sched_job) { - struct v3d_tfu_job *job = to_tfu_job(sched_job); + struct v3d_bin_job *job = to_bin_job(sched_job); + + v3d_cl_job_timedout(sched_job, V3D_BIN, + &job->timedout_ctca, &job->timedout_ctra); +} + +static void +v3d_render_job_timedout(struct drm_sched_job *sched_job) +{ + struct v3d_render_job *job = to_render_job(sched_job); + + v3d_cl_job_timedout(sched_job, V3D_RENDER, + &job->timedout_ctca, &job->timedout_ctra); +} + +static void +v3d_generic_job_timedout(struct drm_sched_job *sched_job) +{ + struct v3d_job *job = to_v3d_job(sched_job); v3d_gpu_reset_for_timeout(job->v3d, sched_job); } -static const struct drm_sched_backend_ops v3d_sched_ops = { +static void +v3d_csd_job_timedout(struct drm_sched_job *sched_job) +{ + struct v3d_csd_job *job = to_csd_job(sched_job); + struct v3d_dev *v3d = job->base.v3d; + u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4); + + /* If we've made progress, skip reset and let the timer get + * rearmed. + */ + if (job->timedout_batches != batches) { + job->timedout_batches = batches; + return; + } + + v3d_gpu_reset_for_timeout(v3d, sched_job); +} + +static const struct drm_sched_backend_ops v3d_bin_sched_ops = { .dependency = v3d_job_dependency, - .run_job = v3d_job_run, - .timedout_job = v3d_job_timedout, - .free_job = v3d_job_free + .run_job = v3d_bin_job_run, + .timedout_job = v3d_bin_job_timedout, + .free_job = v3d_job_free, +}; + +static const struct drm_sched_backend_ops v3d_render_sched_ops = { + .dependency = v3d_job_dependency, + .run_job = v3d_render_job_run, + .timedout_job = v3d_render_job_timedout, + .free_job = v3d_job_free, }; static const struct drm_sched_backend_ops v3d_tfu_sched_ops = { - .dependency = v3d_tfu_job_dependency, + .dependency = v3d_job_dependency, .run_job = v3d_tfu_job_run, - .timedout_job = v3d_tfu_job_timedout, - .free_job = v3d_tfu_job_free + .timedout_job = v3d_generic_job_timedout, + .free_job = v3d_job_free, +}; + +static const struct drm_sched_backend_ops v3d_csd_sched_ops = { + .dependency = v3d_job_dependency, + .run_job = v3d_csd_job_run, + .timedout_job = v3d_csd_job_timedout, + .free_job = v3d_job_free +}; + +static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = { + .dependency = v3d_job_dependency, + .run_job = v3d_cache_clean_job_run, + .timedout_job = v3d_generic_job_timedout, + .free_job = v3d_job_free }; int @@ -307,7 +398,7 @@ v3d_sched_init(struct v3d_dev *v3d) int ret; ret = drm_sched_init(&v3d->queue[V3D_BIN].sched, - &v3d_sched_ops, + &v3d_bin_sched_ops, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), "v3d_bin"); @@ -317,14 +408,14 @@ v3d_sched_init(struct v3d_dev *v3d) } ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched, - &v3d_sched_ops, + &v3d_render_sched_ops, hw_jobs_limit, job_hang_limit, msecs_to_jiffies(hang_limit_ms), "v3d_render"); if (ret) { dev_err(v3d->dev, "Failed to create render scheduler: %d.", ret); - drm_sched_fini(&v3d->queue[V3D_BIN].sched); + v3d_sched_fini(v3d); return ret; } @@ 
-336,11 +427,36 @@ v3d_sched_init(struct v3d_dev *v3d) if (ret) { dev_err(v3d->dev, "Failed to create TFU scheduler: %d.", ret); - drm_sched_fini(&v3d->queue[V3D_RENDER].sched); - drm_sched_fini(&v3d->queue[V3D_BIN].sched); + v3d_sched_fini(v3d); return ret; } + if (v3d_has_csd(v3d)) { + ret = drm_sched_init(&v3d->queue[V3D_CSD].sched, + &v3d_csd_sched_ops, + hw_jobs_limit, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), + "v3d_csd"); + if (ret) { + dev_err(v3d->dev, "Failed to create CSD scheduler: %d.", + ret); + v3d_sched_fini(v3d); + return ret; + } + + ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched, + &v3d_cache_clean_sched_ops, + hw_jobs_limit, job_hang_limit, + msecs_to_jiffies(hang_limit_ms), + "v3d_cache_clean"); + if (ret) { + dev_err(v3d->dev, "Failed to create CACHE_CLEAN scheduler: %d.", + ret); + v3d_sched_fini(v3d); + return ret; + } + } + return 0; } @@ -349,6 +465,8 @@ v3d_sched_fini(struct v3d_dev *v3d) { enum v3d_queue q; - for (q = 0; q < V3D_MAX_QUEUES; q++) - drm_sched_fini(&v3d->queue[q].sched); + for (q = 0; q < V3D_MAX_QUEUES; q++) { + if (v3d->queue[q].sched.ready) + drm_sched_fini(&v3d->queue[q].sched); + } } diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h index edd984afa33f..7aa8dc356e54 100644 --- a/drivers/gpu/drm/v3d/v3d_trace.h +++ b/drivers/gpu/drm/v3d/v3d_trace.h @@ -124,6 +124,26 @@ TRACE_EVENT(v3d_tfu_irq, __entry->seqno) ); +TRACE_EVENT(v3d_csd_irq, + TP_PROTO(struct drm_device *dev, + uint64_t seqno), + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u64, seqno) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->seqno = seqno; + ), + + TP_printk("dev=%u, seqno=%llu", + __entry->dev, + __entry->seqno) +); + TRACE_EVENT(v3d_submit_tfu_ioctl, TP_PROTO(struct drm_device *dev, u32 iia), TP_ARGS(dev, iia), @@ -163,6 +183,80 @@ TRACE_EVENT(v3d_submit_tfu, __entry->seqno) ); +TRACE_EVENT(v3d_submit_csd_ioctl, + TP_PROTO(struct drm_device *dev, u32 cfg5, u32 cfg6), + TP_ARGS(dev, cfg5, cfg6), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, cfg5) + __field(u32, cfg6) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->cfg5 = cfg5; + __entry->cfg6 = cfg6; + ), + + TP_printk("dev=%u, CFG5 0x%08x, CFG6 0x%08x", + __entry->dev, + __entry->cfg5, + __entry->cfg6) +); + +TRACE_EVENT(v3d_submit_csd, + TP_PROTO(struct drm_device *dev, + uint64_t seqno), + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u64, seqno) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->seqno = seqno; + ), + + TP_printk("dev=%u, seqno=%llu", + __entry->dev, + __entry->seqno) +); + +TRACE_EVENT(v3d_cache_clean_begin, + TP_PROTO(struct drm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(u32, dev) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + ), + + TP_printk("dev=%u", + __entry->dev) +); + +TRACE_EVENT(v3d_cache_clean_end, + TP_PROTO(struct drm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(u32, dev) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + ), + + TP_printk("dev=%u", + __entry->dev) +); + TRACE_EVENT(v3d_reset_begin, TP_PROTO(struct drm_device *dev), TP_ARGS(dev), |