author		Karol Wachowski <karol.wachowski@intel.com>	2024-10-17 16:58:12 +0200
committer	Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>	2024-10-30 10:22:07 +0100
commit		ae7af7d8dc2a13a427aa90d003fe4fb2c168342a (patch)
tree		9a66db1498445142c831e0fd2a7d052b101fed4c
parent		accel/ivpu: Unmap partially mapped BOs in case of errors (diff)
accel/ivpu: Use xa_alloc_cyclic() instead of custom function
Remove the custom ivpu_id_alloc() wrapper used for ID allocations and replace it with the standard xa_alloc_cyclic() API. The idea behind ivpu_id_alloc() was to hand out monotonic IDs so that the driver is easier to debug, because the same IDs are not reused over and over. The same can be achieved by using the appropriate Linux API.

Signed-off-by: Karol Wachowski <karol.wachowski@intel.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241017145817.121590-7-jacek.lawrynowicz@linux.intel.com
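For reference, here is a minimal sketch (not taken from this driver; the xarray, helper, and limit values are made up for illustration) of how xa_alloc_cyclic() hands out monotonically increasing IDs that wrap back to the lower limit, and of its return-value convention:

#include <linux/xarray.h>

/* Hypothetical allocator used only to illustrate xa_alloc_cyclic(). */
static DEFINE_XARRAY_ALLOC(example_xa);
static u32 example_next;	/* cyclic cursor kept across calls */

static int example_alloc_id(void *entry, u32 *id)
{
	/* IDs advance from 1 to 1023, then wrap back to 1. */
	int ret = xa_alloc_cyclic(&example_xa, id, entry, XA_LIMIT(1, 1023),
				  &example_next, GFP_KERNEL);

	/*
	 * xa_alloc_cyclic() returns 0 on success, 1 if the ID space wrapped,
	 * and a negative errno on failure, hence the "ret < 0" checks in
	 * the patch below.
	 */
	return ret < 0 ? ret : 0;
}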
-rw-r--r--	drivers/accel/ivpu/ivpu_drv.c	11
-rw-r--r--	drivers/accel/ivpu/ivpu_drv.h	4
-rw-r--r--	drivers/accel/ivpu/ivpu_job.c	34
3 files changed, 12 insertions, 37 deletions
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 34e3e9b1c3f2..383e3eb98898 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -256,10 +256,8 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);
- file_priv->default_job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK,
- (file_priv->ctx.id - 1));
- file_priv->default_job_limit.max = file_priv->default_job_limit.min | IVPU_JOB_ID_JOB_MASK;
- file_priv->job_limit = file_priv->default_job_limit;
+ file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
+ file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
mutex_unlock(&vdev->context_list_lock);
drm_dev_exit(idx);
@@ -618,9 +616,8 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);
- vdev->default_db_limit.min = IVPU_MIN_DB;
- vdev->default_db_limit.max = IVPU_MAX_DB;
- vdev->db_limit = vdev->default_db_limit;
+ vdev->db_limit.min = IVPU_MIN_DB;
+ vdev->db_limit.max = IVPU_MAX_DB;
ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
if (ret)
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 5b4f5104b470..677440282170 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -137,7 +137,7 @@ struct ivpu_device {
struct xarray db_xa;
struct xa_limit db_limit;
- struct xa_limit default_db_limit;
+ u32 db_next;
struct mutex bo_list_lock; /* Protects bo_list */
struct list_head bo_list;
@@ -174,7 +174,7 @@ struct ivpu_file_priv {
struct list_head ms_instance_list;
struct ivpu_bo *ms_info_bo;
struct xa_limit job_limit;
- struct xa_limit default_job_limit;
+ u32 job_id_next;
bool has_mmu_faults;
bool bound;
bool aborted;
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index f580959e8778..9154c2e14245 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -72,26 +72,6 @@ static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
ivpu_bo_free(cmdq->secondary_preempt_buf);
}
-static int ivpu_id_alloc(struct xarray *xa, u32 *id, void *entry, struct xa_limit *limit,
- const struct xa_limit default_limit)
-{
- int ret;
-
- ret = __xa_alloc(xa, id, entry, *limit, GFP_KERNEL);
- if (ret) {
- limit->min = default_limit.min;
- ret = __xa_alloc(xa, id, entry, *limit, GFP_KERNEL);
- if (ret)
- return ret;
- }
-
- limit->min = *id + 1;
- if (limit->min > limit->max)
- limit->min = default_limit.min;
-
- return ret;
-}
-
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
@@ -102,11 +82,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq)
return NULL;
- xa_lock(&vdev->db_xa); /* lock here to protect db_limit */
- ret = ivpu_id_alloc(&vdev->db_xa, &cmdq->db_id, NULL, &vdev->db_limit,
- vdev->default_db_limit);
- xa_unlock(&vdev->db_xa);
- if (ret) {
+ ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
+ GFP_KERNEL);
+ if (ret < 0) {
ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
goto err_free_cmdq;
}
@@ -554,9 +532,9 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
xa_lock(&vdev->submitted_jobs_xa);
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
- ret = ivpu_id_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, &file_priv->job_limit,
- file_priv->default_job_limit);
- if (ret) {
+ ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
+ &file_priv->job_id_next, GFP_KERNEL);
+ if (ret < 0) {
ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
file_priv->ctx.id);
ret = -EBUSY;
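The job-ID path in ivpu_job_submit() already holds the XArray lock, so the patch switches that call site to the locked __xa_alloc_cyclic() variant. A hedged sketch of the pattern follows (the function and parameter names are illustrative, not from the driver); __xa_alloc_cyclic() expects xa_lock to be held and may temporarily drop it to allocate memory when the GFP flags allow sleeping:

#include <linux/xarray.h>

/*
 * Illustrative only: allocate a cyclic ID under the XArray's own lock,
 * mirroring how ivpu_job_submit() calls __xa_alloc_cyclic() on
 * submitted_jobs_xa.
 */
static int example_alloc_id_locked(struct xarray *xa, void *entry, u32 *id,
				   struct xa_limit limit, u32 *next)
{
	int ret;

	xa_lock(xa);
	/* May drop and reacquire xa_lock internally for GFP_KERNEL. */
	ret = __xa_alloc_cyclic(xa, id, entry, limit, next, GFP_KERNEL);
	xa_unlock(xa);

	return ret < 0 ? ret : 0;	/* ret == 1 only signals a wrap */
}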