author:    Tomer Tayar <ttayar@habana.ai>        2019-10-03 17:22:36 +0200
committer: Oded Gabbay <oded.gabbay@gmail.com>   2019-11-21 10:35:45 +0100
commit:    cb596aee8842c87605ea1a9062af2ab435a742d4
tree:      b50e2cbe47c26188c75754a934b3fa1e4c4f3268 /drivers/misc/habanalabs/command_submission.c
parent:    habanalabs: Mark queue as expecting CB handle or address
habanalabs: Add a new H/W queue type
This patch adds support for a new H/W queue type.
This queue type serves jobs for the DMA and compute engines, for which
completion notifications are sent by the H/W.
A command buffer for this queue type can be created either through the
CB IOCTL, using the retrieved CB handle, or by preparing a buffer on the
host or in the device SRAM/DRAM and using the device address of that
buffer.
The patch includes the handling of both options, as well as the
initialization of the H/W queue and the scheduling of its jobs.
Signed-off-by: Tomer Tayar <ttayar@habana.ai>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
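
The commit message describes two ways for userspace to point a CS chunk at its command buffer. The sketch below is illustrative only and not part of the patch: it fills a struct hl_cs_chunk, whose cb_handle, queue_index and cb_size fields are referenced by the driver code in this diff, for each of the two options. The header include path and the helper names are assumptions for the example.

/*
 * Illustrative sketch, not driver or patch code. Shows the two ways a CS
 * chunk can reference its command buffer. fill_chunk_* helpers and the
 * <misc/habanalabs.h> install path are hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include <misc/habanalabs.h>	/* assumed install path of the uAPI header */

static void fill_chunk_with_cb_handle(struct hl_cs_chunk *chunk,
				      uint32_t queue_index,
				      uint64_t cb_handle, uint32_t cb_size)
{
	/* Option 1: CB allocated via the CB IOCTL; pass the returned handle */
	memset(chunk, 0, sizeof(*chunk));
	chunk->queue_index = queue_index;
	chunk->cb_handle = cb_handle;
	chunk->cb_size = cb_size;
}

static void fill_chunk_with_device_addr(struct hl_cs_chunk *chunk,
					uint32_t queue_index,
					uint64_t dev_addr, uint32_t cb_size)
{
	/*
	 * Option 2 (queues that don't require a kernel-allocated CB): pass
	 * the device address of a buffer prepared on the host or in the
	 * device SRAM/DRAM; the driver casts cb_handle back to an address.
	 */
	memset(chunk, 0, sizeof(*chunk));
	chunk->queue_index = queue_index;
	chunk->cb_handle = dev_addr;
	chunk->cb_size = cb_size;
}

Note that which interpretation applies is decided per queue by the requires_kernel_cb property in validate_queue_index() below, not by anything in the chunk itself.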
Diffstat (limited to 'drivers/misc/habanalabs/command_submission.c')
-rw-r--r--   drivers/misc/habanalabs/command_submission.c | 120
1 file changed, 82 insertions(+), 38 deletions(-)
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index f44205540520..776ddafc47fb 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -65,6 +65,18 @@ static void cs_put(struct hl_cs *cs)
 	kref_put(&cs->refcount, cs_do_release);
 }
 
+static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
+{
+	/*
+	 * Patched CB is created for external queues jobs, and for H/W queues
+	 * jobs if the user CB was allocated by driver and MMU is disabled.
+	 */
+	return (job->queue_type == QUEUE_TYPE_EXT ||
+			(job->queue_type == QUEUE_TYPE_HW &&
+				job->is_kernel_allocated_cb &&
+				!hdev->mmu_enable));
+}
+
 /*
  * cs_parser - parse the user command submission
  *
@@ -91,11 +103,13 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
 	parser.patched_cb = NULL;
 	parser.user_cb = job->user_cb;
 	parser.user_cb_size = job->user_cb_size;
-	parser.ext_queue = job->ext_queue;
+	parser.queue_type = job->queue_type;
+	parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
 	job->patched_cb = NULL;
 
 	rc = hdev->asic_funcs->cs_parser(hdev, &parser);
-	if (job->ext_queue) {
+
+	if (is_cb_patched(hdev, job)) {
 		if (!rc) {
 			job->patched_cb = parser.patched_cb;
 			job->job_cb_size = parser.patched_cb_size;
@@ -124,7 +138,7 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
 {
 	struct hl_cs *cs = job->cs;
 
-	if (job->ext_queue) {
+	if (is_cb_patched(hdev, job)) {
 		hl_userptr_delete_list(hdev, &job->userptr_list);
 
 		/*
@@ -140,6 +154,19 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
 		}
 	}
 
+	/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
+	 * enabled, the user CB isn't released in cs_parser() and thus should be
+	 * released here.
+	 */
+	if (job->queue_type == QUEUE_TYPE_HW &&
+			job->is_kernel_allocated_cb && hdev->mmu_enable) {
+		spin_lock(&job->user_cb->lock);
+		job->user_cb->cs_cnt--;
+		spin_unlock(&job->user_cb->lock);
+
+		hl_cb_put(job->user_cb);
+	}
+
 	/*
 	 * This is the only place where there can be multiple threads
 	 * modifying the list at the same time
@@ -150,7 +177,8 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
 
 	hl_debugfs_remove_job(hdev, job);
 
-	if (job->ext_queue)
+	if (job->queue_type == QUEUE_TYPE_EXT ||
+			job->queue_type == QUEUE_TYPE_HW)
 		cs_put(cs);
 
 	kfree(job);
 }
@@ -387,18 +415,13 @@ static void job_wq_completion(struct work_struct *work)
 	free_job(hdev, job);
 }
 
-static struct hl_cb *validate_queue_index(struct hl_device *hdev,
-					struct hl_cb_mgr *cb_mgr,
-					struct hl_cs_chunk *chunk,
-					bool *ext_queue)
+static int validate_queue_index(struct hl_device *hdev,
+				struct hl_cs_chunk *chunk,
+				enum hl_queue_type *queue_type,
+				bool *is_kernel_allocated_cb)
 {
 	struct asic_fixed_properties *asic = &hdev->asic_prop;
 	struct hw_queue_properties *hw_queue_prop;
-	u32 cb_handle;
-	struct hl_cb *cb;
-
-	/* Assume external queue */
-	*ext_queue = true;
 
 	hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
 
@@ -406,22 +429,29 @@ static struct hl_cb *validate_queue_index(struct hl_device *hdev,
 			(hw_queue_prop->type == QUEUE_TYPE_NA)) {
 		dev_err(hdev->dev, "Queue index %d is invalid\n",
 			chunk->queue_index);
-		return NULL;
+		return -EINVAL;
 	}
 
 	if (hw_queue_prop->driver_only) {
 		dev_err(hdev->dev,
 			"Queue index %d is restricted for the kernel driver\n",
 			chunk->queue_index);
-		return NULL;
+		return -EINVAL;
 	}
 
-	if (!hw_queue_prop->requires_kernel_cb) {
-		*ext_queue = false;
-		return (struct hl_cb *) (uintptr_t) chunk->cb_handle;
-	}
+	*queue_type = hw_queue_prop->type;
+	*is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb;
+
+	return 0;
+}
+
+static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev,
+					struct hl_cb_mgr *cb_mgr,
+					struct hl_cs_chunk *chunk)
+{
+	struct hl_cb *cb;
+	u32 cb_handle;
 
-	/* Retrieve CB object */
 	cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT);
 
 	cb = hl_cb_get(hdev, cb_mgr, cb_handle);
@@ -446,7 +476,8 @@ release_cb:
 	return NULL;
 }
 
-struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
+struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
+		enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
 {
 	struct hl_cs_job *job;
 
@@ -454,12 +485,14 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue)
 	if (!job)
 		return NULL;
 
-	job->ext_queue = ext_queue;
+	job->queue_type = queue_type;
+	job->is_kernel_allocated_cb = is_kernel_allocated_cb;
 
-	if (job->ext_queue) {
+	if (is_cb_patched(hdev, job))
 		INIT_LIST_HEAD(&job->userptr_list);
+
+	if (job->queue_type == QUEUE_TYPE_EXT)
 		INIT_WORK(&job->finish_work, job_wq_completion);
-	}
 
 	return job;
 }
@@ -472,7 +505,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
 	struct hl_cs_job *job;
 	struct hl_cs *cs;
 	struct hl_cb *cb;
-	bool ext_queue_present = false;
+	bool int_queues_only = true;
 	u32 size_to_copy;
 	int rc, i, parse_cnt;
 
@@ -516,23 +549,33 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
 	/* Validate ALL the CS chunks before submitting the CS */
 	for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
 		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
-		bool ext_queue;
+		enum hl_queue_type queue_type;
+		bool is_kernel_allocated_cb;
 
-		cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk,
-					&ext_queue);
-		if (ext_queue) {
-			ext_queue_present = true;
+		rc = validate_queue_index(hdev, chunk, &queue_type,
+					&is_kernel_allocated_cb);
+		if (rc)
+			goto free_cs_object;
+
+		if (is_kernel_allocated_cb) {
+			cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
 			if (!cb) {
 				rc = -EINVAL;
 				goto free_cs_object;
 			}
+		} else {
+			cb = (struct hl_cb *) (uintptr_t) chunk->cb_handle;
 		}
 
-		job = hl_cs_allocate_job(hdev, ext_queue);
+		if (queue_type == QUEUE_TYPE_EXT || queue_type == QUEUE_TYPE_HW)
+			int_queues_only = false;
+
+		job = hl_cs_allocate_job(hdev, queue_type,
+						is_kernel_allocated_cb);
 		if (!job) {
 			dev_err(hdev->dev, "Failed to allocate a new job\n");
 			rc = -ENOMEM;
-			if (ext_queue)
+			if (is_kernel_allocated_cb)
 				goto release_cb;
 			else
 				goto free_cs_object;
@@ -542,7 +585,7 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
 		job->cs = cs;
 		job->user_cb = cb;
 		job->user_cb_size = chunk->cb_size;
-		if (job->ext_queue)
+		if (is_kernel_allocated_cb)
 			job->job_cb_size = cb->size;
 		else
 			job->job_cb_size = chunk->cb_size;
@@ -555,10 +598,11 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
 		/*
 		 * Increment CS reference. When CS reference is 0, CS is
 		 * done and can be signaled to user and free all its resources
-		 * Only increment for JOB on external queues, because only
-		 * for those JOBs we get completion
+		 * Only increment for JOB on external or H/W queues, because
+		 * only for those JOBs we get completion
		 */
-		if (job->ext_queue)
+		if (job->queue_type == QUEUE_TYPE_EXT ||
+				job->queue_type == QUEUE_TYPE_HW)
 			cs_get(cs);
 
 		hl_debugfs_add_job(hdev, job);
@@ -572,9 +616,9 @@ static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks,
 		}
 	}
 
-	if (!ext_queue_present) {
+	if (int_queues_only) {
 		dev_err(hdev->dev,
-			"Reject CS %d.%llu because no external queues jobs\n",
+			"Reject CS %d.%llu because only internal queues jobs are present\n",
 			cs->ctx->asid, cs->sequence);
 		rc = -EINVAL;
 		goto free_cs_object;
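
As the comment in _hl_cs_ioctl() notes, only jobs that generate a H/W completion pin their CS: the patch extends the existing cs_get()/cs_put() pairing from external queues to the new H/W queue type. Below is a minimal sketch of that kref pattern with simplified stand-in types; demo_cs and the demo_* helpers are hypothetical, not driver code.

/*
 * Minimal sketch (assumed simplified types) of the CS refcount rule: every
 * completion-generating job takes a reference on its CS at submission and
 * drops it when the job is freed, so the CS is released only after the last
 * such job completes.
 */
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_cs {
	struct kref refcount;
};

static void demo_cs_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_cs, refcount));
}

static struct demo_cs *demo_cs_create(void)
{
	struct demo_cs *cs = kzalloc(sizeof(*cs), GFP_KERNEL);

	if (cs)
		kref_init(&cs->refcount);	/* submitter's initial reference */
	return cs;
}

/* Mirrors cs_get() in _hl_cs_ioctl(): called once per EXT/HW queue job */
static void demo_job_attach(struct demo_cs *cs, bool gets_completion)
{
	if (gets_completion)
		kref_get(&cs->refcount);
}

/* Mirrors cs_put() in free_job(): drops the reference the job took */
static void demo_job_finish(struct demo_cs *cs, bool gets_completion)
{
	if (gets_completion)
		kref_put(&cs->refcount, demo_cs_release);
}

This is also why a CS containing only internal-queue jobs is rejected with -EINVAL: with no completion-generating job, nothing would ever signal the CS as done.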