author     farah kassabri <fkassabri@habana.ai>  2020-10-12 13:30:26 +0200
committer  Oded Gabbay <ogabbay@kernel.org>  2020-11-30 09:47:31 +0100
commit     e753643d516c7c38f69f3d73169bb00cd70a60b9 (patch)
tree       c5b0b56a748cc99e5c5763eb72d9888a7c756160 /drivers/misc/habanalabs/common/command_submission.c
parent     habanalabs: advanced FW loading (diff)
habanalabs: fix cs counters structure
Fix the cs counters structure in the uapi to be one flat structure instead of two instances of the same nested structure. Use atomic read/increment for the context counters so that a single structure can serve both the aggregated and the per-context counters.

Signed-off-by: farah kassabri <fkassabri@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
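For orientation, below is a minimal sketch of the kind of flat, all-atomic counter structure this change implies. The field names are taken from the counters touched in this patch; the struct and helper names (hl_cs_counters_atomic, cs_drop_parsing) are assumptions for illustration, not the driver's exact definitions.

#include <linux/atomic.h>

/*
 * Illustrative only: a single flat structure in which every drop counter is
 * an atomic64_t, so the same layout can back both the per-context and the
 * aggregated (device-wide) counters, and both can be bumped without a lock.
 */
struct hl_cs_counters_atomic {
	atomic64_t out_of_mem_drop_cnt;
	atomic64_t parsing_drop_cnt;
	atomic64_t max_cs_in_flight_drop_cnt;
};

/* Both instances are incremented the same way, as in the hunks below. */
static inline void cs_drop_parsing(struct hl_cs_counters_atomic *ctx_cntr,
				   struct hl_cs_counters_atomic *dev_cntr)
{
	atomic64_inc(&ctx_cntr->parsing_drop_cnt);
	atomic64_inc(&dev_cntr->parsing_drop_cnt);
}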
Diffstat (limited to 'drivers/misc/habanalabs/common/command_submission.c')
-rw-r--r--  drivers/misc/habanalabs/common/command_submission.c | 18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 26822cfd1491..e123101b74d6 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -462,7 +462,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
if (other && !completion_done(&other->completion)) {
dev_dbg_ratelimited(hdev->dev,
"Rejecting CS because of too many in-flights CS\n");
- ctx->cs_counters.max_cs_in_flight_drop_cnt++;
+ atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
rc = -EAGAIN;
goto free_fence;
@@ -720,7 +720,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = validate_queue_index(hdev, chunk, &queue_type,
&is_kernel_allocated_cb);
if (rc) {
- hpriv->ctx->cs_counters.parsing_drop_cnt++;
+ atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
atomic64_inc(&cntr->parsing_drop_cnt);
goto free_cs_object;
}
@@ -728,7 +728,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
if (is_kernel_allocated_cb) {
cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
- hpriv->ctx->cs_counters.parsing_drop_cnt++;
+ atomic64_inc(
+ &hpriv->ctx->cs_counters.parsing_drop_cnt);
atomic64_inc(&cntr->parsing_drop_cnt);
rc = -EINVAL;
goto free_cs_object;
@@ -743,7 +744,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
- hpriv->ctx->cs_counters.out_of_mem_drop_cnt++;
+ atomic64_inc(
+ &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
@@ -777,7 +779,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = cs_parser(hpriv, job);
if (rc) {
- hpriv->ctx->cs_counters.parsing_drop_cnt++;
+ atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
atomic64_inc(&cntr->parsing_drop_cnt);
dev_err(hdev->dev,
"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
@@ -787,7 +789,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
if (int_queues_only) {
- hpriv->ctx->cs_counters.parsing_drop_cnt++;
+ atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
atomic64_inc(&cntr->parsing_drop_cnt);
dev_err(hdev->dev,
"Reject CS %d.%llu because only internal queues jobs are present\n",
@@ -880,7 +882,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
job = hl_cs_allocate_job(hdev, q_type, true);
if (!job) {
- ctx->cs_counters.out_of_mem_drop_cnt++;
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n");
return -ENOMEM;
@@ -894,7 +896,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
cb = hl_cb_kernel_create(hdev, cb_size,
q_type == QUEUE_TYPE_HW && hdev->mmu_enable);
if (!cb) {
- ctx->cs_counters.out_of_mem_drop_cnt++;
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
kfree(job);
return -EFAULT;
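On the read side (handled outside this file, so the following is only a hedged sketch), making the context counters atomic as well means a single flat uapi structure can be filled from both counter sets with atomic64_read(). The uapi struct and function names here are hypothetical and build on the hl_cs_counters_atomic sketch above.

#include <linux/types.h>

/*
 * Hypothetical read path: copy both the device-wide and the per-context
 * atomic counters into one flat, plain-u64 uapi structure. Names are
 * illustrative, not the driver's exact uapi definitions.
 */
struct hl_cs_counters_flat {
	__u64 total_parsing_drop_cnt;	/* aggregated across the device */
	__u64 ctx_parsing_drop_cnt;	/* for the calling context only */
};

static void fill_cs_counters(const struct hl_cs_counters_atomic *dev_cntr,
			     const struct hl_cs_counters_atomic *ctx_cntr,
			     struct hl_cs_counters_flat *out)
{
	out->total_parsing_drop_cnt =
			atomic64_read(&dev_cntr->parsing_drop_cnt);
	out->ctx_parsing_drop_cnt =
			atomic64_read(&ctx_cntr->parsing_drop_cnt);
}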