author		Ohad Sharabi <osharabi@habana.ai>	2022-04-11 08:31:32 +0200
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-05-22 21:01:21 +0200
commit		9e495e24003eec491141c80a9bd8fb4ea5edc171 (patch)
tree		ea6d47ab88848beae274abd97204e6eab53215d1 /drivers/misc/habanalabs/common/mmu/mmu.c
parent		habanalabs: order memory manager messages (diff)
habanalabs: do MMU prefetch as deferred work
When the user requests to prefetch the MMU translations, the driver will not block the user until the prefetch is done. Instead, the prefetch work will be delegated to a WQ which will do it in the background. This way, the prefetch may progress without blocking the user at all.

Signed-off-by: Ohad Sharabi <osharabi@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
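The work item must carry everything the deferred prefetch needs, and the patch queues it on a dedicated hdev->pf_wq workqueue; neither is defined in this file. A minimal sketch of what they would look like, inferred purely from the fields and calls used in the diff below (the struct layout, the helper name hl_prefetch_wq_init, and the workqueue flags are assumptions, not quoted from the commit):

	#include <linux/workqueue.h>
	#include <linux/types.h>

	/* Inferred from the fields hl_mmu_prefetch_work_function() reads. */
	struct hl_prefetch_work {
		struct work_struct	pf_work;
		struct hl_ctx		*ctx;
		u64			va;
		u64			size;
		u32			flags;
		u32			asid;
	};

	/* Hypothetical init helper: pf_wq would be created once per device. */
	static int hl_prefetch_wq_init(struct hl_device *hdev)
	{
		hdev->pf_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
		if (!hdev->pf_wq)
			return -ENOMEM;

		return 0;
	}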
Diffstat (limited to 'drivers/misc/habanalabs/common/mmu/mmu.c')
-rw-r--r--	drivers/misc/habanalabs/common/mmu/mmu.c	50
1 file changed, 44 insertions(+), 6 deletions(-)
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index 04e53af4c67f..ae9b4923c32b 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -665,15 +665,53 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
 	return rc;
 }
 
-int hl_mmu_prefetch_cache_range(struct hl_device *hdev, u32 flags, u32 asid, u64 va, u64 size)
+static void hl_mmu_prefetch_work_function(struct work_struct *work)
 {
-	int rc;
+	struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, pf_work);
+	struct hl_ctx *ctx = pfw->ctx;
 
-	rc = hdev->asic_funcs->mmu_prefetch_cache_range(hdev, flags, asid, va, size);
-	if (rc)
-		dev_err_ratelimited(hdev->dev, "MMU cache range prefetch failed\n");
+	if (!hl_device_operational(ctx->hdev, NULL))
+		goto put_ctx;
 
-	return rc;
+	mutex_lock(&ctx->mmu_lock);
+
+	ctx->hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid,
+								pfw->va, pfw->size);
+
+	mutex_unlock(&ctx->mmu_lock);
+
+put_ctx:
+	/*
+	 * context was taken in the common mmu prefetch function- see comment there about
+	 * context handling.
+	 */
+	hl_ctx_put(ctx);
+	kfree(pfw);
+}
+
+int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size)
+{
+	struct hl_prefetch_work *handle_pf_work;
+
+	handle_pf_work = kmalloc(sizeof(*handle_pf_work), GFP_KERNEL);
+	if (!handle_pf_work)
+		return -ENOMEM;
+
+	INIT_WORK(&handle_pf_work->pf_work, hl_mmu_prefetch_work_function);
+	handle_pf_work->ctx = ctx;
+	handle_pf_work->va = va;
+	handle_pf_work->size = size;
+	handle_pf_work->flags = flags;
+	handle_pf_work->asid = asid;
+
+	/*
+	 * as actual prefetch is done in a WQ we must get the context (and put it
+	 * at the end of the work function)
+	 */
+	hl_ctx_get(ctx->hdev, ctx);
+	queue_work(ctx->hdev->pf_wq, &handle_pf_work->pf_work);
+
+	return 0;
 }
 
 u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
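With the new signature the caller passes the context rather than the device, and a nonzero return now means only that the work item could not be allocated; an actual prefetch failure happens later, inside the work function, where the caller can no longer see it. A hedged sketch of how a map-path caller might use this (the names ret_vaddr, phys_pg_pack and the map_err label are illustrative, not taken from this commit):

	/* Prefetch is fire-and-forget now; only -ENOMEM is visible here. */
	if (flags & HL_MEM_PREFETCH) {
		rc = hl_mmu_prefetch_cache_range(ctx, flags, ctx->asid,
				ret_vaddr, phys_pg_pack->total_size);
		if (rc)
			goto map_err;
	}

Because each queued item takes a context reference with hl_ctx_get(), the context outlives any in-flight prefetch; device teardown would still be expected to drain pf_wq (e.g. with flush_workqueue()) before the workqueue is destroyed.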