Diffstat (limited to 'drivers/tee/optee/smc_abi.c')
-rw-r--r--  drivers/tee/optee/smc_abi.c  1362
1 file changed, 1362 insertions, 0 deletions
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
new file mode 100644
index 000000000000..6196d7c3888f
--- /dev/null
+++ b/drivers/tee/optee/smc_abi.c
@@ -0,0 +1,1362 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+#include "optee_rpc_cmd.h"
+#define CREATE_TRACE_POINTS
+#include "optee_trace.h"
+
+/*
+ * This file implements the SMC ABI used when communicating with the
+ * OP-TEE OS in secure world via raw SMCs.
+ * This file is divided into the following sections:
+ * 1. Convert between struct tee_param and struct optee_msg_param
+ * 2. Low level support functions to register shared memory in secure world
+ * 3. Dynamic shared memory pool based on alloc_pages()
+ * 4. Do a normal scheduled call into secure world
+ * 5. Driver initialization
+ */
+
+#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
+
+/*
+ * 1. Convert between struct tee_param and struct optee_msg_param
+ *
+ * optee_from_msg_param() and optee_to_msg_param() are the main
+ * functions.
+ */
+
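+/*
+ * The *_INPUT/_OUTPUT/_INOUT attribute variants are laid out contiguously
+ * and in the same order in both attribute sets, so the helpers below
+ * translate a parameter type with plain offset arithmetic, e.g.:
+ *
+ *   p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ *             attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ */
+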
+static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm;
+ phys_addr_t pa;
+ int rc;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ p->u.memref.size = mp->u.tmem.size;
+ shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
+ if (!shm) {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ return 0;
+ }
+
+ rc = tee_shm_get_pa(shm, 0, &pa);
+ if (rc)
+ return rc;
+
+ p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
+ p->u.memref.shm = shm;
+
+ /* Check that the memref is covered by the shm object */
+ if (p->u.memref.size) {
+ size_t o = p->u.memref.shm_offs +
+ p->u.memref.size - 1;
+
+ rc = tee_shm_get_pa(shm, o, NULL);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ p->u.memref.size = mp->u.rmem.size;
+ shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
+
+ if (shm) {
+ p->u.memref.shm_offs = mp->u.rmem.offs;
+ p->u.memref.shm = shm;
+ } else {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ }
+}
+
+/**
+ * optee_from_msg_param() - convert from OPTEE_MSG parameters to
+ * struct tee_param
+ * @optee: main service struct
+ * @params: subsystem internal parameter representation
+ * @num_params: number of elements in the parameter arrays
+ * @msg_params: OPTEE_MSG parameters
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
+ size_t num_params,
+ const struct optee_msg_param *msg_params)
+{
+ int rc;
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ const struct optee_msg_param *mp = msg_params + n;
+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+ switch (attr) {
+ case OPTEE_MSG_ATTR_TYPE_NONE:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ optee_from_msg_param_value(p, attr, mp);
+ break;
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+ rc = from_msg_param_tmp_mem(p, attr, mp);
+ if (rc)
+ return rc;
+ break;
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+ from_msg_param_reg_mem(p, attr, mp);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ int rc;
+ phys_addr_t pa;
+
+ mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.tmem.size = p->u.memref.size;
+
+ if (!p->u.memref.shm) {
+ mp->u.tmem.buf_ptr = 0;
+ return 0;
+ }
+
+ rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
+ if (rc)
+ return rc;
+
+ mp->u.tmem.buf_ptr = pa;
+ mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+ OPTEE_MSG_ATTR_CACHE_SHIFT;
+
+ return 0;
+}
+
+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.rmem.size = p->u.memref.size;
+ mp->u.rmem.offs = p->u.memref.shm_offs;
+ return 0;
+}
+
+/**
+ * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
+ * @optee: main service struct
+ * @msg_params: OPTEE_MSG parameters
+ * @num_params: number of elements in the parameter arrays
+ * @params: subsystem internal parameter representation
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_to_msg_param(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params, const struct tee_param *params)
+{
+ int rc;
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ const struct tee_param *p = params + n;
+ struct optee_msg_param *mp = msg_params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ mp->attr = OPTEE_MSG_ATTR_TYPE_NONE;
+ memset(&mp->u, 0, sizeof(mp->u));
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ optee_to_msg_param_value(mp, p);
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ if (tee_shm_is_registered(p->u.memref.shm))
+ rc = to_msg_param_reg_mem(mp, p);
+ else
+ rc = to_msg_param_tmp_mem(mp, p);
+ if (rc)
+ return rc;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
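+
+/*
+ * A registered memref (tee_shm_is_registered()) is passed to secure world
+ * as RMEM with an offset into the registered object, while an ordinary
+ * allocation is passed as TMEM with its physical address.
+ */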
+
+/*
+ * 2. Low level support functions to register shared memory in secure world
+ *
+ * Functions to enable/disable shared memory caching in secure world, that
+ * is, lazy freeing of previously allocated shared memory. Freeing is
+ * performed when a request has been completed.
+ *
+ * Functions to register and unregister shared memory both for normal
+ * clients and for tee-supplicant.
+ */
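+
+/*
+ * OPTEE_SMC_DISABLE_SHM_CACHE is invoked repeatedly: each
+ * OPTEE_SMC_RETURN_OK result carries one cached shm cookie as a
+ * register pair, and OPTEE_SMC_RETURN_ENOTAVAIL signals that the
+ * cache is empty (see __optee_disable_shm_cache() below).
+ */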
+
+/**
+ * optee_enable_shm_cache() - Enables caching of some shared memory allocations
+ * in OP-TEE
+ * @optee: main service struct
+ */
+static void optee_enable_shm_cache(struct optee *optee)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == OPTEE_SMC_RETURN_OK)
+ break;
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * __optee_disable_shm_cache() - Disables caching of some shared memory
+ * allocations in OP-TEE
+ * @optee: main service struct
+ * @is_mapped: true if the cached shared memory addresses were mapped by this
+ * kernel, are safe to dereference, and should be freed
+ */
+static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_disable_shm_cache_result result;
+ } res;
+
+ optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
+ 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+ if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
+ break; /* All shm's freed */
+ if (res.result.status == OPTEE_SMC_RETURN_OK) {
+ struct tee_shm *shm;
+
+ /*
+ * Shared memory references that were not mapped by
+ * this kernel must be ignored to prevent a crash.
+ */
+ if (!is_mapped)
+ continue;
+
+ shm = reg_pair_to_ptr(res.result.shm_upper32,
+ res.result.shm_lower32);
+ tee_shm_free(shm);
+ } else {
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * optee_disable_shm_cache() - Disables caching of mapped shared memory
+ * allocations in OP-TEE
+ * @optee: main service struct
+ */
+static void optee_disable_shm_cache(struct optee *optee)
+{
+ return __optee_disable_shm_cache(optee, true);
+}
+
+/**
+ * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
+ * allocations in OP-TEE which are not
+ * currently mapped
+ * @optee: main service struct
+ */
+static void optee_disable_unmapped_shm_cache(struct optee *optee)
+{
+ return __optee_disable_shm_cache(optee, false);
+}
+
+#define PAGELIST_ENTRIES_PER_PAGE \
+ ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+/*
+ * The final entry in each pagelist page is a pointer to the next
+ * pagelist page.
+ */
+static size_t get_pages_list_size(size_t num_entries)
+{
+ int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+ return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
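+
+/*
+ * For example, registering a 2 MiB buffer of 4k pages means 512 page
+ * entries; at 511 entries plus one link per pagelist page this needs
+ * DIV_ROUND_UP(512, 511) = 2 pagelist pages, i.e. 8 KiB.
+ */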
+
+static u64 *optee_allocate_pages_list(size_t num_entries)
+{
+ return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
+}
+
+static void optee_free_pages_list(void *list, size_t num_entries)
+{
+ free_pages_exact(list, get_pages_list_size(num_entries));
+}
+
+/**
+ * optee_fill_pages_list() - write a list of user pages into the given
+ * shared buffer
+ *
+ * @dst: page-aligned buffer where list of pages will be stored
+ * @pages: array of pages that represents shared buffer
+ * @num_pages: number of entries in @pages
+ * @page_offset: offset of user buffer from page start
+ *
+ * @dst should be big enough to hold the list of user page addresses and
+ * links to the next pages of the buffer
+ */
+static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+ size_t page_offset)
+{
+ int n = 0;
+ phys_addr_t optee_page;
+ /*
+ * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
+ * for details.
+ */
+ struct {
+ u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
+ u64 next_page_data;
+ } *pages_data;
+
+ /*
+ * Currently OP-TEE uses 4k page size and it does not look like
+ * this will change in the future. On the other hand, there are
+ * no known ARM architectures with page size < 4k.
+ * Thus the following build assert looks redundant. But the code
+ * below heavily relies on this assumption, so it is better to be
+ * safe than sorry.
+ */
+ BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ pages_data = (void *)dst;
+ /*
+ * If a Linux page is bigger than 4k and the user buffer offset is
+ * larger than 4k/8k/12k/etc., this skips the leading 4k pages,
+ * because they hold no data of value to OP-TEE.
+ */
+ optee_page = page_to_phys(*pages) +
+ round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ while (true) {
+ pages_data->pages_list[n++] = optee_page;
+
+ if (n == PAGELIST_ENTRIES_PER_PAGE) {
+ pages_data->next_page_data =
+ virt_to_phys(pages_data + 1);
+ pages_data++;
+ n = 0;
+ }
+
+ optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+ if (!(optee_page & ~PAGE_MASK)) {
+ if (!--num_pages)
+ break;
+ pages++;
+ optee_page = page_to_phys(*pages);
+ }
+ }
+}
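+
+/*
+ * Resulting layout, sketched for a buffer spanning two pagelist pages
+ * (pa_n is the physical address of the n:th 4k chunk of the buffer):
+ *
+ *   pagelist page 0: pa_0 ... pa_510, phys addr of pagelist page 1
+ *   pagelist page 1: pa_511 ... pa_1021, ...
+ *
+ * Entries past the last buffer page are left unwritten.
+ */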
+
+static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm_arg;
+ u64 *pages_list;
+ int rc;
+
+ if (!num_pages)
+ return -EINVAL;
+
+ rc = optee_check_mem_type(start, num_pages);
+ if (rc)
+ return rc;
+
+ pages_list = optee_allocate_pages_list(num_pages);
+ if (!pages_list)
+ return -ENOMEM;
+
+ shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
+ if (IS_ERR(shm_arg)) {
+ rc = PTR_ERR(shm_arg);
+ goto out;
+ }
+
+ optee_fill_pages_list(pages_list, pages, num_pages,
+ tee_shm_get_page_offset(shm));
+
+ msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
+ msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
+ msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
+ /*
+ * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
+ * we store the buffer offset from the 4k page, as described in the
+ * OP-TEE ABI.
+ */
+ msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
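+ /*
+ * E.g. (hypothetical values): a pages_list at physical 0x42000000
+ * and a buffer starting 0x80 bytes into its first 4k page yield
+ * buf_ptr = 0x42000080.
+ */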
+
+ if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+
+ tee_shm_free(shm_arg);
+out:
+ optee_free_pages_list(pages_list, num_pages);
+ return rc;
+}
+
+static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm_arg;
+ int rc = 0;
+
+ shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
+ if (IS_ERR(shm_arg))
+ return PTR_ERR(shm_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
+
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
+
+ if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+ tee_shm_free(shm_arg);
+ return rc;
+}
+
+static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ /*
+ * We don't want to register supplicant memory in OP-TEE.
+ * Instead, information about it is passed via RPC.
+ */
+ return optee_check_mem_type(start, num_pages);
+}
+
+static int optee_shm_unregister_supp(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ return 0;
+}
+
+/*
+ * 3. Dynamic shared memory pool based on alloc_pages()
+ *
+ * Implements an OP-TEE specific shared memory pool which is used
+ * when dynamic shared memory is supported by secure world.
+ *
+ * The main function is optee_shm_pool_alloc_pages().
+ */
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size)
+{
+ /*
+ * Shared memory private to the OP-TEE driver doesn't need
+ * to be registered with OP-TEE.
+ */
+ if (shm->flags & TEE_SHM_PRIV)
+ return optee_pool_op_alloc_helper(poolm, shm, size, NULL);
+
+ return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm)
+{
+ if (!(shm->flags & TEE_SHM_PRIV))
+ optee_shm_unregister(shm->ctx, shm);
+
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+/**
+ * optee_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used when OP-TEE supports dynamic SHM. In this case
+ * command buffers and such are allocated from the kernel's own memory.
+ */
+static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
+{
+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->ops = &pool_ops;
+
+ return mgr;
+}
+
+/*
+ * 4. Do a normal scheduled call into secure world
+ *
+ * The function optee_smc_do_call_with_arg() performs a normal scheduled
+ * call into secure world. During this call secure world may request help
+ * from normal world using RPCs, Remote Procedure Calls. This includes
+ * delivery of non-secure interrupts to, for instance, allow rescheduling
+ * of the current task.
+ */
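+
+/*
+ * A sketch of the call loop in optee_smc_do_call_with_arg() below:
+ *
+ *   a0 = OPTEE_SMC_CALL_WITH_ARG, a1:a2 = physical address of the msg arg
+ *   invoke_fn(...)
+ *   while OPTEE_SMC_RETURN_IS_RPC(a0):
+ *           service the request via optee_handle_rpc()
+ *           a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC
+ *           invoke_fn(...)
+ */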
+
+static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ optee_rpc_cmd_free_suppl(ctx, shm);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ tee_shm_free(shm);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+ arg->ret = TEEC_SUCCESS;
+}
+
+static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee_msg_arg *arg,
+ struct optee_call_ctx *call_ctx)
+{
+ phys_addr_t pa;
+ struct tee_shm *shm;
+ size_t sz;
+ size_t n;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (!arg->num_params ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ for (n = 1; n < arg->num_params; n++) {
+ if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+ }
+
+ sz = arg->params[0].u.value.b;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ if (IS_ERR(shm)) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ if (tee_shm_get_pa(shm, 0, &pa)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto bad;
+ }
+
+ sz = tee_shm_get_size(shm);
+
+ if (tee_shm_is_registered(shm)) {
+ struct page **pages;
+ u64 *pages_list;
+ size_t page_num;
+
+ pages = tee_shm_get_pages(shm, &page_num);
+ if (!pages || !page_num) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ pages_list = optee_allocate_pages_list(page_num);
+ if (!pages_list) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ call_ctx->pages_list = pages_list;
+ call_ctx->num_entries = page_num;
+
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ /*
+ * In the least significant bits of u.tmem.buf_ptr we store the
+ * buffer offset from the 4k page, as described in the OP-TEE ABI.
+ */
+ arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) &
+ (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+ arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+
+ optee_fill_pages_list(pages_list, pages, page_num,
+ tee_shm_get_page_offset(shm));
+ } else {
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ arg->params[0].u.tmem.buf_ptr = pa;
+ arg->params[0].u.tmem.size = sz;
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+ }
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ tee_shm_free(shm);
+}
+
+static void free_pages_list(struct optee_call_ctx *call_ctx)
+{
+ if (call_ctx->pages_list) {
+ optee_free_pages_list(call_ctx->pages_list,
+ call_ctx->num_entries);
+ call_ctx->pages_list = NULL;
+ call_ctx->num_entries = 0;
+ }
+}
+
+static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
+{
+ free_pages_list(call_ctx);
+}
+
+static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
+ struct tee_shm *shm,
+ struct optee_call_ctx *call_ctx)
+{
+ struct optee_msg_arg *arg;
+
+ arg = tee_shm_get_va(shm, 0);
+ if (IS_ERR(arg)) {
+ pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
+ return;
+ }
+
+ switch (arg->cmd) {
+ case OPTEE_RPC_CMD_SHM_ALLOC:
+ free_pages_list(call_ctx);
+ handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
+ break;
+ case OPTEE_RPC_CMD_SHM_FREE:
+ handle_rpc_func_cmd_shm_free(ctx, arg);
+ break;
+ default:
+ optee_rpc_cmd(ctx, optee, arg);
+ }
+}
+
+/**
+ * optee_handle_rpc() - handle RPC from secure world
+ * @ctx: context doing the RPC
+ * @param: value of registers for the RPC
+ * @call_ctx: call context. Preserved during one OP-TEE invocation
+ *
+ * Result of RPC is written back into @param.
+ */
+static void optee_handle_rpc(struct tee_context *ctx,
+ struct optee_rpc_param *param,
+ struct optee_call_ctx *call_ctx)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct tee_shm *shm;
+ phys_addr_t pa;
+
+ switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
+ case OPTEE_SMC_RPC_FUNC_ALLOC:
+ shm = tee_shm_alloc(ctx, param->a1,
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
+ reg_pair_from_64(&param->a1, &param->a2, pa);
+ reg_pair_from_64(&param->a4, &param->a5,
+ (unsigned long)shm);
+ } else {
+ param->a1 = 0;
+ param->a2 = 0;
+ param->a4 = 0;
+ param->a5 = 0;
+ }
+ break;
+ case OPTEE_SMC_RPC_FUNC_FREE:
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ tee_shm_free(shm);
+ break;
+ case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
+ /*
+ * A foreign interrupt was raised while secure world was
+ * executing. Since such interrupts are handled in Linux, a dummy
+ * RPC is performed to let Linux take the interrupt through the
+ * normal vector.
+ */
+ break;
+ case OPTEE_SMC_RPC_FUNC_CMD:
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
+ break;
+ default:
+ pr_warn("Unknown RPC func 0x%x\n",
+ (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
+ break;
+ }
+
+ param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
+}
+
+/**
+ * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
+ * @ctx: calling context
+ * @arg: shared memory holding the message to pass to secure world
+ *
+ * Does an SMC to OP-TEE in secure world and handles any resulting
+ * Remote Procedure Calls (RPC) from OP-TEE.
+ *
+ * Returns return code from secure world, 0 is OK
+ */
+static int optee_smc_do_call_with_arg(struct tee_context *ctx,
+ struct tee_shm *arg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_call_waiter w;
+ struct optee_rpc_param param = { };
+ struct optee_call_ctx call_ctx = { };
+ phys_addr_t parg;
+ int rc;
+
+ rc = tee_shm_get_pa(arg, 0, &parg);
+ if (rc)
+ return rc;
+
+ param.a0 = OPTEE_SMC_CALL_WITH_ARG;
+ reg_pair_from_64(&param.a1, &param.a2, parg);
+ /* Initialize waiter */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ trace_optee_invoke_fn_begin(&param);
+ optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
+ param.a4, param.a5, param.a6, param.a7,
+ &res);
+ trace_optee_invoke_fn_end(&param, &res);
+
+ if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
+ /*
+ * Out of threads in secure world, wait for a thread
+ * to become available.
+ */
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
+ cond_resched();
+ param.a0 = res.a0;
+ param.a1 = res.a1;
+ param.a2 = res.a2;
+ param.a3 = res.a3;
+ optee_handle_rpc(ctx, &param, &call_ctx);
+ } else {
+ rc = res.a0;
+ break;
+ }
+ }
+
+ optee_rpc_finalize_call(&call_ctx);
+ /*
+ * We're done with our thread in secure world; if there are any
+ * thread waiters, wake up one.
+ */
+ optee_cq_wait_final(&optee->call_queue, &w);
+
+ return rc;
+}
+
+/*
+ * 5. Driver initialization
+ *
+ * During driver initialization secure world is probed to find out which
+ * features it supports so the driver can be initialized with a matching
+ * configuration. This involves for instance support for dynamic shared
+ * memory instead of a static memory carveout.
+ */
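+
+/*
+ * optee_probe() below proceeds in order: resolve the SMCCC conduit
+ * ("smc" or "hvc") from the device tree, verify the API UID and revision,
+ * exchange capabilities, set up a dynamic or static shared memory pool,
+ * and finally allocate and register the client and supplicant tee devices.
+ */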
+
+static void optee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_OPTEE,
+ .impl_caps = TEE_OPTEE_CAP_TZ,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ struct optee *optee = tee_get_drvdata(teedev);
+
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ v.gen_caps |= TEE_GEN_CAP_REG_MEM;
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
+ v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
+ *vers = v;
+}
+
+static int optee_smc_open(struct tee_context *ctx)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ u32 sec_caps = optee->smc.sec_caps;
+
+ return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
+}
+
+static const struct tee_driver_ops optee_clnt_ops = {
+ .get_version = optee_get_version,
+ .open = optee_smc_open,
+ .release = optee_release,
+ .open_session = optee_open_session,
+ .close_session = optee_close_session,
+ .invoke_func = optee_invoke_func,
+ .cancel_req = optee_cancel_req,
+ .shm_register = optee_shm_register,
+ .shm_unregister = optee_shm_unregister,
+};
+
+static const struct tee_desc optee_clnt_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &optee_clnt_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct tee_driver_ops optee_supp_ops = {
+ .get_version = optee_get_version,
+ .open = optee_smc_open,
+ .release = optee_release_supp,
+ .supp_recv = optee_supp_recv,
+ .supp_send = optee_supp_send,
+ .shm_register = optee_shm_register_supp,
+ .shm_unregister = optee_shm_unregister_supp,
+};
+
+static const struct tee_desc optee_supp_desc = {
+ .name = DRIVER_NAME "-supp",
+ .ops = &optee_supp_ops,
+ .owner = THIS_MODULE,
+ .flags = TEE_DESC_PRIVILEGED,
+};
+
+static const struct optee_ops optee_ops = {
+ .do_call_with_arg = optee_smc_do_call_with_arg,
+ .to_msg_param = optee_to_msg_param,
+ .from_msg_param = optee_from_msg_param,
+};
+
+static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
+ res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
+ return true;
+ return false;
+}
+
+static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_call_get_os_revision_result result;
+ } res = {
+ .result = {
+ .build_id = 0
+ }
+ };
+
+ invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
+ &res.smccc);
+
+ if (res.result.build_id)
+ pr_info("revision %lu.%lu (%08lx)", res.result.major,
+ res.result.minor, res.result.build_id);
+ else
+ pr_info("revision %lu.%lu", res.result.major, res.result.minor);
+}
+
+static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_calls_revision_result result;
+ } res;
+
+ invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+
+ if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
+ (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
+ return true;
+ return false;
+}
+
+static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
+ u32 *sec_caps)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_exchange_capabilities_result result;
+ } res;
+ u32 a1 = 0;
+
+ /*
+ * TODO: This isn't enough to tell if it's a UP system (from the
+ * kernel's point of view) or not; is_smp() returns the information
+ * needed, but can't be called directly from here.
+ */
+ if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
+ a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
+
+ invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
+ &res.smccc);
+
+ if (res.result.status != OPTEE_SMC_RETURN_OK)
+ return false;
+
+ *sec_caps = res.result.capabilities;
+ return true;
+}
+
+static struct tee_shm_pool *optee_config_dyn_shm(void)
+{
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+
+ rc = optee_shm_pool_alloc_pages();
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
+
+ rc = optee_shm_pool_alloc_pages();
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ return rc;
+ }
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+ }
+
+ return rc;
+}
+
+static struct tee_shm_pool *
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_get_shm_config_result result;
+ } res;
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+ phys_addr_t begin;
+ phys_addr_t end;
+ void *va;
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+ const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+
+ invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+ if (res.result.status != OPTEE_SMC_RETURN_OK) {
+ pr_err("static shm service not available\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
+ pr_err("only normal cached shared memory supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ begin = roundup(res.result.start, PAGE_SIZE);
+ end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
+ paddr = begin;
+ size = end - begin;
+
+ if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
+ pr_err("too small shared memory area\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ va = memremap(paddr, size, MEMREMAP_WB);
+ if (!va) {
+ pr_err("shared memory ioremap failed\n");
+ return ERR_PTR(-EINVAL);
+ }
+ vaddr = (unsigned long)va;
+
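+ /*
+ * Carve the region in two: the first OPTEE_SHM_NUM_PRIV_PAGES pages
+ * back the driver-private pool (8 byte aligned allocations) and the
+ * remainder backs the dmabuf pool (page aligned allocations).
+ */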
+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
+ 3 /* 8 bytes aligned */);
+ if (IS_ERR(rc))
+ goto err_memunmap;
+ priv_mgr = rc;
+
+ vaddr += sz;
+ paddr += sz;
+ size -= sz;
+
+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
+ if (IS_ERR(rc))
+ goto err_free_priv_mgr;
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc))
+ goto err_free_dmabuf_mgr;
+
+ *memremaped_shm = va;
+
+ return rc;
+
+err_free_dmabuf_mgr:
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+ tee_shm_pool_mgr_destroy(priv_mgr);
+err_memunmap:
+ memunmap(va);
+ return rc;
+}
+
+/* Simple wrapper functions to be able to use a function pointer */
+static void optee_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static optee_invoke_fn *get_invoke_func(struct device *dev)
+{
+ const char *method;
+
+ pr_info("probing for conduit method.\n");
+
+ if (device_property_read_string(dev, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ if (!strcmp("hvc", method))
+ return optee_smccc_hvc;
+ else if (!strcmp("smc", method))
+ return optee_smccc_smc;
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return ERR_PTR(-EINVAL);
+}
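+
+/*
+ * Example of a device tree node consumed above (a sketch following the
+ * linaro,optee-tz binding):
+ *
+ *   firmware {
+ *           optee {
+ *                   compatible = "linaro,optee-tz";
+ *                   method = "smc";
+ *           };
+ *   };
+ */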
+
+/* optee_smc_remove - Device Removal Routine
+ * @pdev: platform device information struct
+ *
+ * optee_smc_remove is called by the platform subsystem to alert the driver
+ * that it should release the device
+ */
+static int optee_smc_remove(struct platform_device *pdev)
+{
+ struct optee *optee = platform_get_drvdata(pdev);
+
+ /*
+ * Ask OP-TEE to free all cached shared memory objects to decrease
+ * reference counters and also avoid wild pointers in secure world
+ * into the old shared memory range.
+ */
+ optee_disable_shm_cache(optee);
+
+ optee_remove_common(optee);
+
+ if (optee->smc.memremaped_shm)
+ memunmap(optee->smc.memremaped_shm);
+
+ kfree(optee);
+
+ return 0;
+}
+
+/* optee_shutdown - Device Shutdown Routine
+ * @pdev: platform device information struct
+ *
+ * optee_shutdown is called by the platform subsystem to alert
+ * the driver that a shutdown, reboot, or kexec is happening and
+ * the device must be disabled.
+ */
+static void optee_shutdown(struct platform_device *pdev)
+{
+ optee_disable_shm_cache(platform_get_drvdata(pdev));
+}
+
+static int optee_probe(struct platform_device *pdev)
+{
+ optee_invoke_fn *invoke_fn;
+ struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
+ struct optee *optee = NULL;
+ void *memremaped_shm = NULL;
+ struct tee_device *teedev;
+ u32 sec_caps;
+ int rc;
+
+ invoke_fn = get_invoke_func(&pdev->dev);
+ if (IS_ERR(invoke_fn))
+ return PTR_ERR(invoke_fn);
+
+ if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
+ pr_warn("api uid mismatch\n");
+ return -EINVAL;
+ }
+
+ optee_msg_get_os_revision(invoke_fn);
+
+ if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
+ pr_warn("api revision mismatch\n");
+ return -EINVAL;
+ }
+
+ if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
+ pr_warn("capabilities mismatch\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Try to use dynamic shared memory if possible
+ */
+ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ pool = optee_config_dyn_shm();
+
+ /*
+ * If dynamic shared memory is not available or its setup failed,
+ * try the static one.
+ */
+ if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
+ pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ optee = kzalloc(sizeof(*optee), GFP_KERNEL);
+ if (!optee) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ optee->ops = &optee_ops;
+ optee->smc.invoke_fn = invoke_fn;
+ optee->smc.sec_caps = sec_caps;
+
+ teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->teedev = teedev;
+
+ teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->supp_teedev = teedev;
+
+ rc = tee_device_register(optee->teedev);
+ if (rc)
+ goto err;
+
+ rc = tee_device_register(optee->supp_teedev);
+ if (rc)
+ goto err;
+
+ mutex_init(&optee->call_queue.mutex);
+ INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_wait_queue_init(&optee->wait_queue);
+ optee_supp_init(&optee->supp);
+ optee->smc.memremaped_shm = memremaped_shm;
+ optee->pool = pool;
+
+ /*
+ * Ensure that there are no pre-existing shm objects before enabling
+ * the shm cache so that there's no chance of receiving an invalid
+ * address during shutdown. This could occur, for example, if we're
+ * kexec booting from an older kernel that did not properly clean up the
+ * shm cache.
+ */
+ optee_disable_unmapped_shm_cache(optee);
+
+ optee_enable_shm_cache(optee);
+
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ pr_info("dynamic shared memory is enabled\n");
+
+ platform_set_drvdata(pdev, optee);
+
+ rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
+ if (rc) {
+ optee_smc_remove(pdev);
+ return rc;
+ }
+
+ pr_info("initialized driver\n");
+ return 0;
+err:
+ if (optee) {
+ /*
+ * tee_device_unregister() is safe to call even if the
+ * devices haven't been registered with
+ * tee_device_register() yet.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+ kfree(optee);
+ }
+ if (pool)
+ tee_shm_pool_free(pool);
+ if (memremaped_shm)
+ memunmap(memremaped_shm);
+ return rc;
+}
+
+static const struct of_device_id optee_dt_match[] = {
+ { .compatible = "linaro,optee-tz" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, optee_dt_match);
+
+static struct platform_driver optee_driver = {
+ .probe = optee_probe,
+ .remove = optee_smc_remove,
+ .shutdown = optee_shutdown,
+ .driver = {
+ .name = "optee",
+ .of_match_table = optee_dt_match,
+ },
+};
+
+int optee_smc_abi_register(void)
+{
+ return platform_driver_register(&optee_driver);
+}
+
+void optee_smc_abi_unregister(void)
+{
+ platform_driver_unregister(&optee_driver);
+}