Diffstat (limited to 'drivers/firmware/qcom')
-rw-r--r--  drivers/firmware/qcom/Kconfig                        31
-rw-r--r--  drivers/firmware/qcom/Makefile                        1
-rw-r--r--  drivers/firmware/qcom/qcom_qseecom_uefisecapp.c     256
-rw-r--r--  drivers/firmware/qcom/qcom_scm-smc.c                 30
-rw-r--r--  drivers/firmware/qcom/qcom_scm.c                    197
-rw-r--r--  drivers/firmware/qcom/qcom_scm.h                      9
-rw-r--r--  drivers/firmware/qcom/qcom_tzmem.c                  469
-rw-r--r--  drivers/firmware/qcom/qcom_tzmem.h                   13
8 files changed, 777 insertions(+), 229 deletions(-)
diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig
index 3f05d9854ddf..7f6eb4174734 100644
--- a/drivers/firmware/qcom/Kconfig
+++ b/drivers/firmware/qcom/Kconfig
@@ -7,8 +7,39 @@
menu "Qualcomm firmware drivers"
config QCOM_SCM
+ select QCOM_TZMEM
tristate
+config QCOM_TZMEM
+ tristate
+ select GENERIC_ALLOCATOR
+
+choice
+ prompt "TrustZone interface memory allocator mode"
+ default QCOM_TZMEM_MODE_GENERIC
+ help
+ Selects the mode of the memory allocator providing memory buffers of
+ suitable format for sharing with the TrustZone. If in doubt, select
+ 'Generic'.
+
+config QCOM_TZMEM_MODE_GENERIC
+ bool "Generic"
+ help
+ Use the generic allocator mode. The memory is page-aligned, non-cacheable
+ and physically contiguous.
+
+config QCOM_TZMEM_MODE_SHMBRIDGE
+ bool "SHM Bridge"
+ help
+ Use Qualcomm Shared Memory Bridge. The memory has the same alignment as
+ in the 'Generic' allocator but is also explicitly marked as an SHM Bridge
+ buffer.
+
+ With this selected, all buffers passed to the TrustZone must be allocated
+ using the TZMem allocator or else the TrustZone will refuse to use them.
+
+endchoice
+
config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
bool "Qualcomm download mode enabled by default"
depends on QCOM_SCM
diff --git a/drivers/firmware/qcom/Makefile b/drivers/firmware/qcom/Makefile
index c9f12ee8224a..0be40a1abc13 100644
--- a/drivers/firmware/qcom/Makefile
+++ b/drivers/firmware/qcom/Makefile
@@ -5,5 +5,6 @@
obj-$(CONFIG_QCOM_SCM) += qcom-scm.o
qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
+obj-$(CONFIG_QCOM_TZMEM) += qcom_tzmem.o
obj-$(CONFIG_QCOM_QSEECOM) += qcom_qseecom.o
obj-$(CONFIG_QCOM_QSEECOM_UEFISECAPP) += qcom_qseecom_uefisecapp.o
diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
index bc550ad0dbe0..6fefa4fe80e8 100644
--- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
+++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c
@@ -13,11 +13,14 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ucs2_string.h>
#include <linux/firmware/qcom/qcom_qseecom.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_tzmem.h>
/* -- Qualcomm "uefisecapp" interface definitions. -------------------------- */
@@ -272,6 +275,7 @@ struct qsee_rsp_uefi_query_variable_info {
struct qcuefi_client {
struct qseecom_client *client;
struct efivars efivars;
+ struct qcom_tzmem_pool *mempool;
};
static struct device *qcuefi_dev(struct qcuefi_client *qcuefi)
@@ -293,12 +297,11 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
{
struct qsee_req_uefi_get_variable *req_data;
struct qsee_rsp_uefi_get_variable *rsp_data;
+ void *cmd_buf __free(qcom_tzmem) = NULL;
unsigned long buffer_size = *data_size;
- efi_status_t efi_status = EFI_SUCCESS;
unsigned long name_length;
- dma_addr_t cmd_buf_dma;
+ efi_status_t efi_status;
size_t cmd_buf_size;
- void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
@@ -333,11 +336,9 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
__reqdata_offs(rsp_size, &rsp_offs)
);
- cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
- if (!cmd_buf) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out;
- }
+ cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL);
+ if (!cmd_buf)
+ return EFI_OUT_OF_RESOURCES;
req_data = cmd_buf + req_offs;
rsp_data = cmd_buf + rsp_offs;
@@ -351,30 +352,22 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
req_data->length = req_size;
status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length);
- if (status < 0) {
- efi_status = EFI_INVALID_PARAMETER;
- goto out_free;
- }
+ if (status < 0)
+ return EFI_INVALID_PARAMETER;
memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size);
status = qcom_qseecom_app_send(qcuefi->client,
- cmd_buf_dma + req_offs, req_size,
- cmd_buf_dma + rsp_offs, rsp_size);
- if (status) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ cmd_buf + req_offs, req_size,
+ cmd_buf + rsp_offs, rsp_size);
+ if (status)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->command_id != QSEE_CMD_UEFI_GET_VARIABLE)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->length < sizeof(*rsp_data)) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->length < sizeof(*rsp_data))
+ return EFI_DEVICE_ERROR;
if (rsp_data->status) {
dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n",
@@ -388,18 +381,14 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
*attributes = rsp_data->attributes;
}
- goto out_free;
+ return qsee_uefi_status_to_efi(rsp_data->status);
}
- if (rsp_data->length > rsp_size) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->length > rsp_size)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->data_offset + rsp_data->data_size > rsp_data->length)
+ return EFI_DEVICE_ERROR;
/*
* Note: We need to set attributes and data size even if the buffer is
@@ -422,22 +411,15 @@ static efi_status_t qsee_uefi_get_variable(struct qcuefi_client *qcuefi, const e
if (attributes)
*attributes = rsp_data->attributes;
- if (buffer_size == 0 && !data) {
- efi_status = EFI_SUCCESS;
- goto out_free;
- }
+ if (buffer_size == 0 && !data)
+ return EFI_SUCCESS;
- if (buffer_size < rsp_data->data_size) {
- efi_status = EFI_BUFFER_TOO_SMALL;
- goto out_free;
- }
+ if (buffer_size < rsp_data->data_size)
+ return EFI_BUFFER_TOO_SMALL;
memcpy(data, ((void *)rsp_data) + rsp_data->data_offset, rsp_data->data_size);
-out_free:
- qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
-out:
- return efi_status;
+ return EFI_SUCCESS;
}
static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const efi_char16_t *name,
@@ -446,11 +428,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
{
struct qsee_req_uefi_set_variable *req_data;
struct qsee_rsp_uefi_set_variable *rsp_data;
- efi_status_t efi_status = EFI_SUCCESS;
+ void *cmd_buf __free(qcom_tzmem) = NULL;
unsigned long name_length;
- dma_addr_t cmd_buf_dma;
size_t cmd_buf_size;
- void *cmd_buf;
size_t name_offs;
size_t guid_offs;
size_t data_offs;
@@ -486,11 +466,9 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
__reqdata_offs(sizeof(*rsp_data), &rsp_offs)
);
- cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
- if (!cmd_buf) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out;
- }
+ cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL);
+ if (!cmd_buf)
+ return EFI_OUT_OF_RESOURCES;
req_data = cmd_buf + req_offs;
rsp_data = cmd_buf + rsp_offs;
@@ -506,10 +484,8 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
req_data->length = req_size;
status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name, name_length);
- if (status < 0) {
- efi_status = EFI_INVALID_PARAMETER;
- goto out_free;
- }
+ if (status < 0)
+ return EFI_INVALID_PARAMETER;
memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size);
@@ -517,33 +493,24 @@ static efi_status_t qsee_uefi_set_variable(struct qcuefi_client *qcuefi, const e
memcpy(((void *)req_data) + req_data->data_offset, data, req_data->data_size);
status = qcom_qseecom_app_send(qcuefi->client,
- cmd_buf_dma + req_offs, req_size,
- cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
- if (status) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ cmd_buf + req_offs, req_size,
+ cmd_buf + rsp_offs, sizeof(*rsp_data));
+ if (status)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->command_id != QSEE_CMD_UEFI_SET_VARIABLE)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->length != sizeof(*rsp_data)) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->length != sizeof(*rsp_data))
+ return EFI_DEVICE_ERROR;
if (rsp_data->status) {
dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n",
__func__, rsp_data->status);
- efi_status = qsee_uefi_status_to_efi(rsp_data->status);
+ return qsee_uefi_status_to_efi(rsp_data->status);
}
-out_free:
- qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
-out:
- return efi_status;
+ return EFI_SUCCESS;
}
static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
@@ -552,10 +519,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
{
struct qsee_req_uefi_get_next_variable *req_data;
struct qsee_rsp_uefi_get_next_variable *rsp_data;
- efi_status_t efi_status = EFI_SUCCESS;
- dma_addr_t cmd_buf_dma;
+ void *cmd_buf __free(qcom_tzmem) = NULL;
+ efi_status_t efi_status;
size_t cmd_buf_size;
- void *cmd_buf;
size_t guid_offs;
size_t name_offs;
size_t req_size;
@@ -587,11 +553,9 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
__reqdata_offs(rsp_size, &rsp_offs)
);
- cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
- if (!cmd_buf) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out;
- }
+ cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL);
+ if (!cmd_buf)
+ return EFI_OUT_OF_RESOURCES;
req_data = cmd_buf + req_offs;
rsp_data = cmd_buf + rsp_offs;
@@ -606,28 +570,20 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
memcpy(((void *)req_data) + req_data->guid_offset, guid, req_data->guid_size);
status = ucs2_strscpy(((void *)req_data) + req_data->name_offset, name,
*name_size / sizeof(*name));
- if (status < 0) {
- efi_status = EFI_INVALID_PARAMETER;
- goto out_free;
- }
+ if (status < 0)
+ return EFI_INVALID_PARAMETER;
status = qcom_qseecom_app_send(qcuefi->client,
- cmd_buf_dma + req_offs, req_size,
- cmd_buf_dma + rsp_offs, rsp_size);
- if (status) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ cmd_buf + req_offs, req_size,
+ cmd_buf + rsp_offs, rsp_size);
+ if (status)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->command_id != QSEE_CMD_UEFI_GET_NEXT_VARIABLE)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->length < sizeof(*rsp_data)) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->length < sizeof(*rsp_data))
+ return EFI_DEVICE_ERROR;
if (rsp_data->status) {
dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n",
@@ -642,53 +598,40 @@ static efi_status_t qsee_uefi_get_next_variable(struct qcuefi_client *qcuefi,
if (efi_status == EFI_BUFFER_TOO_SMALL)
*name_size = rsp_data->name_size;
- goto out_free;
+ return efi_status;
}
- if (rsp_data->length > rsp_size) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->length > rsp_size)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->name_offset + rsp_data->name_size > rsp_data->length)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->guid_offset + rsp_data->guid_size > rsp_data->length) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->guid_offset + rsp_data->guid_size > rsp_data->length)
+ return EFI_DEVICE_ERROR;
if (rsp_data->name_size > *name_size) {
*name_size = rsp_data->name_size;
- efi_status = EFI_BUFFER_TOO_SMALL;
- goto out_free;
+ return EFI_BUFFER_TOO_SMALL;
}
- if (rsp_data->guid_size != sizeof(*guid)) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->guid_size != sizeof(*guid))
+ return EFI_DEVICE_ERROR;
memcpy(guid, ((void *)rsp_data) + rsp_data->guid_offset, rsp_data->guid_size);
status = ucs2_strscpy(name, ((void *)rsp_data) + rsp_data->name_offset,
rsp_data->name_size / sizeof(*name));
*name_size = rsp_data->name_size;
- if (status < 0) {
+ if (status < 0)
/*
* Return EFI_DEVICE_ERROR here because the buffer size should
* have already been validated above, causing this function to
* bail with EFI_BUFFER_TOO_SMALL.
*/
- efi_status = EFI_DEVICE_ERROR;
- }
+ return EFI_DEVICE_ERROR;
-out_free:
- qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
-out:
- return efi_status;
+ return EFI_SUCCESS;
}
static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi, u32 attr,
@@ -697,10 +640,8 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
{
struct qsee_req_uefi_query_variable_info *req_data;
struct qsee_rsp_uefi_query_variable_info *rsp_data;
- efi_status_t efi_status = EFI_SUCCESS;
- dma_addr_t cmd_buf_dma;
+ void *cmd_buf __free(qcom_tzmem) = NULL;
size_t cmd_buf_size;
- void *cmd_buf;
size_t req_offs;
size_t rsp_offs;
int status;
@@ -710,11 +651,9 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
__reqdata_offs(sizeof(*rsp_data), &rsp_offs)
);
- cmd_buf = qseecom_dma_alloc(qcuefi->client, cmd_buf_size, &cmd_buf_dma, GFP_KERNEL);
- if (!cmd_buf) {
- efi_status = EFI_OUT_OF_RESOURCES;
- goto out;
- }
+ cmd_buf = qcom_tzmem_alloc(qcuefi->mempool, cmd_buf_size, GFP_KERNEL);
+ if (!cmd_buf)
+ return EFI_OUT_OF_RESOURCES;
req_data = cmd_buf + req_offs;
rsp_data = cmd_buf + rsp_offs;
@@ -724,28 +663,21 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
req_data->length = sizeof(*req_data);
status = qcom_qseecom_app_send(qcuefi->client,
- cmd_buf_dma + req_offs, sizeof(*req_data),
- cmd_buf_dma + rsp_offs, sizeof(*rsp_data));
- if (status) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ cmd_buf + req_offs, sizeof(*req_data),
+ cmd_buf + rsp_offs, sizeof(*rsp_data));
+ if (status)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->command_id != QSEE_CMD_UEFI_QUERY_VARIABLE_INFO)
+ return EFI_DEVICE_ERROR;
- if (rsp_data->length != sizeof(*rsp_data)) {
- efi_status = EFI_DEVICE_ERROR;
- goto out_free;
- }
+ if (rsp_data->length != sizeof(*rsp_data))
+ return EFI_DEVICE_ERROR;
if (rsp_data->status) {
dev_dbg(qcuefi_dev(qcuefi), "%s: uefisecapp error: 0x%x\n",
__func__, rsp_data->status);
- efi_status = qsee_uefi_status_to_efi(rsp_data->status);
- goto out_free;
+ return qsee_uefi_status_to_efi(rsp_data->status);
}
if (storage_space)
@@ -757,10 +689,7 @@ static efi_status_t qsee_uefi_query_variable_info(struct qcuefi_client *qcuefi,
if (max_variable_size)
*max_variable_size = rsp_data->max_variable_size;
-out_free:
- qseecom_dma_free(qcuefi->client, cmd_buf_size, cmd_buf, cmd_buf_dma);
-out:
- return efi_status;
+ return EFI_SUCCESS;
}
/* -- Global efivar interface. ---------------------------------------------- */
@@ -871,6 +800,7 @@ static const struct efivar_operations qcom_efivar_ops = {
static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
const struct auxiliary_device_id *aux_dev_id)
{
+ struct qcom_tzmem_pool_config pool_config;
struct qcuefi_client *qcuefi;
int status;
@@ -889,6 +819,16 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev,
if (status)
qcuefi_set_reference(NULL);
+ memset(&pool_config, 0, sizeof(pool_config));
+ pool_config.initial_size = SZ_4K;
+ pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER;
+ pool_config.increment = 2;
+ pool_config.max_size = SZ_256K;
+
+ qcuefi->mempool = devm_qcom_tzmem_pool_new(&aux_dev->dev, &pool_config);
+ if (IS_ERR(qcuefi->mempool))
+ return PTR_ERR(qcuefi->mempool);
+
return status;
}
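
Note: the __free(qcom_tzmem) annotations used throughout the conversion above rely on a scope-based cleanup class. Its declaration lives in the public header <linux/firmware/qcom/qcom_tzmem.h>, which is not part of this diff; a minimal sketch of what it presumably looks like, together with the resulting usage pattern (example_send() and the 4 KiB size are purely illustrative):

/*
 * Sketch only, not part of this patch. DEFINE_FREE() comes from
 * <linux/cleanup.h>; the class name must match the __free(qcom_tzmem)
 * annotations used above.
 */
DEFINE_FREE(qcom_tzmem, void *, if (_T) qcom_tzmem_free(_T))

static int example_send(struct qcom_tzmem_pool *pool)
{
	void *buf __free(qcom_tzmem) = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... fill buf and hand qcom_tzmem_to_phys(buf) to the firmware ... */

	return 0; /* buf is released automatically when it goes out of scope */
}
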
diff --git a/drivers/firmware/qcom/qcom_scm-smc.c b/drivers/firmware/qcom/qcom_scm-smc.c
index 16cf88acfa8e..dca5f3f1883b 100644
--- a/drivers/firmware/qcom/qcom_scm-smc.c
+++ b/drivers/firmware/qcom/qcom_scm-smc.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/delay.h>
@@ -9,6 +10,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>
@@ -150,11 +152,10 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
struct qcom_scm_res *res, bool atomic)
{
+ struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
int arglen = desc->arginfo & 0xf;
int i, ret;
- dma_addr_t args_phys = 0;
- void *args_virt = NULL;
- size_t alloc_len;
+ void *args_virt __free(qcom_tzmem) = NULL;
gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
@@ -172,9 +173,9 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
- alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
- args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
-
+ args_virt = qcom_tzmem_alloc(mempool,
+ SCM_SMC_N_EXT_ARGS * sizeof(u64),
+ flag);
if (!args_virt)
return -ENOMEM;
@@ -192,25 +193,10 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
SCM_SMC_FIRST_EXT_IDX]);
}
- args_phys = dma_map_single(dev, args_virt, alloc_len,
- DMA_TO_DEVICE);
-
- if (dma_mapping_error(dev, args_phys)) {
- kfree(args_virt);
- return -ENOMEM;
- }
-
- smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
+ smc.args[SCM_SMC_LAST_REG_IDX] = qcom_tzmem_to_phys(args_virt);
}
- /* ret error check follows after args_virt cleanup*/
ret = __scm_smc_do(dev, &smc, &smc_res, atomic);
-
- if (args_virt) {
- dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
- kfree(args_virt);
- }
-
if (ret)
return ret;
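
Note: with the DMA mapping gone, the out-of-register argument path of __scm_smc_call() reduces to a single pool allocation. A condensed sketch of the resulting flow (declarations, argument packing and error handling exactly as in the hunk above):

	struct qcom_tzmem_pool *mempool = qcom_scm_get_tzmem_pool();
	void *args_virt __free(qcom_tzmem) = NULL;

	if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
		args_virt = qcom_tzmem_alloc(mempool,
					     SCM_SMC_N_EXT_ARGS * sizeof(u64),
					     atomic ? GFP_ATOMIC : GFP_KERNEL);
		if (!args_virt)
			return -ENOMEM;

		/* ... copy the remaining arguments into args_virt ... */

		/* The TrustZone only ever sees the physical address. */
		smc.args[SCM_SMC_LAST_REG_IDX] = qcom_tzmem_to_phys(args_virt);
	}

	ret = __scm_smc_do(dev, &smc, &smc_res, atomic);
	/* args_virt is freed by the cleanup helper; no unmap or kfree needed. */
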
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 68f4df7e6c3c..00c379a3cceb 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -6,12 +6,15 @@
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
@@ -20,11 +23,14 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/sizes.h>
#include <linux/types.h>
#include "qcom_scm.h"
+#include "qcom_tzmem.h"
static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);
@@ -43,6 +49,8 @@ struct qcom_scm {
int scm_vote_count;
u64 dload_mode_addr;
+
+ struct qcom_tzmem_pool *mempool;
};
struct qcom_scm_current_perm_info {
@@ -114,7 +122,6 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
};
#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
-#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
#define QCOM_DLOAD_MASK GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP 0
@@ -198,6 +205,11 @@ static void qcom_scm_bw_disable(void)
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);
+struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
+{
+ return __scm->mempool;
+}
+
static enum qcom_scm_convention __get_convention(void)
{
unsigned long flags;
@@ -570,6 +582,13 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
* During the scm call memory protection will be enabled for the meta
* data blob, so make sure it's physically contiguous, 4K aligned and
* non-cachable to avoid XPU violations.
+ *
+ * For PIL calls, the hypervisor creates SHM Bridges for the blob
+ * buffers on behalf of Linux, so we must not do it ourselves and
+ * hence do not use the TZMem allocator here.
+ *
+ * If we pass a buffer that is already part of an SHM Bridge to this
+ * call, it will fail.
*/
mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
GFP_KERNEL);
@@ -1008,14 +1027,13 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
struct qcom_scm_mem_map_info *mem_to_map;
phys_addr_t mem_to_map_phys;
phys_addr_t dest_phys;
- dma_addr_t ptr_phys;
+ phys_addr_t ptr_phys;
size_t mem_to_map_sz;
size_t dest_sz;
size_t src_sz;
size_t ptr_sz;
int next_vm;
__le32 *src;
- void *ptr;
int ret, i, b;
u64 srcvm_bits = *srcvm;
@@ -1025,10 +1043,13 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
ALIGN(dest_sz, SZ_64);
- ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+ void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ ptr_sz, GFP_KERNEL);
if (!ptr)
return -ENOMEM;
+ ptr_phys = qcom_tzmem_to_phys(ptr);
+
/* Fill source vmid detail */
src = ptr;
i = 0;
@@ -1057,7 +1078,6 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
ptr_phys, src_sz, dest_phys, dest_sz);
- dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d\n", ret);
@@ -1205,32 +1225,21 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
.args[4] = data_unit_size,
.owner = ARM_SMCCC_OWNER_SIP,
};
- void *keybuf;
- dma_addr_t key_phys;
- int ret;
- /*
- * 'key' may point to vmalloc()'ed memory, but we need to pass a
- * physical address that's been properly flushed. The sanctioned way to
- * do this is by using the DMA API. But as is best practice for crypto
- * keys, we also must wipe the key after use. This makes kmemdup() +
- * dma_map_single() not clearly correct, since the DMA API can use
- * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
- * keys is normally rare and thus not performance-critical.
- */
+ int ret;
- keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
- GFP_KERNEL);
+ void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ key_size,
+ GFP_KERNEL);
if (!keybuf)
return -ENOMEM;
memcpy(keybuf, key, key_size);
- desc.args[1] = key_phys;
+ desc.args[1] = qcom_tzmem_to_phys(keybuf);
ret = qcom_scm_call(__scm->dev, &desc, NULL);
memzero_explicit(keybuf, key_size);
- dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
@@ -1342,6 +1351,66 @@ bool qcom_scm_lmh_dcvsh_available(void)
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
+int qcom_scm_shm_bridge_enable(void)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
+ .owner = ARM_SMCCC_OWNER_SIP
+ };
+
+ struct qcom_scm_res res;
+
+ if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
+ QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
+ return -EOPNOTSUPP;
+
+ return qcom_scm_call(__scm->dev, &desc, &res) ?: res.result[0];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
+
+int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
+ u64 ipfn_and_s_perm_flags, u64 size_and_flags,
+ u64 ns_vmids, u64 *handle)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ .args[0] = pfn_and_ns_perm_flags,
+ .args[1] = ipfn_and_s_perm_flags,
+ .args[2] = size_and_flags,
+ .args[3] = ns_vmids,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL, QCOM_SCM_VAL),
+ };
+
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+ if (handle && !ret)
+ *handle = res.result[1];
+
+ return ret ?: res.result[0];
+}
+EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);
+
+int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_MP,
+ .cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ .args[0] = handle,
+ .arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);
+
int qcom_scm_lmh_profile_change(u32 profile_id)
{
struct qcom_scm_desc desc = {
@@ -1359,8 +1428,6 @@ EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
u64 limit_node, u32 node_id, u64 version)
{
- dma_addr_t payload_phys;
- u32 *payload_buf;
int ret, payload_size = 5 * sizeof(u32);
struct qcom_scm_desc desc = {
@@ -1375,7 +1442,9 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
.owner = ARM_SMCCC_OWNER_SIP,
};
- payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
+ u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ payload_size,
+ GFP_KERNEL);
if (!payload_buf)
return -ENOMEM;
@@ -1385,15 +1454,28 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
payload_buf[3] = 1;
payload_buf[4] = payload_val;
- desc.args[0] = payload_phys;
+ desc.args[0] = qcom_tzmem_to_phys(payload_buf);
ret = qcom_scm_call(__scm->dev, &desc, NULL);
- dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
+int qcom_scm_gpu_init_regs(u32 gpu_req)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_GPU,
+ .cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
+ .arginfo = QCOM_SCM_ARGS(1),
+ .args[0] = gpu_req,
+ .owner = ARM_SMCCC_OWNER_SIP,
+ };
+
+ return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);
+
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
struct device_node *tcsr;
@@ -1545,37 +1627,27 @@ int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
unsigned long app_name_len = strlen(app_name);
struct qcom_scm_desc desc = {};
struct qcom_scm_qseecom_resp res = {};
- dma_addr_t name_buf_phys;
- char *name_buf;
int status;
if (app_name_len >= name_buf_size)
return -EINVAL;
- name_buf = kzalloc(name_buf_size, GFP_KERNEL);
+ char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
+ name_buf_size,
+ GFP_KERNEL);
if (!name_buf)
return -ENOMEM;
memcpy(name_buf, app_name, app_name_len);
- name_buf_phys = dma_map_single(__scm->dev, name_buf, name_buf_size, DMA_TO_DEVICE);
- status = dma_mapping_error(__scm->dev, name_buf_phys);
- if (status) {
- kfree(name_buf);
- dev_err(__scm->dev, "qseecom: failed to map dma address\n");
- return status;
- }
-
desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
desc.svc = QSEECOM_TZ_SVC_APP_MGR;
desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
- desc.args[0] = name_buf_phys;
+ desc.args[0] = qcom_tzmem_to_phys(name_buf);
desc.args[1] = app_name_len;
status = qcom_scm_qseecom_call(&desc, &res);
- dma_unmap_single(__scm->dev, name_buf_phys, name_buf_size, DMA_TO_DEVICE);
- kfree(name_buf);
if (status)
return status;
@@ -1597,9 +1669,9 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
/**
* qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
* @app_id: The ID of the target app.
- * @req: DMA address of the request buffer sent to the app.
+ * @req: Request buffer sent to the app (must be TZ memory)
* @req_size: Size of the request buffer.
- * @rsp: DMA address of the response buffer, written to by the app.
+ * @rsp: Response buffer, written to by the app (must be TZ memory)
* @rsp_size: Size of the response buffer.
*
* Sends a request to the QSEE app associated with the given ID and read back
@@ -1610,13 +1682,18 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
*
* Return: Zero on success, nonzero on failure.
*/
-int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
- dma_addr_t rsp, size_t rsp_size)
+int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
{
struct qcom_scm_qseecom_resp res = {};
struct qcom_scm_desc desc = {};
+ phys_addr_t req_phys;
+ phys_addr_t rsp_phys;
int status;
+ req_phys = qcom_tzmem_to_phys(req);
+ rsp_phys = qcom_tzmem_to_phys(rsp);
+
desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
@@ -1624,9 +1701,9 @@ int qcom_scm_qseecom_app_send(u32 app_id, dma_addr_t req, size_t req_size,
QCOM_SCM_RW, QCOM_SCM_VAL,
QCOM_SCM_RW, QCOM_SCM_VAL);
desc.args[0] = app_id;
- desc.args[1] = req;
+ desc.args[1] = req_phys;
desc.args[2] = req_size;
- desc.args[3] = rsp;
+ desc.args[3] = rsp_phys;
desc.args[4] = rsp_size;
status = qcom_scm_qseecom_call(&desc, &res);
@@ -1649,6 +1726,8 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "lenovo,flex-5g" },
{ .compatible = "lenovo,thinkpad-x13s", },
{ .compatible = "qcom,sc8180x-primus" },
+ { .compatible = "qcom,x1e80100-crd" },
+ { .compatible = "qcom,x1e80100-qcp" },
{ }
};
@@ -1793,9 +1872,8 @@ static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
goto out;
}
- if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
- flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
- dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
+ if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
+ dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
goto out;
}
@@ -1810,6 +1888,7 @@ out:
static int qcom_scm_probe(struct platform_device *pdev)
{
+ struct qcom_tzmem_pool_config pool_config;
struct qcom_scm *scm;
int irq, ret;
@@ -1885,6 +1964,26 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled"))
qcom_scm_disable_sdi();
+ ret = of_reserved_mem_device_init(__scm->dev);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(__scm->dev, ret,
+ "Failed to setup the reserved memory region for TZ mem\n");
+
+ ret = qcom_tzmem_enable(__scm->dev);
+ if (ret)
+ return dev_err_probe(__scm->dev, ret,
+ "Failed to enable the TrustZone memory allocator\n");
+
+ memset(&pool_config, 0, sizeof(pool_config));
+ pool_config.initial_size = 0;
+ pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
+ pool_config.max_size = SZ_256K;
+
+ __scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
+ if (IS_ERR(__scm->mempool))
+ return dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
+ "Failed to create the SCM memory pool\n");
+
/*
* Initialize the QSEECOM interface.
*
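
Note: the probe hunk above creates an on-demand pool for generic SCM calls (empty at first, capped at 256 KiB), while the uefisecapp probe earlier uses a multiplier pool that starts at 4 KiB and grows by twice its current size when exhausted, also capped at 256 KiB. The third policy accepted by qcom_tzmem_pool_new() is static; a minimal sketch for a hypothetical consumer (the device pointer and the 64 KiB size are illustrative only):

	struct qcom_tzmem_pool_config pool_config = {
		.policy = QCOM_TZMEM_POLICY_STATIC,
		.initial_size = SZ_64K, /* mandatory for the static policy */
	};
	struct qcom_tzmem_pool *pool;

	pool = devm_qcom_tzmem_pool_new(dev, &pool_config);
	if (IS_ERR(pool))
		return PTR_ERR(pool);
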
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 4532907e8489..685b8f59e7a6 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -5,6 +5,7 @@
#define __QCOM_SCM_INT_H
struct device;
+struct qcom_tzmem_pool;
enum qcom_scm_convention {
SMC_CONVENTION_UNKNOWN,
@@ -78,6 +79,8 @@ int scm_legacy_call_atomic(struct device *dev, const struct qcom_scm_desc *desc,
int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res);
+struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void);
+
#define QCOM_SCM_SVC_BOOT 0x01
#define QCOM_SCM_BOOT_SET_ADDR 0x01
#define QCOM_SCM_BOOT_TERMINATE_PC 0x02
@@ -113,6 +116,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05
#define QCOM_SCM_MP_VIDEO_VAR 0x08
#define QCOM_SCM_MP_ASSIGN 0x16
+#define QCOM_SCM_MP_SHM_BRIDGE_ENABLE 0x1c
+#define QCOM_SCM_MP_SHM_BRIDGE_DELETE 0x1d
+#define QCOM_SCM_MP_SHM_BRIDGE_CREATE 0x1e
#define QCOM_SCM_SVC_OCMEM 0x0f
#define QCOM_SCM_OCMEM_LOCK_CMD 0x01
@@ -138,6 +144,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_WAITQ_RESUME 0x02
#define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03
+#define QCOM_SCM_SVC_GPU 0x28
+#define QCOM_SCM_SVC_GPU_INIT_REGS 0x01
+
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
new file mode 100644
index 000000000000..17948cfc82e7
--- /dev/null
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Memory allocator for buffers shared with the TrustZone.
+ *
+ * Copyright (C) 2023-2024 Linaro Ltd.
+ */
+
+#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware/qcom/qcom_tzmem.h>
+#include <linux/genalloc.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/radix-tree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "qcom_tzmem.h"
+
+struct qcom_tzmem_area {
+ struct list_head list;
+ void *vaddr;
+ dma_addr_t paddr;
+ size_t size;
+ void *priv;
+};
+
+struct qcom_tzmem_pool {
+ struct gen_pool *genpool;
+ struct list_head areas;
+ enum qcom_tzmem_policy policy;
+ size_t increment;
+ size_t max_size;
+ spinlock_t lock;
+};
+
+struct qcom_tzmem_chunk {
+ phys_addr_t paddr;
+ size_t size;
+ struct qcom_tzmem_pool *owner;
+};
+
+static struct device *qcom_tzmem_dev;
+static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC);
+static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock);
+
+#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC)
+
+static int qcom_tzmem_init(void)
+{
+ return 0;
+}
+
+static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+{
+ return 0;
+}
+
+static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
+{
+
+}
+
+#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)
+
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/of.h>
+
+#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9
+
+static bool qcom_tzmem_using_shm_bridge;
+
+/* List of machines that are known to not support SHM bridge correctly. */
+static const char *const qcom_tzmem_blacklist[] = {
+ "qcom,sc8180x",
+ "qcom,sdm845", /* reset in rmtfs memory assignment */
+ "qcom,sm8150", /* reset in rmtfs memory assignment */
+ NULL
+};
+
+static int qcom_tzmem_init(void)
+{
+ const char *const *platform;
+ int ret;
+
+ for (platform = qcom_tzmem_blacklist; *platform; platform++) {
+ if (of_machine_is_compatible(*platform))
+ goto notsupp;
+ }
+
+ ret = qcom_scm_shm_bridge_enable();
+ if (ret == -EOPNOTSUPP)
+ goto notsupp;
+
+ if (!ret)
+ qcom_tzmem_using_shm_bridge = true;
+
+ return ret;
+
+notsupp:
+ dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n");
+ return 0;
+}
+
+static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+{
+ u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
+ int ret;
+
+ if (!qcom_tzmem_using_shm_bridge)
+ return 0;
+
+ pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
+ ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
+ size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
+
+ u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm,
+ ipfn_and_s_perm, size_and_flags,
+ QCOM_SCM_VMID_HLOS, handle);
+ if (ret)
+ return ret;
+
+ area->priv = no_free_ptr(handle);
+
+ return 0;
+}
+
+static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
+{
+ u64 *handle = area->priv;
+
+ if (!qcom_tzmem_using_shm_bridge)
+ return;
+
+ qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle);
+ kfree(handle);
+}
+
+#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */
+
+static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
+ size_t size, gfp_t gfp)
+{
+ int ret;
+
+ struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area),
+ gfp);
+ if (!area)
+ return -ENOMEM;
+
+ area->size = PAGE_ALIGN(size);
+
+ area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
+ &area->paddr, gfp);
+ if (!area->vaddr)
+ return -ENOMEM;
+
+ ret = qcom_tzmem_init_area(area);
+ if (ret) {
+ dma_free_coherent(qcom_tzmem_dev, area->size,
+ area->vaddr, area->paddr);
+ return ret;
+ }
+
+ ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
+ (phys_addr_t)area->paddr, size, -1);
+ if (ret) {
+ dma_free_coherent(qcom_tzmem_dev, area->size,
+ area->vaddr, area->paddr);
+ return ret;
+ }
+
+ scoped_guard(spinlock_irqsave, &pool->lock)
+ list_add_tail(&area->list, &pool->areas);
+
+ area = NULL;
+ return 0;
+}
+
+/**
+ * qcom_tzmem_pool_new() - Create a new TZ memory pool.
+ * @config: Pool configuration.
+ *
+ * Create a new pool of memory suitable for sharing with the TrustZone.
+ *
+ * Must not be used in atomic context.
+ *
+ * Return: New memory pool address or ERR_PTR() on error.
+ */
+struct qcom_tzmem_pool *
+qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config)
+{
+ int ret = -ENOMEM;
+
+ might_sleep();
+
+ switch (config->policy) {
+ case QCOM_TZMEM_POLICY_STATIC:
+ if (!config->initial_size)
+ return ERR_PTR(-EINVAL);
+ break;
+ case QCOM_TZMEM_POLICY_MULTIPLIER:
+ if (!config->increment)
+ return ERR_PTR(-EINVAL);
+ break;
+ case QCOM_TZMEM_POLICY_ON_DEMAND:
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool),
+ GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->genpool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!pool->genpool)
+ return ERR_PTR(-ENOMEM);
+
+ gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL);
+
+ pool->policy = config->policy;
+ pool->increment = config->increment;
+ pool->max_size = config->max_size;
+ INIT_LIST_HEAD(&pool->areas);
+ spin_lock_init(&pool->lock);
+
+ if (config->initial_size) {
+ ret = qcom_tzmem_pool_add_memory(pool, config->initial_size,
+ GFP_KERNEL);
+ if (ret) {
+ gen_pool_destroy(pool->genpool);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return_ptr(pool);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new);
+
+/**
+ * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources.
+ * @pool: Memory pool to free.
+ *
+ * Must not be called if any of the allocated chunks has not been freed.
+ * Must not be used in atomic context.
+ */
+void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
+{
+ struct qcom_tzmem_area *area, *next;
+ struct qcom_tzmem_chunk *chunk;
+ struct radix_tree_iter iter;
+ bool non_empty = false;
+ void __rcu **slot;
+
+ might_sleep();
+
+ if (!pool)
+ return;
+
+ scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
+ radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
+ chunk = radix_tree_deref_slot_protected(slot,
+ &qcom_tzmem_chunks_lock);
+
+ if (chunk->owner == pool)
+ non_empty = true;
+ }
+ }
+
+ WARN(non_empty, "Freeing TZ memory pool with memory still allocated");
+
+ list_for_each_entry_safe(area, next, &pool->areas, list) {
+ list_del(&area->list);
+ qcom_tzmem_cleanup_area(area);
+ dma_free_coherent(qcom_tzmem_dev, area->size,
+ area->vaddr, area->paddr);
+ kfree(area);
+ }
+
+ gen_pool_destroy(pool->genpool);
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free);
+
+static void devm_qcom_tzmem_pool_free(void *data)
+{
+ struct qcom_tzmem_pool *pool = data;
+
+ qcom_tzmem_pool_free(pool);
+}
+
+/**
+ * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new().
+ * @dev: Device managing this resource.
+ * @config: Pool configuration.
+ *
+ * Must not be used in atomic context.
+ *
+ * Return: Address of the managed pool or ERR_PTR() on failure.
+ */
+struct qcom_tzmem_pool *
+devm_qcom_tzmem_pool_new(struct device *dev,
+ const struct qcom_tzmem_pool_config *config)
+{
+ struct qcom_tzmem_pool *pool;
+ int ret;
+
+ pool = qcom_tzmem_pool_new(config);
+ if (IS_ERR(pool))
+ return pool;
+
+ ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return pool;
+}
+EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new);
+
+static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool,
+ size_t requested, gfp_t gfp)
+{
+ size_t current_size = gen_pool_size(pool->genpool);
+
+ if (pool->max_size && (current_size + requested) > pool->max_size)
+ return false;
+
+ switch (pool->policy) {
+ case QCOM_TZMEM_POLICY_STATIC:
+ return false;
+ case QCOM_TZMEM_POLICY_MULTIPLIER:
+ requested = current_size * pool->increment;
+ break;
+ case QCOM_TZMEM_POLICY_ON_DEMAND:
+ break;
+ }
+
+ return !qcom_tzmem_pool_add_memory(pool, requested, gfp);
+}
+
+/**
+ * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ.
+ * @pool: TZ memory pool from which to allocate memory.
+ * @size: Number of bytes to allocate.
+ * @gfp: GFP flags.
+ *
+ * Can be used in any context.
+ *
+ * Return:
+ * Address of the allocated buffer or NULL if no more memory can be allocated.
+ * The buffer must be released using qcom_tzmem_free().
+ */
+void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
+{
+ unsigned long vaddr;
+ int ret;
+
+ if (!size)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+
+ struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk),
+ gfp);
+ if (!chunk)
+ return NULL;
+
+again:
+ vaddr = gen_pool_alloc(pool->genpool, size);
+ if (!vaddr) {
+ if (qcom_tzmem_try_grow_pool(pool, size, gfp))
+ goto again;
+
+ return NULL;
+ }
+
+ chunk->paddr = gen_pool_virt_to_phys(pool->genpool, vaddr);
+ chunk->size = size;
+ chunk->owner = pool;
+
+ scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
+ ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
+ if (ret) {
+ gen_pool_free(pool->genpool, vaddr, size);
+ return NULL;
+ }
+
+ chunk = NULL;
+ }
+
+ return (void *)vaddr;
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_alloc);
+
+/**
+ * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool.
+ * @vaddr: Virtual address of the buffer.
+ *
+ * Can be used in any context.
+ */
+void qcom_tzmem_free(void *vaddr)
+{
+ struct qcom_tzmem_chunk *chunk;
+
+ scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock)
+ chunk = radix_tree_delete_item(&qcom_tzmem_chunks,
+ (unsigned long)vaddr, NULL);
+
+ if (!chunk) {
+ WARN(1, "Virtual address %p not owned by TZ memory allocator",
+ vaddr);
+ return;
+ }
+
+ scoped_guard(spinlock_irqsave, &chunk->owner->lock)
+ gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
+ chunk->size);
+ kfree(chunk);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_free);
+
+/**
+ * qcom_tzmem_to_phys() - Map the virtual address of a TZ buffer to physical.
+ * @vaddr: Virtual address of the buffer allocated from a TZ memory pool.
+ *
+ * Can be used in any context. The address must have been returned by a call
+ * to qcom_tzmem_alloc().
+ *
+ * Return: Physical address of the buffer.
+ */
+phys_addr_t qcom_tzmem_to_phys(void *vaddr)
+{
+ struct qcom_tzmem_chunk *chunk;
+
+ guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);
+
+ chunk = radix_tree_lookup(&qcom_tzmem_chunks, (unsigned long)vaddr);
+ if (!chunk)
+ return 0;
+
+ return chunk->paddr;
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);
+
+int qcom_tzmem_enable(struct device *dev)
+{
+ if (qcom_tzmem_dev)
+ return -EBUSY;
+
+ qcom_tzmem_dev = dev;
+
+ return qcom_tzmem_init();
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_enable);
+
+MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers");
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_LICENSE("GPL");
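
Note: putting the new allocator together, a minimal usage sketch for a hypothetical caller that frees explicitly rather than via the __free() cleanup (pool creation as in the probe functions above; example_tz_call() and the request size are illustrative):

#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

static int example_tz_call(struct qcom_tzmem_pool *pool)
{
	phys_addr_t phys;
	void *buf;

	buf = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL); /* rounded up to a full page */
	if (!buf)
		return -ENOMEM;

	phys = qcom_tzmem_to_phys(buf); /* this is what the TrustZone is given */

	/* ... issue the SCM/QSEECOM call using 'phys' ... */

	qcom_tzmem_free(buf); /* returns the chunk to its owning pool */

	return 0;
}
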
diff --git a/drivers/firmware/qcom/qcom_tzmem.h b/drivers/firmware/qcom/qcom_tzmem.h
new file mode 100644
index 000000000000..8fa8a3eb940e
--- /dev/null
+++ b/drivers/firmware/qcom/qcom_tzmem.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023-2024 Linaro Ltd.
+ */
+
+#ifndef __QCOM_TZMEM_PRIV_H
+#define __QCOM_TZMEM_PRIV_H
+
+struct device;
+
+int qcom_tzmem_enable(struct device *dev);
+
+#endif /* __QCOM_TZMEM_PRIV_H */