author | Avaneesh Kumar Dwivedi <akdwived@codeaurora.org> | 2017-10-24 17:52:24 +0200 |
---|---|---|
committer | Bjorn Andersson <bjorn.andersson@linaro.org> | 2017-10-31 02:37:07 +0100 |
commit | d82bd359972a7fe71a778396cf287bc9f9f3b981 (patch) | |
tree | df307c9a6cbcc61776718603160b3684614854b8 /drivers/firmware/qcom_scm.c | |
parent | Linux 4.14-rc5 (diff) | |
download | linux-d82bd359972a7fe71a778396cf287bc9f9f3b981.tar.xz linux-d82bd359972a7fe71a778396cf287bc9f9f3b981.zip |
firmware: scm: Add new SCM call API for switching memory ownership
Two different processors on an SoC need to switch memory ownership
during load/unload. To enable this, the second-level memory map table
needs to be updated, which is done by the secure layer.
This patch adds the interface for making a secure monitor call to
request a memory ownership switch.
Acked-by: Andy Gross <andy.gross@linaro.org>
Signed-off-by: Avaneesh Kumar Dwivedi <akdwived@codeaurora.org>
[bjorn: Minor style and kerneldoc updates]
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
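
For context, a minimal caller-side sketch (not part of this patch) of how a driver might use the new API to hand a firmware region from Linux (HLOS) to a remote processor. It assumes struct qcom_scm_vmperm and the QCOM_SCM_VMID_*/QCOM_SCM_PERM_* defines added in the accompanying include/linux/qcom_scm.h change; the helper name and the 4K alignment are illustrative only.

```c
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/sizes.h>
#include <linux/qcom_scm.h>

/* Hypothetical helper: give the modem (MSS) read/write ownership of a
 * carveout that Linux (HLOS) currently owns. On success,
 * qcom_scm_assign_mem() updates the owner bitmask to the new owner set. */
static int example_assign_to_modem(phys_addr_t addr, size_t size)
{
	struct qcom_scm_vmperm next = {
		.vmid = QCOM_SCM_VMID_MSS_MSA,	/* new owner */
		.perm = QCOM_SCM_PERM_RW,	/* its access rights */
	};
	unsigned int owners = BIT(QCOM_SCM_VMID_HLOS);	/* current owner set */

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   &owners, &next, 1);
}
```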
Diffstat (limited to 'drivers/firmware/qcom_scm.c')
-rw-r--r-- | drivers/firmware/qcom_scm.c | 95 |
1 file changed, 95 insertions, 0 deletions
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index bb16510d75ba..334eaa341828 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -40,6 +40,19 @@ struct qcom_scm {
 	struct reset_controller_dev reset;
 };
 
+struct qcom_scm_current_perm_info {
+	__le32 vmid;
+	__le32 perm;
+	__le64 ctx;
+	__le32 ctx_size;
+	__le32 unused;
+};
+
+struct qcom_scm_mem_map_info {
+	__le64 mem_addr;
+	__le64 mem_size;
+};
+
 static struct qcom_scm *__scm;
 
 static int qcom_scm_clk_enable(void)
@@ -348,6 +361,88 @@ int qcom_scm_set_remote_state(u32 state, u32 id)
 }
 EXPORT_SYMBOL(qcom_scm_set_remote_state);
 
+/**
+ * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
+ * @mem_addr: mem region whose ownership need to be reassigned
+ * @mem_sz: size of the region.
+ * @srcvm: vmid for current set of owners, each set bit in
+ *         flag indicate a unique owner
+ * @newvm: array having new owners and corrsponding permission
+ *         flags
+ * @dest_cnt: number of owners in next set.
+ *
+ * Return negative errno on failure, 0 on success, with @srcvm updated.
+ */
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+			unsigned int *srcvm,
+			struct qcom_scm_vmperm *newvm, int dest_cnt)
+{
+	struct qcom_scm_current_perm_info *destvm;
+	struct qcom_scm_mem_map_info *mem_to_map;
+	phys_addr_t mem_to_map_phys;
+	phys_addr_t dest_phys;
+	phys_addr_t ptr_phys;
+	size_t mem_to_map_sz;
+	size_t dest_sz;
+	size_t src_sz;
+	size_t ptr_sz;
+	int next_vm;
+	__le32 *src;
+	void *ptr;
+	int ret;
+	int len;
+	int i;
+
+	src_sz = hweight_long(*srcvm) * sizeof(*src);
+	mem_to_map_sz = sizeof(*mem_to_map);
+	dest_sz = dest_cnt * sizeof(*destvm);
+	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+			ALIGN(dest_sz, SZ_64);
+
+	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	/* Fill source vmid detail */
+	src = ptr;
+	len = hweight_long(*srcvm);
+	for (i = 0; i < len; i++) {
+		src[i] = cpu_to_le32(ffs(*srcvm) - 1);
+		*srcvm ^= 1 << (ffs(*srcvm) - 1);
+	}
+
+	/* Fill details of mem buff to map */
+	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
+	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
+	mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
+	mem_to_map[0].mem_size = cpu_to_le64(mem_sz);
+
+	next_vm = 0;
+	/* Fill details of next vmid detail */
+	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+	for (i = 0; i < dest_cnt; i++) {
+		destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
+		destvm[i].perm = cpu_to_le32(newvm[i].perm);
+		destvm[i].ctx = 0;
+		destvm[i].ctx_size = 0;
+		next_vm |= BIT(newvm[i].vmid);
+	}
+
+	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+				    ptr_phys, src_sz, dest_phys, dest_sz);
+	dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
+	if (ret) {
+		dev_err(__scm->dev,
+			"Assign memory protection call failed %d.\n", ret);
+		return -EINVAL;
+	}
+
+	*srcvm = next_vm;
+	return 0;
+}
+EXPORT_SYMBOL(qcom_scm_assign_mem);
+
 static int qcom_scm_probe(struct platform_device *pdev)
 {
 	struct qcom_scm *scm;
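
A note on the source-VMID handling above: @srcvm is a bitmask with one bit per current owner, and the hweight_long()/ffs() loop flattens it into an array of little-endian VMID values for the secure world. A standalone sketch of that decomposition (plain userspace C with made-up VMID bit positions, not kernel code):

```c
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	/* pretend VMIDs 3 and 15 currently own the region */
	unsigned int srcvm = (1u << 3) | (1u << 15);
	unsigned int vmids[32];
	int count = 0;

	/* same idea as the kernel loop: take the lowest set bit each pass,
	 * record its position as a VMID, then clear it */
	while (srcvm) {
		int vmid = ffs(srcvm) - 1;
		vmids[count++] = vmid;
		srcvm &= ~(1u << vmid);
	}

	for (int i = 0; i < count; i++)
		printf("current owner vmid: %u\n", vmids[i]);

	return 0;
}
```

The resulting VMID list, the memory-map entry, and the destination permission records are packed into a single dma_alloc_coherent() buffer at SZ_64-aligned offsets, and their physical addresses are what get passed down to __qcom_scm_assign_mem().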