diff --git a/core/arch/arm/include/kernel/misc.h b/core/arch/arm/include/kernel/misc.h index a9174a88d28..7e5b038651f 100644 --- a/core/arch/arm/include/kernel/misc.h +++ b/core/arch/arm/include/kernel/misc.h @@ -37,6 +37,34 @@ size_t get_core_pos(void); uint32_t read_mode_sp(int cpu_mode); uint32_t read_mode_lr(int cpu_mode); +/** + * extract_pages_from_params() - extract list of pages from + * OPTEE_MSG_ATTR_FRAGMENT parameters + * + * @params: pointer to parameters array + * @pages: output array of page addresses + * @num_params: number of parameters in array + * + * return: + * page count on success + * <0 on error + */ +int extract_pages_from_params(struct optee_msg_param *params, paddr_t *pages, + uint32_t num_params); + +/** + * map_params_buffer() - map parameters buffer into OP-TEE VA space + * @pa_params - physical pointer to parameters + * @num_params - number of parameters + * @map_offset - offset that should be deducted before mapping buffer + * + * return: + * struct shmem_mapping of mapped buffer on success + * NULL on error. + */ +struct shmem_mapping *map_params_buffer(paddr_t pa_params, uint32_t num_params, + paddr_t map_offset); + static inline uint64_t reg_pair_to_64(uint32_t reg0, uint32_t reg1) { return (uint64_t)reg0 << 32 | reg1; diff --git a/core/arch/arm/include/mm/mobj.h b/core/arch/arm/include/mm/mobj.h index d5eeb692f04..861395a7fc1 100644 --- a/core/arch/arm/include/mm/mobj.h +++ b/core/arch/arm/include/mm/mobj.h @@ -39,6 +39,7 @@ struct mobj { const struct mobj_ops *ops; size_t size; + size_t granule; }; struct mobj_ops { @@ -108,12 +109,25 @@ static inline bool mobj_is_secure(struct mobj *mobj) return mobj_matches(mobj, CORE_MEM_SEC); } +static inline size_t mobj_get_granule(struct mobj *mobj) +{ + if (mobj->granule) + return mobj->granule; + return mobj->size; +} + struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size, tee_mm_pool_t *pool); struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr, enum buf_is_attr battr); +struct mobj *mobj_reg_shm_alloc(paddr_t *pages, + size_t num_pages, + uint64_t cookie); + +struct mobj *mobj_reg_shm_find_by_cookie(uint64_t cookie); + struct mobj *mobj_paged_alloc(size_t size); #ifdef CFG_PAGED_USER_TA diff --git a/core/arch/arm/include/sm/optee_smc.h b/core/arch/arm/include/sm/optee_smc.h index b6fcd6526e5..90c4bd8417a 100644 --- a/core/arch/arm/include/sm/optee_smc.h +++ b/core/arch/arm/include/sm/optee_smc.h @@ -290,6 +290,8 @@ #define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM (1 << 0) /* Secure world can communicate via previously unregistered shared memory */ #define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM (1 << 1) +/* Secure world supports commands "register/unregister shared memory" */ +#define OPTEE_SMC_SEC_CAP_REGISTER_SHM (1 << 2) #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES) diff --git a/core/arch/arm/kernel/misc.c b/core/arch/arm/kernel/misc.c new file mode 100644 index 00000000000..1033a8d58b6 --- /dev/null +++ b/core/arch/arm/kernel/misc.c @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2016, EPAM Systems + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * extract_pages_from_params() - extract list of pages from + * OPTEE_MSG_ATTR_FRAGMENT parameters + * + * @params: pointer to parameters array + * @pages: output array of page addresses + * @num_params: number of parameters in array + * + * return: + * page count on success + * <0 on error + */ +int extract_pages_from_params(struct optee_msg_param *params, paddr_t *pages, + uint32_t num_params) +{ + uint32_t pages_cnt = 0; + + if (params[num_params-1].attr & OPTEE_MSG_ATTR_FRAGMENT) + return -1; + + for (uint32_t i = 0; i < num_params; i++) { + uint32_t attr = params[i].attr & OPTEE_MSG_ATTR_TYPE_MASK; + + switch (attr) { + case OPTEE_MSG_ATTR_TYPE_NEXT_FRAGMENT: + continue; + case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: + if (pages_cnt >= num_params) + return -1; + if (!(params[i].attr & OPTEE_MSG_ATTR_FRAGMENT) && + (i != num_params - 1)) + return -1; + pages[pages_cnt++] = params[i].u.tmem.buf_ptr + & ~SMALL_PAGE_MASK; + break; + default: + return -1; + } + } + return pages_cnt; +} + +/** + * map_params_buffer() - map parameters buffer into OP-TEE VA space + * @pa_params - physical pointer to parameters + * @num_params - number of parameters + * @map_offset - offset that should be deducted before mapping buffer + * + * return: + * struct shmem_mapping of mapped buffer on success + * NULL on error. 
+ */ +struct shmem_mapping *map_params_buffer(paddr_t pa_params, uint32_t num_params, + paddr_t map_offset) +{ + struct shmem_mapping *shm_mapping = NULL; + struct optee_msg_param *params; + paddr_t *pages = NULL; + uint32_t max_pages, pages_cnt; + size_t args_size; + + if (shmem_map_page(pa_params, &shm_mapping)) + return NULL; + + args_size = OPTEE_MSG_GET_ARG_SIZE(num_params); + max_pages = args_size/SMALL_PAGE_SIZE + 1; + pages = calloc(max_pages, sizeof(paddr_t)); + if (!pages) + goto err; + + params = (struct optee_msg_param *) shmem_pa2va(pa_params); + pages[0] = pa_params & ~SMALL_PAGE_MASK; + pages_cnt = 1; + for (uint32_t i = 0; i < num_params; i++) { + if ((params->attr & OPTEE_MSG_ATTR_TYPE_MASK) == + OPTEE_MSG_ATTR_TYPE_NEXT_FRAGMENT) { + if (pages_cnt >= max_pages) + goto err; + pages[pages_cnt] = params->u.tmem.buf_ptr; + shmem_unmap_buffer(shm_mapping); + shm_mapping = NULL; + + if (shmem_map_page(pages[pages_cnt], &shm_mapping)) + goto err; + + pages_cnt++; + params = (struct optee_msg_param *) + shmem_get_va(shm_mapping); + } else { + params++; + if (((vaddr_t)params & SMALL_PAGE_MASK) == 0) + goto err; + } + } + + shmem_unmap_buffer(shm_mapping); + shm_mapping = NULL; + + if (shmem_map_buffer(pa_params - map_offset, args_size, + pages, pages_cnt, &shm_mapping)) + goto err; + + goto out; +err: + if (shm_mapping) + shmem_unmap_buffer(shm_mapping); + shm_mapping = NULL; +out: + if (pages) + free(pages); + return shm_mapping; + +} diff --git a/core/arch/arm/kernel/sub.mk b/core/arch/arm/kernel/sub.mk index 20377033f3b..956342d2b39 100644 --- a/core/arch/arm/kernel/sub.mk +++ b/core/arch/arm/kernel/sub.mk @@ -2,6 +2,7 @@ srcs-$(CFG_WITH_USER_TA) += user_ta.c srcs-y += static_ta.c srcs-y += elf_load.c srcs-y += tee_time.c +srcs-y += misc.c srcs-$(CFG_SECURE_TIME_SOURCE_CNTPCT) += tee_time_arm_cntpct.c srcs-$(CFG_SECURE_TIME_SOURCE_REE) += tee_time_ree.c diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c index 7a0dd3f8d82..0a7e199cac5 100644 --- a/core/arch/arm/kernel/thread.c +++ b/core/arch/arm/kernel/thread.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -614,6 +615,7 @@ void __thread_std_smc_entry(struct thread_smc_args *args) tee_fs_rpc_cache_clear(&thr->tsd); if (!thread_prealloc_rpc_cache) { thread_rpc_free_arg(thr->rpc_carg); + shmem_unmap_buffer(shmem_get_by_cookie(thr->rpc_carg)); thr->rpc_carg = 0; thr->rpc_arg = 0; } @@ -1250,22 +1252,40 @@ void thread_rpc_free_arg(uint64_t cookie) void thread_rpc_alloc_arg(size_t size, paddr_t *arg, uint64_t *cookie) { - paddr_t pa; + paddr_t pa, page; uint64_t co; uint32_t rpc_args[THREAD_RPC_NUM_ARGS] = { OPTEE_SMC_RETURN_RPC_ALLOC, size }; + struct shmem_mapping *mapping = NULL; thread_rpc(rpc_args); pa = reg_pair_to_64(rpc_args[1], rpc_args[2]); co = reg_pair_to_64(rpc_args[4], rpc_args[5]); - if (!check_alloced_shm(pa, size, sizeof(uint64_t))) { - thread_rpc_free_arg(co); - pa = 0; - co = 0; + + /* Check if this region is in static shared space */ + if (!core_pbuf_is(CORE_MEM_NSEC_SHM, pa, size)) { + /* If not - then map it into dynamic shared space */ + page = pa & ~SMALL_PAGE_MASK; + if (shmem_map_buffer(pa, size, &page, + 1, &mapping)) + goto err; + shmem_set_cookie(mapping, co); } + if (!check_alloced_shm(pa, size, sizeof(uint64_t))) + goto err; + + *arg = pa; + *cookie = co; + return; +err: + thread_rpc_free_arg(co); + pa = 0; + co = 0; + if (mapping) + shmem_unmap_buffer(mapping); *arg = pa; *cookie = co; } @@ -1283,6 +1303,7 @@ static void 
thread_rpc_free(unsigned int bt, uint64_t cookie) struct optee_msg_arg *arg = thr->rpc_arg; uint64_t carg = thr->rpc_carg; struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg); + struct shmem_mapping *mapping; memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1)); arg->cmd = OPTEE_MSG_RPC_CMD_SHM_FREE; @@ -1295,6 +1316,8 @@ static void thread_rpc_free(unsigned int bt, uint64_t cookie) params[0].u.value.c = 0; reg_pair_from_64(carg, rpc_args + 1, rpc_args + 2); + mapping = shmem_get_by_cookie(cookie); + shmem_unmap_buffer(mapping); thread_rpc(rpc_args); } @@ -1316,6 +1339,8 @@ static void thread_rpc_alloc(size_t size, size_t align, unsigned int bt, struct optee_msg_arg *arg = thr->rpc_arg; uint64_t carg = thr->rpc_carg; struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg); + struct shmem_mapping *mapping; + paddr_t *pages = NULL; memset(arg, 0, OPTEE_MSG_GET_ARG_SIZE(1)); arg->cmd = OPTEE_MSG_RPC_CMD_SHM_ALLOC; @@ -1335,17 +1360,58 @@ static void thread_rpc_alloc(size_t size, size_t align, unsigned int bt, if (arg->num_params != 1) goto fail; - if (params[0].attr != OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT) - goto fail; + if (!core_pbuf_is(CORE_MEM_NSEC_SHM, + params[0].u.tmem.buf_ptr, + params[0].u.tmem.size) && + (params[0].attr == + (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT | OPTEE_MSG_ATTR_NESTED))) { + + int num_pages = (params[0].u.tmem.size - 1) + / SMALL_PAGE_SIZE + 1; + int num_params = num_pages + + ((num_pages * sizeof(struct optee_msg_param)) / + SMALL_PAGE_SIZE); + + pages = malloc(num_pages * sizeof(paddr_t)); - if (!check_alloced_shm(params[0].u.tmem.buf_ptr, size, align)) { - thread_rpc_free(bt, params[0].u.tmem.shm_ref); + if (!pages) + goto free_first; + + mapping = map_params_buffer(params[0].u.tmem.buf_ptr, + num_params, 0); + + if (!mapping) + goto free_first; + + if (extract_pages_from_params(shmem_get_va(mapping), + pages, + num_params) != num_pages) + goto free_first; + shmem_unmap_buffer(mapping); + mapping = NULL; + + if (shmem_map_buffer(params[0].u.tmem.buf_ptr, + params[0].u.tmem.size, + pages, num_pages, &mapping)) + goto free_first; + + shmem_set_cookie(mapping, params[0].u.tmem.shm_ref); + free(pages); + + } else if (!(params[0].attr & OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT)) goto fail; - } *payload = params[0].u.tmem.buf_ptr; *cookie = params[0].u.tmem.shm_ref; + /* Check if this region is in static shared space */ + if (!core_pbuf_is(CORE_MEM_NSEC_SHM, *payload, size)) + panic("This should not happen"); + return; +free_first: + if (pages) + free(pages); + thread_rpc_free(bt, params[0].u.tmem.shm_ref); fail: *payload = 0; *cookie = 0; diff --git a/core/arch/arm/mm/core_mmu.c b/core/arch/arm/mm/core_mmu.c index 15b14a615d0..60b1464d7e0 100644 --- a/core/arch/arm/mm/core_mmu.c +++ b/core/arch/arm/mm/core_mmu.c @@ -825,6 +825,8 @@ static void set_pg_region(struct core_mmu_table_info *dir_info, r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base), end - r.va); + r.size = MIN(r.size, mobj_get_granule(region->mobj)); + if (!mobj_is_paged(region->mobj)) { size_t granule = BIT(pg_info->shift); size_t offset = r.va - region->va + region->offset; diff --git a/core/arch/arm/mm/mobj.c b/core/arch/arm/mm/mobj.c index 4e012c05c82..3caed06ff2f 100644 --- a/core/arch/arm/mm/mobj.c +++ b/core/arch/arm/mm/mobj.c @@ -282,6 +282,121 @@ struct mobj *mobj_mm_alloc(struct mobj *mobj_parent, size_t size, return &m->mobj; } +/* + * mobj_registered implementation + */ + +struct mobj_reg_shm { + struct mobj mobj; + SLIST_ENTRY(mobj_reg_shm) next; + uint64_t cookie; + paddr_t page_offset; + int 
num_pages; + paddr_t pages[]; +}; + +#define MOBJ_REG_SHM_SIZE(nr_pages) \ + (sizeof(struct mobj_reg_shm) + sizeof(paddr_t)*(nr_pages)) + +static SLIST_HEAD(reg_shm_head, mobj_reg_shm) reg_shm_list = + SLIST_HEAD_INITIALIZER(reg_shm_head); + +static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj); + +static TEE_Result mobj_reg_shm_get_pa(struct mobj *mobj, size_t offst, + size_t granule, paddr_t *pa) +{ + struct mobj_reg_shm *mobj_reg_shm = to_mobj_reg_shm(mobj); + paddr_t p; + + if (!pa) + return TEE_ERROR_GENERIC; + + if (offst >= mobj->size) + return TEE_ERROR_GENERIC; + + switch (granule) { + case 0: + p = mobj_reg_shm->pages[offst / SMALL_PAGE_SIZE] + + offst % SMALL_PAGE_SIZE; + break; + case SMALL_PAGE_SIZE: + p = mobj_reg_shm->pages[offst / SMALL_PAGE_SIZE]; + break; + default: + return TEE_ERROR_GENERIC; + + } + *pa = p; + + return TEE_SUCCESS; +} + +static void mobj_reg_shm_free(struct mobj *mobj) +{ + struct mobj_reg_shm *mobj_reg_shm = to_mobj_reg_shm(mobj); + + assert(mobj->ops == &mobj_reg_shm_ops); + SLIST_REMOVE(®_shm_list, mobj_reg_shm, + mobj_reg_shm, next); + free(mobj); +} + +static TEE_Result mobj_reg_shm_get_cattr(struct mobj *mobj __unused, + uint32_t *cattr) +{ + if (!cattr) + return TEE_ERROR_GENERIC; + + *cattr = TEE_MATTR_CACHE_CACHED; + + return TEE_SUCCESS; +} + +static const struct mobj_ops mobj_reg_shm_ops __rodata_unpaged = { + .get_pa = mobj_reg_shm_get_pa, + .get_cattr = mobj_reg_shm_get_cattr, + .free = mobj_reg_shm_free, +}; + +static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj) +{ + assert(mobj->ops == &mobj_reg_shm_ops); + return container_of(mobj, struct mobj_reg_shm, mobj); +} + +struct mobj *mobj_reg_shm_alloc(paddr_t *pages, + size_t num_pages, + uint64_t cookie) +{ + struct mobj_reg_shm *mobj_reg_shm; + + mobj_reg_shm = calloc(1, MOBJ_REG_SHM_SIZE(num_pages)); + if (!mobj_reg_shm) + return NULL; + + mobj_reg_shm->mobj.ops = &mobj_reg_shm_ops; + mobj_reg_shm->mobj.size = num_pages * SMALL_PAGE_SIZE; + mobj_reg_shm->mobj.granule = SMALL_PAGE_SIZE; + mobj_reg_shm->cookie = cookie; + mobj_reg_shm->num_pages = num_pages; + memcpy(mobj_reg_shm->pages, pages, sizeof(*pages) * num_pages); + + SLIST_INSERT_HEAD(®_shm_list, mobj_reg_shm, next); + + return &mobj_reg_shm->mobj; +} + +struct mobj *mobj_reg_shm_find_by_cookie(uint64_t cookie) +{ + struct mobj_reg_shm *mobj_reg_shm = NULL; + + SLIST_FOREACH(mobj_reg_shm, ®_shm_list, next) + if (mobj_reg_shm->cookie == cookie) + return &mobj_reg_shm->mobj; + return NULL; +} + #ifdef CFG_PAGED_USER_TA /* * mobj_paged implementation diff --git a/core/arch/arm/tee/entry_fast.c b/core/arch/arm/tee/entry_fast.c index 0e80dc8f397..7e3d141c717 100644 --- a/core/arch/arm/tee/entry_fast.c +++ b/core/arch/arm/tee/entry_fast.c @@ -98,7 +98,9 @@ static void tee_entry_exchange_capabilities(struct thread_smc_args *args) } args->a0 = OPTEE_SMC_RETURN_OK; - args->a1 = OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM; + args->a1 = OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM | + OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM | + OPTEE_SMC_SEC_CAP_REGISTER_SHM; } static void tee_entry_disable_shm_cache(struct thread_smc_args *args) diff --git a/core/arch/arm/tee/entry_std.c b/core/arch/arm/tee/entry_std.c index 7e4e7414ab0..8d7a3db067d 100644 --- a/core/arch/arm/tee/entry_std.c +++ b/core/arch/arm/tee/entry_std.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -42,6 +43,7 @@ #include #include #include +#include #include #define SHM_CACHE_ATTRS \ @@ -53,7 +55,7 @@ TAILQ_HEAD_INITIALIZER(tee_open_sessions); 
static struct mobj *shm_mobj; -static TEE_Result set_mem_param(const struct optee_msg_param *param, +static TEE_Result set_tmem_param(const struct optee_msg_param *param, struct param_mem *mem) { paddr_t b; @@ -76,6 +78,22 @@ static TEE_Result set_mem_param(const struct optee_msg_param *param, return TEE_SUCCESS; } +static TEE_Result set_rmem_param(const struct optee_msg_param *param, + struct param_mem *mem) +{ + struct mobj *rmem_mobj = + mobj_reg_shm_find_by_cookie(param->u.rmem.shm_ref); + + if (rmem_mobj == NULL) + return TEE_ERROR_BAD_PARAMETERS; + + mem->mobj = rmem_mobj; + mem->offs = param->u.rmem.offs; + mem->size = param->u.rmem.size; + + return TEE_SUCCESS; +} + static TEE_Result copy_in_params(const struct optee_msg_param *params, uint32_t num_params, struct tee_ta_param *ta_param) { @@ -116,10 +134,20 @@ static TEE_Result copy_in_params(const struct optee_msg_param *params, case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: pt[n] = TEE_PARAM_TYPE_MEMREF_INPUT + attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT; - res = set_mem_param(params + n, &ta_param->u[n].mem); + res = set_tmem_param(params + n, &ta_param->u[n].mem); if (res != TEE_SUCCESS) return res; break; + case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT: + case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT: + pt[n] = TEE_PARAM_TYPE_MEMREF_INPUT + attr - + OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; + + res = set_rmem_param(params + n, &ta_param->u[n].mem); + if (res != TEE_SUCCESS) + return res; + break; default: return TEE_ERROR_BAD_PARAMETERS; } @@ -139,7 +167,18 @@ static void copy_out_param(struct tee_ta_param *ta_param, uint32_t num_params, switch (TEE_PARAM_TYPE_GET(ta_param->types, n)) { case TEE_PARAM_TYPE_MEMREF_OUTPUT: case TEE_PARAM_TYPE_MEMREF_INOUT: - params[n].u.tmem.size = ta_param->u[n].mem.size; + switch (params[n].attr & OPTEE_MSG_ATTR_TYPE_MASK) { + case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT: + params[n].u.tmem.size = ta_param->u[n].mem.size; + break; + case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT: + case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT: + params[n].u.rmem.size = ta_param->u[n].mem.size; + break; + default: + break; + } break; case TEE_PARAM_TYPE_VALUE_OUTPUT: case TEE_PARAM_TYPE_VALUE_INOUT: @@ -327,11 +366,112 @@ static void entry_cancel(struct thread_smc_args *smc_args, smc_args->a0 = OPTEE_SMC_RETURN_OK; } +static void register_shm(struct thread_smc_args *smc_args, + struct optee_msg_arg *arg, uint32_t num_params) +{ + paddr_t *pages = NULL; + int num_pages, pages_cnt; + struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg); + + if (num_params == 0) { + arg->ret = TEE_ERROR_BAD_PARAMETERS; + return; + } + + num_pages = (params[0].u.tmem.size + + (params[0].u.tmem.buf_ptr & SMALL_PAGE_MASK) - 1) + / SMALL_PAGE_SIZE + 1; + + pages = malloc(num_pages * sizeof(paddr_t)); + + if (!pages) { + arg->ret = TEE_ERROR_OUT_OF_MEMORY; + return; + } + + pages_cnt = extract_pages_from_params(params, pages, num_params); + if (pages_cnt < 0) { + arg->ret = TEE_ERROR_BAD_PARAMETERS; + goto out; + } + + if (num_pages != pages_cnt) { + arg->ret = TEE_ERROR_BAD_PARAMETERS; + goto out; + } + + if (mobj_reg_shm_alloc(pages, pages_cnt, + params[0].u.tmem.shm_ref) == NULL) { + arg->ret = TEE_ERROR_GENERIC; + goto out; + } + + arg->ret = TEE_SUCCESS; +out: + free(pages); + smc_args->a0 = OPTEE_SMC_RETURN_OK; +} + +static void unregister_shm(struct thread_smc_args *smc_args, + struct optee_msg_arg *arg, uint32_t num_params) +{ + struct optee_msg_param *params = OPTEE_MSG_GET_PARAMS(arg); + + if (num_params == 1) { + 
struct mobj *mobj; + uint64_t cookie = params[0].u.rmem.shm_ref; + + mobj = mobj_reg_shm_find_by_cookie(cookie); + if (mobj) { + mobj_free(mobj); + arg->ret = TEE_SUCCESS; + } else { + EMSG("Can't find mapping with given cookie\n"); + arg->ret = TEE_ERROR_BAD_PARAMETERS; + } + } else { + arg->ret = TEE_ERROR_BAD_PARAMETERS; + arg->ret_origin = TEE_ORIGIN_TEE; + } + + smc_args->a0 = OPTEE_SMC_RETURN_OK; +} + +static struct shmem_mapping *map_cmd_buffer(paddr_t parg) +{ + struct shmem_mapping *shm_mapping = NULL; + struct optee_msg_arg *arg = NULL; /* fix gcc warning */ + uint32_t num_params; + size_t args_size; + + if (shmem_map_page(parg, &shm_mapping)) + return NULL; + + arg = (struct optee_msg_arg *)shmem_pa2va(parg); + if (arg == NULL) + goto err; + + num_params = arg->num_params; + args_size = OPTEE_MSG_GET_ARG_SIZE(num_params); + if ((parg & SMALL_PAGE_MASK) + args_size <= SMALL_PAGE_SIZE) + return shm_mapping; + + shm_mapping = map_params_buffer(parg + sizeof(struct optee_msg_arg), + num_params, + sizeof(struct optee_msg_arg)); + return shm_mapping; +err: + if (shm_mapping) + shmem_unmap_buffer(shm_mapping); + return NULL; +} + void tee_entry_std(struct thread_smc_args *smc_args) { paddr_t parg; struct optee_msg_arg *arg = NULL; /* fix gcc warning */ uint32_t num_params; + struct shmem_mapping *shm_mapping = NULL; if (smc_args->a0 != OPTEE_SMC_CALL_WITH_ARG) { EMSG("Unknown SMC 0x%" PRIx64, (uint64_t)smc_args->a0); @@ -340,6 +480,19 @@ void tee_entry_std(struct thread_smc_args *smc_args) return; } parg = (uint64_t)smc_args->a1 << 32 | smc_args->a2; + + /* Check if this region is in static shared space */ + if (!core_pbuf_is(CORE_MEM_NSEC_SHM, parg, + sizeof(struct optee_msg_arg))) { + /* If not - then map it into dynamic shared space */ + /* args should be page-aligned */ + if (parg & SMALL_PAGE_MASK) { + smc_args->a0 = OPTEE_SMC_RETURN_EBADADDR; + return; + } + shm_mapping = map_cmd_buffer(parg); + } + if (!tee_pbuf_is_non_sec(parg, sizeof(struct optee_msg_arg)) || !ALIGNMENT_IS_OK(parg, struct optee_msg_arg) || !(arg = phys_to_virt(parg, MEM_AREA_NSEC_SHM))) { @@ -369,10 +522,20 @@ void tee_entry_std(struct thread_smc_args *smc_args) case OPTEE_MSG_CMD_CANCEL: entry_cancel(smc_args, arg, num_params); break; + case OPTEE_MSG_CMD_REGISTER_SHM: + register_shm(smc_args, arg, num_params); + break; + case OPTEE_MSG_CMD_UNREGISTER_SHM: + unregister_shm(smc_args, arg, num_params); + break; + default: EMSG("Unknown cmd 0x%x\n", arg->cmd); smc_args->a0 = OPTEE_SMC_RETURN_EBADCMD; } + + if (shm_mapping) + shmem_unmap_buffer(shm_mapping); } static TEE_Result default_mobj_init(void) diff --git a/core/include/optee_msg.h b/core/include/optee_msg.h index 09be4618fbb..31b60d1a9f7 100644 --- a/core/include/optee_msg.h +++ b/core/include/optee_msg.h @@ -55,6 +55,11 @@ #define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9 #define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa #define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb +/* + * Special parameter type denoting that the command buffer continues on + * the specified page + */ +#define OPTEE_MSG_ATTR_TYPE_NEXT_FRAGMENT 0xc #define OPTEE_MSG_ATTR_TYPE_MASK 0xff @@ -73,6 +78,12 @@ */ #define OPTEE_MSG_ATTR_FRAGMENT (1 << 9) +/* + * The shared memory object holds an array of struct optee_msg_param with + * the actual parameters. + */ +#define OPTEE_MSG_ATTR_NESTED BIT(10) + /* * Memory attributes for caching passed with temp memrefs. The actual value * used is defined outside the message protocol with the exception of
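
A minimal sketch (not part of this patch) of how a caller could lay out the parameters that extract_pages_from_params() expects: one TMEM entry per small page of the registered buffer, OPTEE_MSG_ATTR_FRAGMENT set on every entry except the last, with the total size and cookie taken from the first entry, matching how register_shm() reads them. fill_register_shm_params() is a hypothetical name, the include paths are assumptions, and error handling is omitted.

#include <optee_msg.h>      /* struct optee_msg_param, OPTEE_MSG_ATTR_* (assumed location) */
#include <mm/core_mmu.h>    /* SMALL_PAGE_SIZE, SMALL_PAGE_MASK (assumed location) */

/* Hypothetical helper, for illustration only */
static void fill_register_shm_params(struct optee_msg_param *params,
				     paddr_t buf, size_t size, uint64_t cookie)
{
	/* Pages spanned by [buf, buf + size), as computed in register_shm() */
	size_t num_pages = (size + (buf & SMALL_PAGE_MASK) +
			    SMALL_PAGE_SIZE - 1) / SMALL_PAGE_SIZE;
	paddr_t page = buf & ~SMALL_PAGE_MASK;
	size_t n;

	for (n = 0; n < num_pages; n++) {
		params[n].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
		/* Every entry except the last is marked as a fragment */
		if (n != num_pages - 1)
			params[n].attr |= OPTEE_MSG_ATTR_FRAGMENT;
		params[n].u.tmem.buf_ptr = page;
		page += SMALL_PAGE_SIZE;
	}

	/* register_shm() reads the total size and cookie from the first entry */
	params[0].u.tmem.buf_ptr = buf;
	params[0].u.tmem.size = size;
	params[0].u.tmem.shm_ref = cookie;
}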