Skip to content

Commit

Permalink
core: assign non-sec DDR configuration from DT
Browse files Browse the repository at this point in the history
Assigns the non-secure DDR configuration from the device tree when CFG_DT=y.
Any DDR configuration already registered via register_nsec_ddr() is overridden.

Reviewed-by: Volodymyr Babchuk <vlad.babchuk@gmail.com>
Reviewed-by: Etienne Carriere <etienne.carriere@linaro.org>
Tested-by: Jens Wiklander <jens.wiklander@linaro.org> (QEMU)
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
  • Loading branch information
jenswi-linaro committed Jun 22, 2017
1 parent b2f1248 commit 3a702c4
Show file tree
Hide file tree
Showing 3 changed files with 265 additions and 15 deletions.
5 changes: 5 additions & 0 deletions core/arch/arm/include/mm/core_mmu.h
Original file line number Diff line number Diff line change
Expand Up @@ -484,6 +484,11 @@ bool cpu_mmu_enabled(void);
*/
bool core_mmu_nsec_ddr_is_defined(void);

#ifdef CFG_DT
void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
size_t nelems);
#endif

#ifdef CFG_SECURE_DATA_PATH
/* Alloc and fill SDP memory objects table - table is NULL terminated */
struct mobj **core_sdp_mem_create_mobjs(void);
Expand Down
119 changes: 107 additions & 12 deletions core/arch/arm/kernel/generic_boot.c
Original file line number Diff line number Diff line change
Expand Up @@ -443,15 +443,35 @@ static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
}
}

static int add_optee_res_mem_dt_node(void *fdt)
/*
 * Reads one FDT-encoded value at byte offset *offs in @data and advances
 * *offs past it. cell_size == 1 means a single 32-bit cell, anything else
 * a 64-bit value (two cells). FDT values are big-endian; the result is
 * converted to CPU byte order.
 */
static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
				       uint32_t cell_size)
{
	const uint8_t *p = (const uint8_t *)data + *offs;
	uint64_t res;

	if (cell_size == 1) {
		uint32_t v32;

		memcpy(&v32, p, sizeof(v32));
		res = fdt32_to_cpu(v32);
		*offs += sizeof(v32);
	} else {
		uint64_t v64;

		memcpy(&v64, p, sizeof(v64));
		res = fdt64_to_cpu(v64);
		*offs += sizeof(v64);
	}

	return res;
}

static int add_res_mem_dt_node(void *fdt, const char *name, paddr_t pa,
size_t size)
{
int offs;
int ret;
int addr_size = 2;
int len_size = 2;
vaddr_t shm_va_start;
vaddr_t shm_va_end;
paddr_t shm_pa;
char subnode_name[80];

offs = fdt_path_offset(fdt, "/reserved-memory");
Expand Down Expand Up @@ -480,17 +500,14 @@ static int add_optee_res_mem_dt_node(void *fdt)
return -1;
}

core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_va_start, &shm_va_end);
shm_pa = virt_to_phys((void *)shm_va_start);
snprintf(subnode_name, sizeof(subnode_name),
"optee@0x%" PRIxPA, shm_pa);
"%s@0x%" PRIxPA, name, pa);
offs = fdt_add_subnode(fdt, offs, subnode_name);
if (offs >= 0) {
uint32_t data[FDT_MAX_NCELLS * 2];

set_dt_val(data, addr_size, shm_pa);
set_dt_val(data + addr_size, len_size,
shm_va_end - shm_va_start);
set_dt_val(data, addr_size, pa);
set_dt_val(data + addr_size, len_size, size);
ret = fdt_setprop(fdt, offs, "reg", data,
sizeof(uint32_t) * (addr_size + len_size));
if (ret < 0)
Expand All @@ -504,6 +521,84 @@ static int add_optee_res_mem_dt_node(void *fdt)
return 0;
}

/*
 * Parses the "/memory" node's "reg" property into a heap-allocated array
 * of core_mmu_phys_mem entries typed MEM_AREA_RAM_NSEC.
 *
 * Returns NULL if the node or property is missing or no complete
 * (address, size) pair is present; panics on allocation failure. On
 * success *nelems holds the entry count and the caller owns the array.
 */
static struct core_mmu_phys_mem *get_memory(void *fdt, size_t *nelems)
{
	int offs;
	int addr_size;
	int len_size;
	int reg_len;
	size_t prop_len;
	const uint8_t *prop;
	size_t prop_offs;
	size_t n;
	struct core_mmu_phys_mem *mem;

	offs = fdt_subnode_offset(fdt, 0, "memory");
	if (offs < 0)
		return NULL;

	/* Use a dedicated variable for the property length instead of
	 * temporarily borrowing addr_size as in the previous version. */
	prop = fdt_getprop(fdt, offs, "reg", &reg_len);
	if (!prop)
		return NULL;
	prop_len = reg_len;

	addr_size = fdt_address_cells(fdt, offs);
	if (addr_size < 0)
		return NULL;

	len_size = fdt_size_cells(fdt, offs);
	if (len_size < 0)
		return NULL;

	/*
	 * Count complete (address, size) pairs; a trailing address with no
	 * size is ignored.
	 *
	 * Bug fix: the previous loop decremented n before breaking out on
	 * a truncated pair, which dropped the last complete pair, and with
	 * a lone dangling address underflowed n (size_t) to SIZE_MAX,
	 * turning a malformed DT into a panic via a huge calloc().
	 */
	for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
		get_dt_val_and_advance(prop, &prop_offs, addr_size);
		if (prop_offs >= prop_len)
			break;
		get_dt_val_and_advance(prop, &prop_offs, len_size);
	}

	if (!n)
		return NULL;

	*nelems = n;
	mem = calloc(n, sizeof(*mem));
	if (!mem)
		panic();

	for (n = 0, prop_offs = 0; n < *nelems; n++) {
		mem[n].type = MEM_AREA_RAM_NSEC;
		mem[n].addr = get_dt_val_and_advance(prop, &prop_offs,
						     addr_size);
		mem[n].size = get_dt_val_and_advance(prop, &prop_offs,
						     len_size);
	}

	return mem;
}

/*
 * Configures non-secure memory from the FDT: hands any discovered DDR
 * ranges to the MMU layer and publishes the NSEC_SHM range as an "optee"
 * reserved-memory node. Returns 0 on success, -1 on failure.
 */
static int config_nsmem(void *fdt)
{
	struct core_mmu_phys_mem *mem;
	size_t nelems;
	vaddr_t shm_begin;
	vaddr_t shm_limit;

	mem = get_memory(fdt, &nelems);
	if (!mem)
		DMSG("No non-secure memory found in FDT");
	else
		core_mmu_set_discovered_nsec_ddr(mem, nelems);

	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_begin, &shm_limit);
	if (shm_begin == shm_limit) {
		DMSG("No SHM configured");
		return -1;
	}

	return add_res_mem_dt_node(fdt, "optee", shm_begin,
				   shm_limit - shm_begin);
}

static void init_fdt(unsigned long phys_fdt)
{
void *fdt;
Expand Down Expand Up @@ -539,8 +634,8 @@ static void init_fdt(unsigned long phys_fdt)
if (add_optee_dt_node(fdt))
panic("Failed to add OP-TEE Device Tree node");

if (add_optee_res_mem_dt_node(fdt))
panic("Failed to add OP-TEE reserved memory DT node");
if (config_nsmem(fdt))
panic("Failed to config non-secure memory");

ret = fdt_pack(fdt);
if (ret < 0) {
Expand Down
156 changes: 153 additions & 3 deletions core/arch/arm/mm/core_mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -216,15 +216,165 @@ static bool pbuf_is_special_mem(paddr_t pbuf, size_t len,
return false;
}

#ifdef CFG_DT
/*
 * Removes the physical range [pa, pa + size) from the array of memory
 * ranges at *mem (*nelems entries). The range must either lie completely
 * inside one entry or overlap none of them; a partial overlap panics.
 * *mem and *nelems are updated in place; the array may be reallocated
 * when an entry has to be split in two.
 */
static void carve_out_phys_mem(struct core_mmu_phys_mem **mem, size_t *nelems,
			       paddr_t pa, size_t size)
{
	struct core_mmu_phys_mem *m = *mem;
	size_t n = 0;

	/* Locate the entry containing the range, if any. */
	while (true) {
		if (n >= *nelems) {
			DMSG("No need to carve out %#" PRIxPA " size %#zx",
			     pa, size);
			return;
		}
		if (core_is_buffer_inside(pa, size, m[n].addr, m[n].size))
			break;
		if (!core_is_buffer_outside(pa, size, m[n].addr, m[n].size))
			panic();
		n++;
	}

	if (pa == m[n].addr && size == m[n].size) {
		/* Exact match: drop the entry entirely. */
		(*nelems)--;
		memmove(m + n, m + n + 1, sizeof(*m) * (*nelems - n));
		/*
		 * Bug fix: no shrinking realloc() here. realloc() to a
		 * smaller (possibly zero) size may legitimately return
		 * NULL while the old block stays valid, and the previous
		 * code then panicked spuriously — fatally so when the
		 * last entry was removed (realloc(m, 0)). Keeping the
		 * slightly over-sized allocation is harmless.
		 */
	} else if (pa == m[n].addr) {
		/*
		 * Carve from the front.
		 *
		 * Bug fix: the size must shrink along with the moved
		 * start address; the previous code only advanced addr,
		 * leaving the entry extended "size" bytes past its
		 * original end.
		 */
		m[n].addr += size;
		m[n].size -= size;
	} else if ((pa + size) == (m[n].addr + m[n].size)) {
		/* Carve from the tail. */
		m[n].size -= size;
	} else {
		/* Carve from the middle: split the entry in two. */
		m = realloc(m, sizeof(*m) * (*nelems + 1));
		if (!m)
			panic();
		*mem = m;
		memmove(m + n + 1, m + n, sizeof(*m) * (*nelems - n));
		(*nelems)++;
		/* m[n] keeps the head, m[n + 1] (copy of m[n]) the tail. */
		m[n].size = pa - m[n].addr;
		m[n + 1].size -= size + m[n].size;
		m[n + 1].addr = pa + size;
	}
}

/*
 * Panics (after logging) if any of the nelems non-secure ranges in
 * start[] overlaps the given static memory map entry.
 */
static void check_phys_mem_is_outside(struct core_mmu_phys_mem *start,
				      size_t nelems,
				      struct tee_mmap_region *map)
{
	size_t idx;

	for (idx = 0; idx < nelems; idx++) {
		if (core_is_buffer_outside(start[idx].addr, start[idx].size,
					   map->pa, map->size))
			continue;
		EMSG(
		"Non-sec mem (%#" PRIxPA ":%#zx) overlaps map (type %d %#" PRIxPA ":%#zx)",
		     start[idx].addr, start[idx].size,
		     map->type, map->pa, map->size);
		panic();
	}
}

static const struct core_mmu_phys_mem *discovered_nsec_ddr_start;
static size_t discovered_nsec_ddr_nelems;

/*
 * qsort() comparator ordering core_mmu_phys_mem entries by ascending
 * physical address.
 *
 * Bug fix: the previous version returned the subtraction of the two
 * addresses truncated to int. paddr_t can be wider than int (and is
 * unsigned), so the difference could wrap or truncate and report the
 * wrong sign for addresses more than INT_MAX apart. Compare explicitly
 * instead.
 */
static int cmp_pmem_by_addr(const void *a, const void *b)
{
	const struct core_mmu_phys_mem *pmem_a = a;
	const struct core_mmu_phys_mem *pmem_b = b;

	if (pmem_a->addr < pmem_b->addr)
		return -1;
	if (pmem_a->addr > pmem_b->addr)
		return 1;
	return 0;
}

/*
 * Registers the DT-discovered non-secure DDR ranges. Takes ownership of
 * the heap-allocated @start array (nelems entries), sorts it by address,
 * carves out ranges reserved for other purposes (NSEC_SHM, SDP) and
 * verifies the remainder does not overlap any other static mapping.
 * May only be called once (asserted).
 */
void core_mmu_set_discovered_nsec_ddr(struct core_mmu_phys_mem *start,
				      size_t nelems)
{
	struct core_mmu_phys_mem *m = start;
	size_t num_elems = nelems;
	struct tee_mmap_region *map;
	const struct core_mmu_phys_mem __maybe_unused *pmem;

	assert(!discovered_nsec_ddr_start);
	assert(m && num_elems);

	qsort(m, num_elems, sizeof(*m), cmp_pmem_by_addr);

	/*
	 * Non-secure shared memory and also secure data
	 * path memory are supposed to reside inside
	 * non-secure memory. Since NSEC_SHM and SDP_MEM
	 * are used for a specific purpose make holes for
	 * those memory in the normal non-secure memory.
	 *
	 * This has to be done since for instance QEMU
	 * isn't aware of which memory range in the
	 * non-secure memory is used for NSEC_SHM.
	 */

#ifdef CFG_SECURE_DATA_PATH
	for (pmem = &__start_phys_sdp_mem_section;
	     pmem < &__end_phys_sdp_mem_section; pmem++)
		carve_out_phys_mem(&m, &num_elems, pmem->addr, pmem->size);
#endif

	/*
	 * Bug fix: the loop condition was inverted (missing '!'), so the
	 * body never ran for real map entries — no NSEC_SHM hole was
	 * carved and no overlap check performed. Iterate while NOT at the
	 * end-of-table marker.
	 */
	for (map = static_memory_map; !core_mmap_is_end_of_table(map); map++) {
		if (map->type == MEM_AREA_NSEC_SHM)
			carve_out_phys_mem(&m, &num_elems, map->pa, map->size);
		else
			check_phys_mem_is_outside(m, num_elems, map);
	}

	discovered_nsec_ddr_start = m;
	discovered_nsec_ddr_nelems = num_elems;
}

/*
 * Exposes the DT-discovered non-secure DDR ranges as a [*start, *end)
 * pair. Returns false (outputs untouched) if nothing was discovered.
 */
static bool get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start,
				    const struct core_mmu_phys_mem **end)
{
	if (!discovered_nsec_ddr_start)
		return false;

	*start = discovered_nsec_ddr_start;
	*end = *start + discovered_nsec_ddr_nelems;
	return true;
}
#else /*!CFG_DT*/
/*
 * Stub for builds without CFG_DT: there is never any DT-discovered
 * non-secure DDR, so callers always fall back to the linker-provided
 * phys_nsec_ddr section.
 */
static bool
get_discovered_nsec_ddr(const struct core_mmu_phys_mem **start __unused,
const struct core_mmu_phys_mem **end __unused)
{
return false;
}
#endif /*!CFG_DT*/

/*
 * Tells whether the physical buffer [pbuf, pbuf + len) lies in
 * non-secure DDR. Prefers the DT-discovered ranges; falls back to the
 * linker-section table when none were discovered.
 *
 * NOTE(review): the scraped diff had the removed pre-patch body glued
 * before the new one, which is not valid C; this is the post-patch body.
 */
static bool pbuf_is_nsec_ddr(paddr_t pbuf, size_t len)
{
	const struct core_mmu_phys_mem *start;
	const struct core_mmu_phys_mem *end;

	if (!get_discovered_nsec_ddr(&start, &end)) {
		start = &__start_phys_nsec_ddr_section;
		end = &__end_phys_nsec_ddr_section;
	}

	return pbuf_is_special_mem(pbuf, len, start, end);
}

/*
 * Returns true if any non-secure DDR range is known, either discovered
 * from the DT or registered in the linker-provided phys_nsec_ddr section.
 *
 * NOTE(review): the scraped diff had the removed pre-patch return glued
 * before the new body, which is not valid C; this is the post-patch body.
 */
bool core_mmu_nsec_ddr_is_defined(void)
{
	const struct core_mmu_phys_mem *start;
	const struct core_mmu_phys_mem *end;

	if (!get_discovered_nsec_ddr(&start, &end)) {
		start = &__start_phys_nsec_ddr_section;
		end = &__end_phys_nsec_ddr_section;
	}

	return start != end;
}

#define MSG_MEM_INSTERSECT(pa1, sz1, pa2, sz2) \
Expand Down

0 comments on commit 3a702c4

Please sign in to comment.