diff --git a/core/arch/arm/kernel/boot.c b/core/arch/arm/kernel/boot.c
index ee95d47e1e3..52366417ba6 100644
--- a/core/arch/arm/kernel/boot.c
+++ b/core/arch/arm/kernel/boot.c
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -333,7 +334,9 @@ static void init_asan(void)
 #ifdef CFG_WITH_PAGER
 	asan_tag_access(__pageable_start, __pageable_end);
 #endif /*CFG_WITH_PAGER*/
+#ifndef CFG_DYN_CONFIG
 	asan_tag_access(__nozi_start, __nozi_end);
+#endif
 #ifdef ARM32
 	asan_tag_access(__exidx_start, __exidx_end);
 	asan_tag_access(__extab_start, __extab_end);
@@ -509,12 +512,6 @@ static void init_pager_runtime(unsigned long pageable_part)
 	assert(hashes);
 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
 
-	/*
-	 * The pager is about the be enabled below, eventual temporary boot
-	 * memory allocation must be removed now.
-	 */
-	boot_mem_release_tmp_alloc();
-
 	carve_out_asan_mem();
 
 	mm = nex_phys_mem_ta_alloc(pageable_size);
@@ -879,6 +876,15 @@ static bool add_padding_to_pool(vaddr_t va, size_t len, void *ptr __unused)
 	return true;
 }
 
+static bool calc_padding_size(vaddr_t va __unused, size_t len, void *ptr)
+{
+	size_t *tot_size = ptr;
+
+	if (len >= MALLOC_INITIAL_POOL_MIN_SIZE)
+		(*tot_size) += len;
+	return false; /* don't consume */
+}
+
 static void init_primary(unsigned long pageable_part)
 {
 	vaddr_t va = 0;
@@ -906,6 +912,7 @@ static void init_primary(unsigned long pageable_part)
 	 * every virtual partition separately. Core code uses nex_malloc
 	 * instead.
 	 */
+#ifndef CFG_DYN_CONFIG
 #ifdef CFG_WITH_PAGER
 	/* Add heap2 first as heap1 may be too small as initial bget pool */
 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
@@ -915,13 +922,35 @@
 					      __nex_heap_start);
 #else
 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
+#endif
 #endif
 	IMSG_RAW("\n");
 
 	if (IS_ENABLED(CFG_DYN_CONFIG)) {
-		size_t sz = sizeof(struct thread_core_local) *
-			    CFG_TEE_CORE_NB_CORE;
-		void *p = boot_mem_alloc(sz, alignof(void *) * 2);
+		size_t tot_padding_sz = 0;
+		void *p = NULL;
+		size_t sz = 0;
+
+		if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
+			sz = CFG_CORE_NEX_HEAP_SIZE;
+		else
+			sz = CFG_CORE_HEAP_SIZE;
+		/*
+		 * thread_core_local and threads are allocated from the
+		 * heap so add the needed sizes to the initial heap.
+		 */
+		sz += sizeof(struct thread_core_local) * CFG_TEE_CORE_NB_CORE;
+		if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
+			sz += sizeof(struct thread_ctx) * CFG_NUM_THREADS;
+
+		boot_mem_foreach_padding(calc_padding_size, &tot_padding_sz);
+		if (tot_padding_sz > sz - MALLOC_INITIAL_POOL_MIN_SIZE)
+			sz = MALLOC_INITIAL_POOL_MIN_SIZE;
+		else
+			sz -= tot_padding_sz;
+		sz = ROUNDUP(sz, alignof(void *) * 2);
+		p = boot_mem_alloc(sz, alignof(void *) * 2);
+		boot_mem_foreach_padding(add_padding_to_pool, NULL);
 #ifdef CFG_NS_VIRTUALIZATION
 		nex_malloc_add_pool(p, sz);
 #else
@@ -932,15 +961,6 @@
 	core_mmu_save_mem_map();
 	core_mmu_init_phys_mem();
 	boot_mem_foreach_padding(add_padding_to_pool, NULL);
-	va = boot_mem_release_unused();
-	if (!IS_ENABLED(CFG_WITH_PAGER)) {
-		/*
-		 * We must update boot_cached_mem_end to reflect the memory
-		 * just unmapped by boot_mem_release_unused().
-		 */
-		assert(va && va <= boot_cached_mem_end);
-		boot_cached_mem_end = va;
-	}
 
 	if (IS_ENABLED(CFG_DYN_CONFIG)) {
 		/*
@@ -962,9 +982,31 @@
 		 * init_runtime()).
 		 */
 		thread_get_core_local()->curr_thread = 0;
+		if (IS_ENABLED(CFG_DYN_CONFIG)) {
+			threads = calloc(1, sizeof(*threads));
+			if (!threads)
+				panic();
+			thread_count = 1;
+		}
 		init_pager_runtime(pageable_part);
 	}
 
+	va = boot_mem_release_unused();
+	if (IS_ENABLED(CFG_WITH_PAGER)) {
+		/*
+		 * Paging is activated, and anything beyond the start of
+		 * the released unused memory is managed by the pager.
+		 */
+		assert(va && va <= core_mmu_linear_map_end);
+		core_mmu_linear_map_end = va;
+	}
+	/*
+	 * We must update boot_cached_mem_end to reflect the memory
+	 * just unmapped by boot_mem_release_unused().
+	 */
+	assert(va && va <= boot_cached_mem_end);
+	boot_cached_mem_end = va;
+
 	/* Initialize canaries around the stacks */
 	thread_init_canaries();
 	thread_init_per_cpu();
@@ -1083,9 +1125,7 @@ void __weak boot_init_primary_runtime(void)
 					  ~THREAD_EXCP_NATIVE_INTR);
 		init_tee_runtime();
 	}
-
-	if (!IS_ENABLED(CFG_WITH_PAGER))
-		boot_mem_release_tmp_alloc();
+	boot_mem_release_tmp_alloc();
 }
 
 void __weak boot_init_primary_final(void)
diff --git a/core/arch/arm/kernel/entry_a32.S b/core/arch/arm/kernel/entry_a32.S
index 9dd1cbe05c0..0ae32b93121 100644
--- a/core/arch/arm/kernel/entry_a32.S
+++ b/core/arch/arm/kernel/entry_a32.S
@@ -451,8 +451,12 @@ shadow_stack_access_ok:
 #endif
 
 #if defined(CFG_DYN_CONFIG)
+#ifdef CFG_WITH_PAGER
+	ldr	r0, =__vcore_free_end
+#else
 	ldr	r0, =boot_embdata_ptr
 	ldr	r0, [r0]
+#endif
 	sub	r1, r0, #THREAD_BOOT_INIT_TMP_ALLOC
 
 	/* Clear the allocated struct thread_core_local */
@@ -534,23 +538,20 @@
 	bl	boot_save_args
 	add	sp, sp, #(2 * 4)
 
+	ldr	r0, =__vcore_free_start
+	ldr	r2, =__vcore_free_end
 #ifdef CFG_WITH_PAGER
-	ldr	r0, =__init_end		/* pointer to boot_embdata */
-	ldr	r1, [r0]		/* struct boot_embdata::total_len */
-	add	r0, r0, r1
-	mov_imm	r1, 0xfff
-	add	r0, r0, r1		/* round up */
-	bic	r0, r0, r1		/* to next page */
-	mov_imm	r1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
-	mov	r2, r1
+#ifdef CFG_DYN_CONFIG
+	sub	r1, r2, #THREAD_BOOT_INIT_TMP_ALLOC
+#else
+	mov	r1, r2
+#endif
 #else
-	ldr	r0, =__vcore_free_start
 	ldr	r1, =boot_embdata_ptr
 	ldr	r1, [r1]
 #ifdef CFG_DYN_CONFIG
 	sub	r1, r1, #THREAD_BOOT_INIT_TMP_ALLOC
 #endif
-	ldr	r2, =__vcore_free_end
 #endif
 
 	bl	boot_mem_init
diff --git a/core/arch/arm/kernel/entry_a64.S b/core/arch/arm/kernel/entry_a64.S
index 5822a33b84a..bc51b5fdcb5 100644
--- a/core/arch/arm/kernel/entry_a64.S
+++ b/core/arch/arm/kernel/entry_a64.S
@@ -316,8 +316,12 @@ clear_nex_bss:
 	 * Point SP_EL1 a temporary struct thread_core_local before the
 	 * temporary stack.
 	 */
+#ifdef CFG_WITH_PAGER
+	adr_l	x0, __vcore_free_end
+#else
 	adr_l	x0, boot_embdata_ptr
 	ldr	x0, [x0]
+#endif
 	sub	x1, x0, #THREAD_BOOT_INIT_TMP_ALLOC
 
 	/* Clear the allocated struct thread_core_local */
@@ -388,22 +392,20 @@ clear_nex_bss:
 	mov	x4, xzr
 	bl	boot_save_args
 
+	adr_l	x0, __vcore_free_start
+	adr_l	x2, __vcore_free_end
 #ifdef CFG_WITH_PAGER
-	adr_l	x0, __init_end		/* pointer to boot_embdata */
-	ldr	w1, [x0]		/* struct boot_embdata::total_len */
-	add	x0, x0, x1
-	add	x0, x0, #0xfff		/* round up */
-	bic	x0, x0, #0xfff		/* to next page */
-	mov_imm	x1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
-	mov	x2, x1
+#ifdef CFG_DYN_CONFIG
+	sub	x1, x2, #THREAD_BOOT_INIT_TMP_ALLOC
+#else
+	mov	x1, x2
+#endif
 #else
-	adr_l	x0, __vcore_free_start
 	adr_l	x1, boot_embdata_ptr
 	ldr	x1, [x1]
 #ifdef CFG_DYN_CONFIG
 	sub	x1, x1, #THREAD_BOOT_INIT_TMP_ALLOC
 #endif
-	adr_l	x2, __vcore_free_end;
 #endif
 
 	bl	boot_mem_init
diff --git a/core/arch/arm/kernel/kern.ld.S b/core/arch/arm/kernel/kern.ld.S
index 39d82803dac..602e50017ee 100644
--- a/core/arch/arm/kernel/kern.ld.S
+++ b/core/arch/arm/kernel/kern.ld.S
@@ -49,6 +49,8 @@
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include
+#include
 #include
 #include
 #include
@@ -187,6 +189,7 @@ SECTIONS
 		__nex_bss_end = .;
 	}
 
+#ifndef CFG_DYN_CONFIG
 	/*
 	 * We want to keep all nexus memory in one place, because
 	 * it should be always mapped and it is easier to map one
@@ -212,6 +215,7 @@ SECTIONS
 		ASSERT(!(ABSOLUTE(.) & (16 * 1024 - 1)), "align nozi to 16kB");
 		KEEP(*(.nozi.mmu.base_table .nozi.mmu.l2))
 	}
+#endif
 
 	. = ALIGN(SMALL_PAGE_SIZE);
 
@@ -242,6 +246,12 @@ SECTIONS
 		__bss_end = .;
 	}
 
+#if defined(CFG_DYN_CONFIG) && !defined(CFG_WITH_PAGER)
+	. = ALIGN(SMALL_PAGE_SIZE);
+	__flatmap_free_start = .;
+#endif
+
+#ifndef CFG_DYN_CONFIG
 	.heap1 (NOLOAD) : {
 		/*
 		 * We're keeping track of the padding added before the
@@ -279,11 +289,7 @@ SECTIONS
 		. = ALIGN(8);
 		__nozi_stack_end = .;
 	}
-#ifndef CFG_WITH_PAGER
-	. = ALIGN(SMALL_PAGE_SIZE);
-	__flatmap_free_start = .;
-	__flatmap_unpg_rw_size = __flatmap_free_start - __flatmap_unpg_rw_start;
-#else
+#ifdef CFG_WITH_PAGER
 	.heap2 (NOLOAD) : {
 		__heap2_start = .;
 		/*
@@ -295,6 +301,53 @@ SECTIONS
 		. = ALIGN(SMALL_PAGE_SIZE);
 		__heap2_end = .;
 	}
+#endif
+#ifndef CFG_WITH_PAGER
+	. = ALIGN(SMALL_PAGE_SIZE);
+	__flatmap_free_start = .;
+#endif
+#endif /*!CFG_DYN_CONFIG*/
+
+#ifdef CFG_WITH_PAGER
+	/*
+	 * This memory is used by the boot_mem*() functions during boot.
+	 * Enough memory must be carved out to support the worst case
+	 * memory allocation, but the remaining unused memory will be
+	 * returned to be managed by the pager at end of boot. The tradeoff
+	 * is that the init code follows this section and must also fit in
+	 * the physical memory.
+	 */
+	.pager_boot_mem (NOLOAD): {
+#ifdef CFG_DYN_CONFIG
+#ifdef CFG_WITH_LPAE
+		XLAT_TABLE_SIZE = SMALL_PAGE_SIZE;
+#ifndef MAX_XLAT_TABLES
+		MAX_XLAT_TABLES = 3 /* ASLR_EXTRA */ + 5 /* TEE_EXTRA */;
+#endif
+		/* Base tables */
+		. += CORE_MMU_BASE_TABLE_OFFSET * CFG_TEE_CORE_NB_CORE;
+		/* Per thread EL0 tables */
+		. += CFG_NUM_THREADS * XLAT_TABLE_SIZE;
+#else
+		XLAT_TABLE_SIZE = 1024;
+#ifndef MAX_XLAT_TABLES
+		MAX_XLAT_TABLES = 6 + 2 /* ASLR_EXTRA */;
+#endif
+		. += 16 * 1024; /* Main L1 table */
+		. += 64 * 4 * CFG_NUM_THREADS; /* L1 table for TAs */
+#endif
+		. += MAX_XLAT_TABLES * XLAT_TABLE_SIZE;
+		. += CFG_CORE_HEAP_SIZE;
+		. += CFG_NUM_THREADS * THREAD_CTX_SIZE;
+		. += CFG_TEE_CORE_NB_CORE * THREAD_CORE_LOCAL_SIZE;
+		. += THREAD_BOOT_INIT_TMP_ALLOC;
+#else
+		. += SMALL_PAGE_SIZE;
+#endif
+		. = ALIGN(SMALL_PAGE_SIZE);
+	}
+	__flatmap_free_start = ADDR(.pager_boot_mem);
+	__flatmap_free_size = SIZEOF(.pager_boot_mem);
 
 	/* Start page aligned read-only memory */
 	__flatmap_unpg_rw_size = . - __flatmap_unpg_rw_start;
@@ -376,15 +429,19 @@
 	       "Load address before start of physical memory")
 	ASSERT(TEE_LOAD_ADDR < (TEE_RAM_START + TEE_RAM_PH_SIZE),
 	       "Load address after end of physical memory")
+	ASSERT(TEE_RAM_START + TEE_RAM_PH_SIZE > __init_end,
+	       "TEE_RAM_PH_SIZE too small")
 	ASSERT((TEE_RAM_START + TEE_RAM_PH_SIZE - __init_end) >
 	       SMALL_PAGE_SIZE * 2 +
 	       (__pageable_end - __pageable_start) / 4096 * 32 +
 	       SIZEOF(.rel) / 2 + SIZEOF(.rela) / 3 ,
 	       "Too few free pages to initialize paging")
-
-
 #endif /*CFG_WITH_PAGER*/
 
+#ifndef CFG_WITH_PAGER
+	__flatmap_free_start = .;
+#endif
+
 #ifdef CFG_CORE_SANITIZE_KADDRESS
 	/*
 	 * Guard against moving the location counter backwards in the assignment
@@ -426,6 +483,7 @@
 #ifndef CFG_CORE_SANITIZE_KADDRESS
 	__flatmap_free_size = _end_of_ram - __flatmap_free_start;
 #endif
+	__flatmap_unpg_rw_size = __flatmap_free_start - __flatmap_unpg_rw_start;
 #endif
 
 /*
@@ -476,11 +534,9 @@
 __vcore_unpg_rw_start = __flatmap_unpg_rw_start;
 __vcore_unpg_rw_size = __flatmap_unpg_rw_size;
 __vcore_unpg_rw_end = __vcore_unpg_rw_start + __vcore_unpg_rw_size;
-#ifndef CFG_WITH_PAGER
 __vcore_free_start = __flatmap_free_start;
 __vcore_free_size = __flatmap_free_size;
 __vcore_free_end = __flatmap_free_start + __flatmap_free_size;
-#endif
 
 #ifdef CFG_NS_VIRTUALIZATION
 /* Nexus read-write memory */
diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c
index 24888e2117c..b105b05b2fe 100644
--- a/core/arch/arm/kernel/thread.c
+++ b/core/arch/arm/kernel/thread.c
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -45,6 +46,11 @@ static size_t thread_user_kcode_size __nex_bss;
 #if defined(CFG_CORE_UNMAP_CORE_AT_EL0) && \
 	defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
 long thread_user_kdata_sp_offset __nex_bss;
+#ifdef CFG_DYN_CONFIG
+static uint8_t *thread_user_kdata_page __nex_bss;
+static size_t thread_user_kdata_page_size __nex_bss;
+static struct mobj *thread_user_kdata_page_mobj __nex_bss;
+#else
 static uint8_t thread_user_kdata_page[
 	ROUNDUP(sizeof(struct thread_core_local) * CFG_TEE_CORE_NB_CORE,
 		SMALL_PAGE_SIZE)]
@@ -54,6 +60,9 @@
 #else
 	__section(".nex_nozi.kdata_page");
 #endif
+static size_t thread_user_kdata_page_size __nex_data =
+	sizeof(thread_user_kdata_page);
+#endif
 #endif
 
 #ifdef ARM32
@@ -559,8 +568,31 @@ set_core_local_kcode_offset(struct thread_core_local *cls, long offset)
 		cls[n].kcode_offset = offset;
 }
 
+static struct mobj __maybe_unused *alloc_kdata_page(size_t count)
+{
+#ifdef CFG_WITH_PAGER
+	uint8_t *p = tee_pager_alloc(count * SMALL_PAGE_SIZE);
+	size_t n = 0;
+
+	if (!p)
+		panic();
+	/* Make sure all pages are locked in memory */
+	for (n = 0; n < count; n++)
+		p[n * SMALL_PAGE_SIZE] = 0;
+
+	return mobj_phys_alloc_flags((vaddr_t)p, virt_to_phys(p),
+				     count * SMALL_PAGE_SIZE, MEM_AREA_TEE_RAM,
+				     CORE_MEM_TEE_RAM, MAF_NULL);
+#else
+	return mobj_page_alloc(count, MAF_NEX | MAF_CORE_MEM | MAF_ZERO_INIT);
+#endif
+}
+
 static void init_user_kcode(void)
 {
+	__maybe_unused struct mobj *m = NULL;
+	__maybe_unused size_t c = 0;
+
 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
 	vaddr_t v = (vaddr_t)thread_excp_vect;
 	vaddr_t ve = (vaddr_t)thread_excp_vect_end;
@@ -575,6 +607,20 @@ static void init_user_kcode(void)
 	set_core_local_kcode_offset(thread_core_local,
 				    thread_user_kcode_offset);
 #if defined(CFG_CORE_WORKAROUND_SPECTRE_BP_SEC) && defined(ARM64)
+#ifdef CFG_DYN_CONFIG
+	assert(!thread_user_kdata_page_size && !thread_user_kdata_page &&
+	       !thread_user_kdata_page_mobj);
+	c = ROUNDUP_DIV(sizeof(struct thread_core_local) * CFG_TEE_CORE_NB_CORE,
+			SMALL_PAGE_SIZE);
+	m = alloc_kdata_page(c);
+	if (!m)
+		panic();
+	thread_user_kdata_page = mobj_get_va(m, 0, c * SMALL_PAGE_SIZE);
+	if (!thread_user_kdata_page)
+		panic();
+	thread_user_kdata_page_size = c * SMALL_PAGE_SIZE;
+	thread_user_kdata_page_mobj = m;
+#endif
 	set_core_local_kcode_offset((void *)thread_user_kdata_page,
 				    thread_user_kcode_offset);
 	/*
@@ -1056,10 +1102,15 @@ void thread_get_user_kdata(struct mobj **mobj, size_t *offset,
 	core_mmu_get_user_va_range(&v, NULL);
 	*va = v + thread_user_kcode_size;
 
+#ifdef CFG_DYN_CONFIG
+	*mobj = thread_user_kdata_page_mobj;
+	*offset = 0;
+#else
 	*mobj = mobj_tee_ram_rw;
-	*sz = sizeof(thread_user_kdata_page);
 	*offset = (vaddr_t)thread_user_kdata_page -
 		  (vaddr_t)mobj_get_va(*mobj, 0, *sz);
+#endif
+	*sz = thread_user_kdata_page_size;
 }
 #endif
 
diff --git a/core/arch/arm/kernel/virtualization.c b/core/arch/arm/kernel/virtualization.c
index e0f390ad410..6b1ec679b89 100644
--- a/core/arch/arm/kernel/virtualization.c
+++ b/core/arch/arm/kernel/virtualization.c
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -299,11 +300,29 @@ static TEE_Result alloc_gsd(struct guest_partition *prtn)
 	return TEE_SUCCESS;
 }
 
+
+static size_t add_heap_pool(void)
+{
+#ifdef CFG_DYN_CONFIG
+	static uint8_t heap_pool[SMALL_PAGE_SIZE] __aligned(sizeof(long) * 2);
+	void *buf = heap_pool;
+	size_t sz = sizeof(heap_pool);
+#else
+	void *buf = __heap1_start;
+	size_t sz = __heap1_end - __heap1_start;
+#endif
+
+	malloc_add_pool(buf, sz);
+
+	return sz;
+}
+
 TEE_Result virt_guest_created(uint16_t guest_id)
 {
 	struct guest_partition *prtn = NULL;
 	TEE_Result res = TEE_SUCCESS;
 	uint32_t exceptions = 0;
+	size_t heap_sz = 0;
 
 	if (guest_id == HYP_CLNT_ID)
 		return TEE_ERROR_BAD_PARAMETERS;
@@ -325,7 +344,7 @@
 
 	set_current_prtn(prtn);
 
-	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
+	heap_sz = add_heap_pool();
 	/*
 	 * The TA memory is registered in the core pool to allow it to be
 	 * used for both core and TA physical memory allocations.
@@ -333,6 +352,19 @@
 	phys_mem_init(tee_mm_get_smem(prtn->ta_ram),
 		      tee_mm_get_bytes(prtn->ta_ram), 0, 0);
 	page_alloc_init();
+	if (IS_ENABLED(CFG_DYN_CONFIG)) {
+		size_t sz = CFG_CORE_HEAP_SIZE;
+		vaddr_t va = 0;
+
+		sz += sizeof(struct thread_ctx) * CFG_NUM_THREADS;
+		sz -= heap_sz;
+		sz = ROUNDUP(sz, SMALL_PAGE_SIZE);
+		va = virt_page_alloc(sz / SMALL_PAGE_SIZE,
+				     MAF_CORE_MEM | MAF_ZERO_INIT);
+		if (!va)
+			goto err_unset_prtn;
+		malloc_add_pool((void *)va, sz);
+	}
 	/* Initialize threads */
 	thread_init_threads(CFG_NUM_THREADS);
 	/* Do the preinitcalls */
@@ -349,6 +381,9 @@
 
 	return TEE_SUCCESS;
 
+err_unset_prtn:
+	set_current_prtn(NULL);
+	core_mmu_set_default_prtn();
 err_free_gsd:
 	destroy_gsd(prtn, true /*free_only*/);
 err_free_prtn:
diff --git a/core/arch/arm/mm/core_mmu_lpae.c b/core/arch/arm/mm/core_mmu_lpae.c
index 7fa13d7c8c5..ca1727bc0ed 100644
--- a/core/arch/arm/mm/core_mmu_lpae.c
+++ b/core/arch/arm/mm/core_mmu_lpae.c
@@ -74,6 +74,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -643,40 +644,62 @@ void core_mmu_set_default_prtn_tbl(void)
 }
 #endif
 
+static void *alloc_table_from_phys_mem(struct mmu_partition *prtn)
+{
+	tee_mm_entry_t *mm = NULL;
+	paddr_t pa = 0;
+
+	/*
+	 * The default_partition only has a physical memory
+	 * pool for the nexus when virtualization is
+	 * enabled. We should use the nexus physical memory
+	 * pool if we're allocating memory for another
+	 * partition than our own.
+	 */
+	if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
+	    (prtn == &default_partition ||
+	     prtn != get_prtn())) {
+		mm = nex_phys_mem_core_alloc(XLAT_TABLE_SIZE);
+		if (!mm)
+			EMSG("Phys nex mem exhausted");
+	} else {
+		mm = phys_mem_core_alloc(XLAT_TABLE_SIZE);
+		if (!mm)
+			EMSG("Phys mem exhausted");
+	}
+	if (!mm)
+		return NULL;
+	pa = tee_mm_get_smem(mm);
+
+	return phys_to_virt(pa, MEM_AREA_SEC_RAM_OVERALL, XLAT_TABLE_SIZE);
+}
+
+static void *alloc_table_from_pager(void)
+{
+#ifdef CFG_WITH_PAGER
+	uint8_t *p = tee_pager_alloc(XLAT_TABLE_SIZE);
+
+	/* Dereference the pointer to map a physical page now. */
+	if (p)
+		*p = 0;
+	else
+		EMSG("Pager mem exhausted");
+	return p;
+#else
+	return NULL;
+#endif
+}
+
 static uint64_t *core_mmu_xlat_table_alloc(struct mmu_partition *prtn)
 {
 	uint64_t *new_table = NULL;
 
 	if (IS_ENABLED(CFG_DYN_CONFIG)) {
 		if (cpu_mmu_enabled()) {
-			tee_mm_entry_t *mm = NULL;
-			paddr_t pa = 0;
-
-			/*
-			 * The default_partition only has a physical memory
-			 * pool for the nexus when virtualization is
-			 * enabled. We should use the nexus physical memory
-			 * pool if we're allocating memory for another
-			 * partition than our own.
-			 */
-			if (IS_ENABLED(CFG_NS_VIRTUALIZATION) &&
-			    (prtn == &default_partition ||
-			     prtn != get_prtn())) {
-				mm = nex_phys_mem_core_alloc(XLAT_TABLE_SIZE);
-				if (!mm)
-					EMSG("Phys nex mem exhausted");
-			} else {
-				mm = phys_mem_core_alloc(XLAT_TABLE_SIZE);
-				if (!mm)
-					EMSG("Phys mem exhausted");
-			}
-			if (!mm)
-				return NULL;
-			pa = tee_mm_get_smem(mm);
-
-			new_table = phys_to_virt(pa, MEM_AREA_SEC_RAM_OVERALL,
-						 XLAT_TABLE_SIZE);
-			assert(new_table);
+			if (IS_ENABLED(CFG_WITH_PAGER))
+				new_table = alloc_table_from_pager();
+			else
+				new_table = alloc_table_from_phys_mem(prtn);
 		} else {
 			new_table = boot_mem_alloc(XLAT_TABLE_SIZE,
 						   XLAT_TABLE_SIZE);
diff --git a/core/arch/arm/mm/core_mmu_v7.c b/core/arch/arm/mm/core_mmu_v7.c
index a9227875c86..cc26708cfd2 100644
--- a/core/arch/arm/mm/core_mmu_v7.c
+++ b/core/arch/arm/mm/core_mmu_v7.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -277,6 +278,37 @@ static paddr_t core_mmu_get_ul1_ttb_pa(struct mmu_partition *prtn)
 	return pa;
 }
 
+static void *alloc_table_from_phys_mem(void)
+{
+	tee_mm_entry_t *mm = NULL;
+	paddr_t pa = 0;
+
+	mm = phys_mem_core_alloc(SMALL_PAGE_SIZE);
+	if (!mm) {
+		EMSG("Phys mem exhausted");
+		return NULL;
+	}
+	pa = tee_mm_get_smem(mm);
+
+	return phys_to_virt(pa, MEM_AREA_SEC_RAM_OVERALL, SMALL_PAGE_SIZE);
+}
+
+static void *alloc_table_from_pager(void)
+{
+#ifdef CFG_WITH_PAGER
+	uint8_t *p = tee_pager_alloc(SMALL_PAGE_SIZE);
+
+	/* Dereference the pointer to map a physical page now. */
+	if (p)
+		*p = 0;
+	else
+		EMSG("Pager mem exhausted");
+	return p;
+#else
+	return NULL;
+#endif
+}
+
 static uint32_t *alloc_l2_table(struct mmu_partition *prtn)
 {
 	uint32_t *new_table = NULL;
@@ -290,18 +322,11 @@
 	if (prtn->last_l2_page)
 		goto dyn_out;
 	if (cpu_mmu_enabled()) {
-		tee_mm_entry_t *mm = NULL;
-		paddr_t pa = 0;
-
-		mm = phys_mem_core_alloc(SMALL_PAGE_SIZE);
-		if (!mm) {
-			EMSG("Phys mem exhausted");
-			return NULL;
-		}
-		pa = tee_mm_get_smem(mm);
-
-		p = phys_to_virt(pa, MEM_AREA_SEC_RAM_OVERALL,
-				 SMALL_PAGE_SIZE);
+		if (IS_ENABLED(CFG_WITH_PAGER))
+			p = alloc_table_from_pager();
+		else
+			p = alloc_table_from_phys_mem();
+		assert(p);
 	} else {
 		p = boot_mem_alloc(SMALL_PAGE_SIZE,
 				   SMALL_PAGE_SIZE);
diff --git a/core/arch/arm/mm/tee_pager.c b/core/arch/arm/mm/tee_pager.c
index 3897cd28ab9..fe33f559714 100644
--- a/core/arch/arm/mm/tee_pager.c
+++ b/core/arch/arm/mm/tee_pager.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -226,38 +227,48 @@ static void pager_unlock(uint32_t exceptions)
 
 void *tee_pager_phys_to_virt(paddr_t pa, size_t len)
 {
-	struct core_mmu_table_info ti;
-	unsigned idx;
-	uint32_t a;
-	paddr_t p;
-	vaddr_t v;
-	size_t n;
+	struct core_mmu_table_info ti = { };
+	unsigned long map_offs = 0;
+	unsigned int page_offs = 0;
+	unsigned int idx = 0;
+	uint32_t a = 0;
+	paddr_t p = 0;
+	vaddr_t v = 0;
+	size_t n = 0;
+
+	if (IS_ENABLED(CFG_CORE_ASLR))
+		map_offs = boot_mmu_config.map_offset;
+
+	page_offs = pa & SMALL_PAGE_MASK;
+	pa &= ~(paddr_t)SMALL_PAGE_MASK;
 
-	if (pa & SMALL_PAGE_MASK || len > SMALL_PAGE_SIZE)
+	if (len > SMALL_PAGE_SIZE || len + page_offs > SMALL_PAGE_SIZE)
 		return NULL;
 
 	/*
-	 * Most addresses are mapped lineary, try that first if possible.
+	 * Most addresses are mapped linearly (+ map_offs, with ASLR). Try
+	 * that first, if possible.
 	 */
-	if (!tee_pager_get_table_info(pa, &ti))
+	if (!tee_pager_get_table_info(pa + map_offs, &ti))
 		return NULL; /* impossible pa */
-	idx = core_mmu_va2idx(&ti, pa);
+	idx = core_mmu_va2idx(&ti, pa + map_offs);
 	core_mmu_get_entry(&ti, idx, &p, &a);
 	if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
-		return (void *)core_mmu_idx2va(&ti, idx);
+		return (void *)(core_mmu_idx2va(&ti, idx) + page_offs);
 
 	n = 0;
-	idx = core_mmu_va2idx(&pager_tables[n].tbl_info, TEE_RAM_START);
+	idx = core_mmu_va2idx(&pager_tables[n].tbl_info,
+			      TEE_RAM_START + map_offs);
 	while (true) {
 		while (idx < TBL_NUM_ENTRIES) {
 			v = core_mmu_idx2va(&pager_tables[n].tbl_info, idx);
-			if (v >= (TEE_RAM_START + TEE_RAM_VA_SIZE))
+			if (v >= (TEE_RAM_START + TEE_RAM_VA_SIZE + map_offs))
 				return NULL;
 
 			core_mmu_get_entry(&pager_tables[n].tbl_info,
 					   idx, &p, &a);
 			if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
-				return (void *)v;
+				return (void *)(v + page_offs);
 
 			idx++;
 		}
diff --git a/core/include/kernel/linker.h b/core/include/kernel/linker.h
index 5a2c917ed0e..f68a7d0a512 100644
--- a/core/include/kernel/linker.h
+++ b/core/include/kernel/linker.h
@@ -46,17 +46,10 @@ extern const uint8_t __extab_end[];
 
 #define VCORE_START_VA		((vaddr_t)__text_start)
 
-#ifndef CFG_WITH_PAGER
 #define VCORE_FREE_PA		((unsigned long)__vcore_free_start)
 #define VCORE_FREE_SZ		((size_t)(__vcore_free_end - \
 					  __vcore_free_start))
 #define VCORE_FREE_END_PA	((unsigned long)__vcore_free_end)
-#else
-/* No VCORE_FREE range in pager configuration since it uses all memory */
-#define VCORE_FREE_PA		PADDR_MAX
-#define VCORE_FREE_SZ		0
-#define VCORE_FREE_END_PA	PADDR_MAX
-#endif
 
 #define EMIT_SECTION_INFO_SYMBOLS(section_name) \
 	extern const uint8_t __vcore_ ## section_name ## _start[]; \
diff --git a/core/include/kernel/user_access.h b/core/include/kernel/user_access.h
index 54b3bfd9a7b..b2b390a4589 100644
--- a/core/include/kernel/user_access.h
+++ b/core/include/kernel/user_access.h
@@ -157,6 +157,12 @@ TEE_Result bb_memdup_user_private(const void *src, size_t len, void **p);
 TEE_Result bb_strndup_user(const char *src, size_t maxlen, char **dst,
 			   size_t *dstlen);
 
+#ifdef CFG_WITH_USER_TA
+void uref_base_init(vaddr_t va);
+#else
+static inline void uref_base_init(vaddr_t va __unused) { }
+#endif
+
 TEE_Result copy_kaddr_to_uref(uint32_t *uref, void *kaddr);
 
 uint32_t kaddr_to_uref(void *kaddr);
diff --git a/core/include/mm/core_mmu.h b/core/include/mm/core_mmu.h
index e5db0b62315..723e003820f 100644
--- a/core/include/mm/core_mmu.h
+++ b/core/include/mm/core_mmu.h
@@ -289,6 +289,8 @@ extern unsigned long default_nsec_shm_paddr;
 extern unsigned long default_nsec_shm_size;
 #endif
 
+extern vaddr_t core_mmu_linear_map_end;
+
 /*
  * Physical load address of OP-TEE updated during boot if needed to reflect
  * the value used.
diff --git a/core/include/mm/mobj.h b/core/include/mm/mobj.h
index 452715817c9..44355223ee7 100644
--- a/core/include/mm/mobj.h
+++ b/core/include/mm/mobj.h
@@ -227,6 +227,9 @@ static inline bool mobj_check_offset_and_len(struct mobj *mobj, size_t offset,
 
 struct mobj *mobj_phys_alloc(paddr_t pa, size_t size, uint32_t cattr,
 			     enum buf_is_attr battr);
+struct mobj *mobj_phys_alloc_flags(vaddr_t va, paddr_t pa, size_t size,
+				   enum teecore_memtypes memtype,
+				   enum buf_is_attr battr, uint32_t flags);
 
 #if defined(CFG_CORE_FFA)
 struct mobj *mobj_ffa_get_by_cookie(uint64_t cookie,
diff --git a/core/include/mm/page_alloc.h b/core/include/mm/page_alloc.h
index 9b92e2a3811..e8255aa7a8f 100644
--- a/core/include/mm/page_alloc.h
+++ b/core/include/mm/page_alloc.h
@@ -14,5 +14,6 @@ void nex_page_alloc_init(void);
 void page_alloc_init(void);
 
 vaddr_t virt_page_alloc(size_t count, uint32_t flags);
+struct mobj *mobj_page_alloc(size_t count, uint32_t flags);
 
 #endif /*__MM_PAGE_ALLOC_H*/
diff --git a/core/kernel/thread.c b/core/kernel/thread.c
index 2dbcbf3af3f..0f694a7d311 100644
--- a/core/kernel/thread.c
+++ b/core/kernel/thread.c
@@ -580,16 +580,26 @@ static void init_thread_stacks(void)
 }
 #endif /*CFG_WITH_PAGER*/
 
+static struct thread_ctx *swap_thread_ctx(struct thread_ctx *thr, size_t count)
+{
+	struct thread_ctx *t = threads;
+
+	if (!thr)
+		panic();
+	threads = thr;
+	thread_count = count;
+
+	return t;
+}
+DECLARE_KEEP_PAGER(swap_thread_ctx);
+
 void thread_init_threads(size_t count)
 {
 	size_t n = 0;
 
 	if (IS_ENABLED(CFG_DYN_CONFIG)) {
 		assert(count <= CFG_NUM_THREADS);
-		threads = calloc(count, sizeof(*threads));
-		if (!threads)
-			panic();
-		thread_count = count;
+		free(swap_thread_ctx(calloc(count, sizeof(*threads)), count));
 	} else {
 		assert(count == CFG_NUM_THREADS);
 	}
diff --git a/core/kernel/user_access.c b/core/kernel/user_access.c
index 029330addbb..62c94b6ed8b 100644
--- a/core/kernel/user_access.c
+++ b/core/kernel/user_access.c
@@ -16,6 +16,8 @@
 
 #define BB_ALIGNMENT	(sizeof(long) * 2)
 
+static vaddr_t uref_base;
+
 static struct user_mode_ctx *get_current_uctx(void)
 {
 	struct ts_session *s = ts_get_current_session();
@@ -295,6 +297,12 @@
 	return TEE_SUCCESS;
 }
 
+void uref_base_init(vaddr_t va)
+{
+	assert(!uref_base && va);
+	uref_base = va;
+}
+
 TEE_Result copy_kaddr_to_uref(uint32_t *uref, void *kaddr)
 {
 	uint32_t ref = kaddr_to_uref(kaddr);
@@ -308,14 +316,14 @@ uint32_t kaddr_to_uref(void *kaddr)
 		unsigned int uref_tag_shift = 32 - MEMTAG_TAG_WIDTH;
 		vaddr_t uref = memtag_strip_tag_vaddr(kaddr);
 
-		uref -= VCORE_START_VA;
+		uref -= uref_base;
 		assert(uref < (UINT32_MAX >> MEMTAG_TAG_WIDTH));
 		uref |= (vaddr_t)memtag_get_tag(kaddr) << uref_tag_shift;
 		return uref;
 	}
 
-	assert(((vaddr_t)kaddr - VCORE_START_VA) < UINT32_MAX);
-	return (vaddr_t)kaddr - VCORE_START_VA;
+	assert(((vaddr_t)kaddr - uref_base) < UINT32_MAX);
+	return (vaddr_t)kaddr - uref_base;
 }
 
 vaddr_t uref_to_vaddr(uint32_t uref)
@@ -325,8 +333,8 @@
 		unsigned int uref_tag_shift = 32 - MEMTAG_TAG_WIDTH;
 		uint8_t tag = uref >> uref_tag_shift;
 
-		return memtag_insert_tag_vaddr(VCORE_START_VA + u, tag);
+		return memtag_insert_tag_vaddr(uref_base + u, tag);
 	}
 
-	return VCORE_START_VA + uref;
+	return uref_base + uref;
 }
diff --git a/core/mm/boot_mem.c b/core/mm/boot_mem.c
index 7686b958ecd..3b43a89c0e7 100644
--- a/core/mm/boot_mem.c
+++ b/core/mm/boot_mem.c
@@ -14,6 +14,18 @@
 #include
 #include
 
+#ifdef CFG_WITH_PAGER
+#include
+#endif
+
+static inline void pager_add_pages(vaddr_t vaddr __maybe_unused,
+				   size_t npages __maybe_unused)
+{
+#ifdef CFG_WITH_PAGER
+	tee_pager_add_pages(vaddr, npages, true);
+#endif
+}
+
 /*
  * struct boot_mem_reloc - Pointers relocated in memory during boot
  * @ptrs: Array of relocation
@@ -44,6 +56,7 @@
  * @orig_mem_end: Boot memory start end address
  * @mem_start: Boot memory free space start address
 * @mem_end: Boot memory free space end address
+ * @final_mem_start: Final @mem_start before end of boot_mem_release_unused()
 * @reloc: Boot memory pointers requiring relocation
 * @padding: Linked list of unused memory between allocated blocks
 */
@@ -52,6 +65,7 @@ struct boot_mem_desc {
	vaddr_t orig_mem_end;
	vaddr_t mem_start;
	vaddr_t mem_end;
+	vaddr_t final_mem_start;
	struct boot_mem_reloc *reloc;
	struct boot_mem_padding *padding;
 };
@@ -66,7 +80,7 @@ static void *mem_alloc_tmp(struct boot_mem_desc *desc, size_t len, size_t align)
 		align = MAX(align, ASAN_BLOCK_SIZE);
 
 	assert(desc && desc->mem_start && desc->mem_end);
-	assert(IS_POWER_OF_TWO(align) && !(len % align));
+	assert(IS_POWER_OF_TWO(align));
 	if (SUB_OVERFLOW(desc->mem_end, len, &va))
 		panic();
 	va = ROUNDDOWN2(va, align);
@@ -107,9 +121,8 @@
 	if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS))
 		align = MAX(align, ASAN_BLOCK_SIZE);
 
-	runtime_assert(!IS_ENABLED(CFG_WITH_PAGER));
 	assert(desc && desc->mem_start && desc->mem_end);
-	assert(IS_POWER_OF_TWO(align) && !(len % align));
+	assert(IS_POWER_OF_TWO(align));
 	va = ROUNDUP2(desc->mem_start, align);
 	if (ADD_OVERFLOW(va, len, &ve))
 		panic();
@@ -299,8 +312,17 @@
 	     (size_t)(boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end),
 	     boot_mem_desc->mem_end);
 
-	if (IS_ENABLED(CFG_WITH_PAGER))
+	va = ROUNDUP(boot_mem_desc->mem_start, SMALL_PAGE_SIZE);
+	tmp_va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
+
+	if (IS_ENABLED(CFG_WITH_PAGER)) {
+		if (tmp_va > va) {
+			n = tmp_va - va;
+			DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);
+			pager_add_pages(va, n / SMALL_PAGE_SIZE);
+		}
 		goto out;
+	}
 
 	pa = vaddr_to_phys(ROUNDUP(boot_mem_desc->orig_mem_start,
				   SMALL_PAGE_SIZE));
@@ -308,9 +330,6 @@
 	if (!mm)
 		panic();
 
-	va = ROUNDUP(boot_mem_desc->mem_start, SMALL_PAGE_SIZE);
-
-	tmp_va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
 	tmp_n = boot_mem_desc->orig_mem_end - tmp_va;
 	tmp_pa = vaddr_to_phys(tmp_va);
 
@@ -332,6 +351,7 @@
 	core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE);
 
 out:
+	boot_mem_desc->final_mem_start = boot_mem_desc->mem_start;
 	/* Stop further allocations. */
 	boot_mem_desc->mem_start = boot_mem_desc->mem_end;
 	return va;
@@ -340,6 +360,7 @@
 void boot_mem_release_tmp_alloc(void)
 {
 	tee_mm_entry_t *mm = NULL;
+	vaddr_t tmp_va = 0;
 	vaddr_t va = 0;
 	paddr_t pa = 0;
 	size_t n = 0;
@@ -347,15 +368,21 @@
 
 	assert(boot_mem_desc &&
 	       boot_mem_desc->mem_start == boot_mem_desc->mem_end);
+	va = MAX(ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE),
+		 ROUNDUP(boot_mem_desc->final_mem_start, SMALL_PAGE_SIZE));
 
 	if (IS_ENABLED(CFG_WITH_PAGER)) {
-		n = boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end;
-		va = boot_mem_desc->mem_end;
+		tmp_va = ROUNDDOWN(boot_mem_desc->orig_mem_end,
+				   SMALL_PAGE_SIZE);
+		if (tmp_va > va) {
+			n = tmp_va - va;
+			DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);
+			pager_add_pages(va, n / SMALL_PAGE_SIZE);
+		}
+
 		boot_mem_desc = NULL;
-		DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va);
 		return;
 	}
 
-	va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE);
 	pa = vaddr_to_phys(va);
 	mm = nex_phys_mem_mm_find(pa);
diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c
index 6170ed580a4..7fda2d5c86d 100644
--- a/core/mm/core_mmu.c
+++ b/core/mm/core_mmu.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -44,6 +45,14 @@ tee_mm_pool_t core_virt_mem_pool;
 /* Virtual memory pool for shared memory mappings */
 tee_mm_pool_t core_virt_shm_pool;
 
+#ifdef CFG_WITH_PAGER
+/* this is synced with the generic linker file kern.ld.S */
+vaddr_t core_mmu_linear_map_end = (vaddr_t)__init_start;
+#else
+/* Not used, but needed to avoid a few #ifdefs */
+vaddr_t core_mmu_linear_map_end;
+#endif
+
 #ifdef CFG_CORE_PHYS_RELOCATABLE
 unsigned long core_mmu_tee_load_pa __nex_bss;
 #else
@@ -1473,6 +1482,29 @@ static bool mem_map_add_id_map(struct memory_map *mem_map,
 	return true;
 }
 
+static vaddr_t get_uref_base(struct memory_map *mem_map)
+{
+	vaddr_t va = SIZE_MAX;
+	size_t n = 0;
+
+	/* Find the lowest address possible for core data mappings */
+	for (n = 0; n < mem_map->count; n++) {
+		switch (mem_map->map[n].type) {
+		case MEM_AREA_TEE_RAM:
+		case MEM_AREA_TEE_RAM_RX:
+		case MEM_AREA_TEE_DYN_VASPACE:
+		case MEM_AREA_NEX_DYN_VASPACE:
+			va = MIN(va, mem_map->map[n].va);
+			break;
+		default:
+			break;
+		}
+	}
+
+	assert(va);
+	return va;
+}
+
 static struct memory_map *init_mem_map(struct memory_map *mem_map,
 				       unsigned long seed,
 				       unsigned long *ret_offs)
@@ -1528,6 +1560,7 @@
 	      cmp_mmap_by_lower_va);
 
 	dump_mmap_table(mem_map);
+	uref_base_init(get_uref_base(mem_map));
 
 	*ret_offs = offs;
 	return mem_map;
@@ -1598,12 +1631,7 @@ static void check_mem_map(struct memory_map *mem_map)
 */
 void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg)
 {
-#ifndef CFG_NS_VIRTUALIZATION
-	vaddr_t start = ROUNDDOWN((vaddr_t)__nozi_start, SMALL_PAGE_SIZE);
-#else
-	vaddr_t start = ROUNDDOWN((vaddr_t)__vcore_nex_rw_start,
				  SMALL_PAGE_SIZE);
-#endif
+	vaddr_t start = ROUNDDOWN((vaddr_t)__text_start, SMALL_PAGE_SIZE);
 #ifdef CFG_DYN_CONFIG
	vaddr_t len = ROUNDUP(VCORE_FREE_END_PA, SMALL_PAGE_SIZE) - start;
 #else
@@ -2435,15 +2463,9 @@ void *core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
 }
 
 #ifdef CFG_WITH_PAGER
-static vaddr_t get_linear_map_end_va(void)
-{
-	/* this is synced with the generic linker file kern.ld.S */
-	return (vaddr_t)__heap2_end;
-}
-
 static paddr_t get_linear_map_end_pa(void)
 {
-	return get_linear_map_end_va() - boot_mmu_config.map_offset;
+	return core_mmu_linear_map_end - boot_mmu_config.map_offset;
 }
 #endif
 
@@ -2674,16 +2696,25 @@
 bool is_unpaged(const void *va)
 {
 	vaddr_t v = (vaddr_t)va;
 
-	return v >= VCORE_START_VA && v < get_linear_map_end_va();
+	return v >= VCORE_START_VA && v < core_mmu_linear_map_end;
 }
 #endif
 
 #ifdef CFG_NS_VIRTUALIZATION
 bool is_nexus(const void *va)
 {
+	struct tee_mmap_region *mm = NULL;
 	vaddr_t v = (vaddr_t)va;
 
-	return v >= VCORE_START_VA && v < VCORE_NEX_RW_PA + VCORE_NEX_RW_SZ;
+	if (v >= VCORE_START_VA && v < VCORE_NEX_RW_PA + VCORE_NEX_RW_SZ)
+		return true;
+
+	mm = find_map_by_va((void *)v);
+	if (mm && (mm->type == MEM_AREA_NEX_RAM_RW ||
+		   mm->type == MEM_AREA_NEX_DYN_VASPACE))
+		return virt_to_phys((void *)v); /* it must be mapped */
+
+	return false;
 }
 #endif
diff --git a/core/mm/mobj.c b/core/mm/mobj.c
index 50862429755..5f8051b5871 100644
--- a/core/mm/mobj.c
+++ b/core/mm/mobj.c
@@ -33,6 +33,7 @@ struct mobj_phys {
 	enum buf_is_attr battr;
 	/* Defined by TEE_MATTR_MEM_TYPE_* in tee_mmu_types.h */
 	uint32_t mem_type;
+	uint32_t flags; /* MAF_* in malloc_flags.h */
 	vaddr_t va;
 	paddr_t pa;
 };
@@ -109,7 +110,7 @@ static void mobj_phys_free(struct mobj *mobj)
 {
 	struct mobj_phys *moph = to_mobj_phys(mobj);
 
-	free(moph);
+	free_flags(moph->flags, moph);
 }
 
 /*
@@ -196,6 +197,40 @@
 	return mobj_phys_init(pa, size, mem_type, battr, area_type);
 }
 
+struct mobj *mobj_phys_alloc_flags(vaddr_t va, paddr_t pa, size_t size,
+				   enum teecore_memtypes memtype,
+				   enum buf_is_attr battr, uint32_t flags)
+{
+	uint32_t f = (flags & MAF_NEX) | MAF_ZERO_INIT;
+	struct mobj_phys *m = NULL;
+
+	if ((pa & CORE_MMU_USER_PARAM_MASK) ||
+	    (size & CORE_MMU_USER_PARAM_MASK)) {
+		DMSG("Expect %#x alignment", CORE_MMU_USER_PARAM_SIZE);
+		return NULL;
+	}
+
+	/* Only SDP memory may not have a virtual address */
+	if (!va && battr != CORE_MEM_SDP_MEM)
+		return NULL;
+
+	m = malloc_flags(f, NULL, MALLOC_DEFAULT_ALIGNMENT, sizeof(*m));
+	if (!m)
+		return NULL;
+
+	m->flags = f;
+	m->battr = battr;
+	m->mem_type = (core_mmu_type_to_attr(memtype) >>
+		       TEE_MATTR_MEM_TYPE_SHIFT) & TEE_MATTR_MEM_TYPE_MASK;
+	m->mobj.size = size;
+	m->mobj.ops = &mobj_phys_ops;
+	refcount_set(&m->mobj.refc, 1);
+	m->pa = pa;
+	m->va = va;
+
+	return &m->mobj;
+}
+
 /*
  * mobj_virt implementation
  */
diff --git a/core/mm/page_alloc.c b/core/mm/page_alloc.c
index dec5f0b3bb0..15d4e1b9fbb 100644
--- a/core/mm/page_alloc.c
+++ b/core/mm/page_alloc.c
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -42,13 +43,14 @@ void page_alloc_init(void)
 			  MEM_AREA_TEE_DYN_VASPACE);
 }
 
-vaddr_t virt_page_alloc(size_t count, uint32_t flags)
+static vaddr_t page_alloc(size_t count, uint32_t flags, struct mobj **ret_mobj)
 {
 	enum teecore_memtypes memtype = 0;
 	TEE_Result res = TEE_SUCCESS;
 	tee_mm_pool_t *pool = NULL;
 	tee_mm_entry_t *mmv = NULL;
 	tee_mm_entry_t *mmp = NULL;
+	struct mobj *mobj = NULL;
 	size_t vcount = count;
 	size_t pcount = count;
 	vaddr_t va = 0;
@@ -81,17 +83,46 @@
 	pa = tee_mm_get_smem(mmp);
 	assert(pa);
 
+	if (ret_mobj) {
+		mobj = mobj_phys_alloc_flags(va, pa, pcount * SMALL_PAGE_SIZE,
+					     memtype, CORE_MEM_TEE_RAM, flags);
+		if (!mobj)
+			goto err_mm_free;
+	}
+
 	res = core_mmu_map_contiguous_pages(va, pa, pcount, memtype);
 	if (res)
-		goto err;
+		goto err_mobj_put;
 
 	if (flags & MAF_ZERO_INIT)
 		memset((void *)va, 0, pcount * SMALL_PAGE_SIZE);
 
+	if (ret_mobj)
+		*ret_mobj = mobj;
+
 	return va;
-err:
+
+err_mobj_put:
+	mobj_put(mobj);
+err_mm_free:
 	tee_mm_free(mmp);
 err_free_mmv:
 	tee_mm_free(mmv);
 	return 0;
 }
+
+vaddr_t virt_page_alloc(size_t count, uint32_t flags)
+{
+	return page_alloc(count, flags, NULL);
+}
+
+struct mobj *mobj_page_alloc(size_t count, uint32_t flags)
+{
+	struct mobj *m = NULL;
+	vaddr_t va = 0;
+
+	va = page_alloc(count, flags, &m);
+	if (!va)
+		return NULL;
+	return m;
+}
diff --git a/core/mm/pgt_cache.c b/core/mm/pgt_cache.c
index cd8f11fbc4c..eeabed6e40a 100644
--- a/core/mm/pgt_cache.c
+++ b/core/mm/pgt_cache.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -405,6 +406,28 @@ void pgt_init(void)
 		}
 	}
 }
+#elif defined(CFG_DYN_CONFIG)
+/* Simple allocation of translation tables with virt_page_alloc() */
+void pgt_init(void)
+{
+	uint8_t *tbls = NULL;
+	size_t page_count = 0;
+	size_t n = 0;
+
+	page_count = ROUNDUP_DIV(PGT_CACHE_SIZE * PGT_SIZE, SMALL_PAGE_SIZE);
+	tbls = (uint8_t *)virt_page_alloc(page_count,
+					  MAF_CORE_MEM | MAF_ZERO_INIT);
+	if (!tbls)
+		panic();
+
+	for (n = 0; n < PGT_CACHE_SIZE; n++) {
+		struct pgt *p = pgt_entries + n;
+
+		p->tbl = tbls + n * PGT_SIZE;
+		SLIST_INSERT_HEAD(&pgt_free_list, p, link);
+	}
+}
+
 #else
 /* Static allocation of translation tables */
 void pgt_init(void)
diff --git a/mk/config.mk b/mk/config.mk
index 1b58b5ca8c6..73540e72e91 100644
--- a/mk/config.mk
+++ b/mk/config.mk
@@ -1310,12 +1310,8 @@ CFG_CORE_UNSAFE_MODEXP ?= n
 CFG_TA_MBEDTLS_UNSAFE_MODEXP ?= n
 
 # CFG_DYN_CONFIG, when enabled, use dynamic memory allocation for translation
-# tables and stacks. Not supported with pager.
-ifeq ($(CFG_WITH_PAGER),y)
-$(call force,CFG_DYN_CONFIG,n,conflicts with CFG_WITH_PAGER)
-else
+# tables and stacks.
 CFG_DYN_CONFIG ?= y
-endif
 
 # CFG_EXTERNAL_ABORT_PLAT_HANDLER is used to implement platform-specific
 # handling of external abort implementing the plat_external_abort_handler()