Commits (19)
All 19 commits are by jenswi-linaro.

425a394  core: boot_mem: remove multiple of align restriction (Jun 4, 2025)
bbeaf8e  core: fix boot_mem_release_tmp_alloc() (May 19, 2025)
86f682b  core: mm: update is_nexus() (May 19, 2025)
c75487f  core: mm: add mobj_phys_alloc_flags() (May 19, 2025)
56653c8  core: mm: add mobj_page_alloc() (May 19, 2025)
77ca670  core: simplify core_init_mmu_map() (May 19, 2025)
1cc1f02  core: mm: pgt_init(): use virt_page_alloc() if available (May 19, 2025)
af90477  core: arm: init_user_kcode(): use mobj_page_alloc() if available (May 19, 2025)
ef5dc1f  core: set uref_base for uref calculations (Jun 9, 2025)
64b80b4  core: arm: allocate heap from physical memory pool (May 19, 2025)
886471a  core: tee_pager_phys_to_virt(): support ASLR and sub-page lookups (May 19, 2025)
ed7cfb6  core: arm: support allocating xlat table from pager (May 19, 2025)
756bdec  core: arm: allocate kdata page from pager (May 19, 2025)
2af0d1f  core: mm: core_mmu.c: add core_mmu_linear_map_end (May 19, 2025)
d499ca2  core: thread_init_threads(): replace dummy thread ctx (May 19, 2025)
bb2de53  core: add boot_mem section for pager (May 19, 2025)
1640820  [fix] core: add boot_mem section for pager (Oct 29, 2025)
3d3b137  core: arm: enable dynamic config with pager (May 19, 2025)
75b1531  [fix] core: arm: enable dynamic config with pager (Oct 30, 2025)
82 changes: 61 additions & 21 deletions core/arch/arm/kernel/boot.c
@@ -26,6 +26,7 @@
#include <kernel/panic.h>
#include <kernel/tee_misc.h>
#include <kernel/thread.h>
#include <kernel/thread_private.h>
#include <kernel/tpm.h>
#include <kernel/transfer_list.h>
#include <libfdt.h>
@@ -333,7 +334,9 @@ static void init_asan(void)
#ifdef CFG_WITH_PAGER
asan_tag_access(__pageable_start, __pageable_end);
#endif /*CFG_WITH_PAGER*/
#ifndef CFG_DYN_CONFIG
asan_tag_access(__nozi_start, __nozi_end);
Contributor:
MMU tables may still be allocated from the nozi section. Shouldn't it still be tagged for ASAN access in these cases?

Contributor Author (jenswi-linaro):
__nozi_start and __nozi_end don't exist with CFG_DYN_CONFIG. This is instead covered by the boot_mem_init_asan() call above.

#endif
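
To make the exchange above concrete, here is a minimal sketch (not part of the diff) of how the tagging responsibility splits, assuming boot_mem_init_asan() tags the whole boot_mem range as stated in the reply; the header path and symbol declarations are assumptions:

#include <kernel/asan.h>	/* for asan_tag_access(); path assumed */
#include <stdint.h>

/* Link-script symbols, assumed to match kern.ld.S */
extern uint8_t __nozi_start[], __nozi_end[];

static void tag_mmu_table_mem(void)
{
#ifdef CFG_DYN_CONFIG
	/*
	 * No .nozi section exists; MMU tables are served from
	 * boot_mem, which boot_mem_init_asan() has already tagged.
	 */
#else
	/* MMU tables live in .nozi, so tag that range explicitly */
	asan_tag_access(__nozi_start, __nozi_end);
#endif
}
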
#ifdef ARM32
asan_tag_access(__exidx_start, __exidx_end);
asan_tag_access(__extab_start, __extab_end);
@@ -509,12 +512,6 @@ static void init_pager_runtime(unsigned long pageable_part)
assert(hashes);
asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);

/*
* The pager is about to be enabled below; any temporary boot
* memory allocations must be removed now.
*/
boot_mem_release_tmp_alloc();

carve_out_asan_mem();

mm = nex_phys_mem_ta_alloc(pageable_size);
@@ -879,6 +876,15 @@ static bool add_padding_to_pool(vaddr_t va, size_t len, void *ptr __unused)
return true;
}

static bool calc_padding_size(vaddr_t va __unused, size_t len, void *ptr)
{
size_t *tot_size = ptr;

if (len >= MALLOC_INITIAL_POOL_MIN_SIZE)
(*tot_size) += len;
return false; /* don't consume */
}

static void init_primary(unsigned long pageable_part)
{
vaddr_t va = 0;
@@ -906,6 +912,7 @@
* every virtual partition separately. Core code uses nex_malloc
* instead.
*/
#ifndef CFG_DYN_CONFIG
#ifdef CFG_WITH_PAGER
/* Add heap2 first as heap1 may be too small as initial bget pool */
malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
@@ -915,13 +922,35 @@
__nex_heap_start);
#else
malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
#endif
#endif
IMSG_RAW("\n");
if (IS_ENABLED(CFG_DYN_CONFIG)) {
size_t sz = sizeof(struct thread_core_local) *
CFG_TEE_CORE_NB_CORE;
void *p = boot_mem_alloc(sz, alignof(void *) * 2);
size_t tot_padding_sz = 0;
void *p = NULL;
size_t sz = 0;

if (IS_ENABLED(CFG_NS_VIRTUALIZATION))
sz = CFG_CORE_NEX_HEAP_SIZE;
else
sz = CFG_CORE_HEAP_SIZE;

/*
* thread_core_local and threads are allocated from the
* heap so add the needed sizes to the initial heap.
*/
sz += sizeof(struct thread_core_local) * CFG_TEE_CORE_NB_CORE;
if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
sz += sizeof(struct thread_ctx) * CFG_NUM_THREADS;

boot_mem_foreach_padding(calc_padding_size, &tot_padding_sz);
if (tot_padding_sz > sz - MALLOC_INITIAL_POOL_MIN_SIZE)
sz = MALLOC_INITIAL_POOL_MIN_SIZE;
else
sz -= tot_padding_sz;
sz = ROUNDUP(sz, alignof(void *) * 2);
p = boot_mem_alloc(sz, alignof(void *) * 2);
boot_mem_foreach_padding(add_padding_to_pool, NULL);
#ifdef CFG_NS_VIRTUALIZATION
nex_malloc_add_pool(p, sz);
#else
@@ -932,15 +961,6 @@
core_mmu_save_mem_map();
core_mmu_init_phys_mem();
boot_mem_foreach_padding(add_padding_to_pool, NULL);
va = boot_mem_release_unused();
if (!IS_ENABLED(CFG_WITH_PAGER)) {
/*
* We must update boot_cached_mem_end to reflect the memory
* just unmapped by boot_mem_release_unused().
*/
assert(va && va <= boot_cached_mem_end);
boot_cached_mem_end = va;
}

if (IS_ENABLED(CFG_DYN_CONFIG)) {
/*
@@ -962,9 +982,31 @@
* init_runtime()).
*/
thread_get_core_local()->curr_thread = 0;
if (IS_ENABLED(CFG_DYN_CONFIG)) {
threads = calloc(1, sizeof(*threads));
if (!threads)
panic();
thread_count = 1;
}
init_pager_runtime(pageable_part);
}

va = boot_mem_release_unused();
if (IS_ENABLED(CFG_WITH_PAGER)) {
/*
* Paging is activated, and anything beyond the start of
* the released unused memory is managed by the pager.
*/
assert(va && va <= core_mmu_linear_map_end);
core_mmu_linear_map_end = va;
}
/*
* We must update boot_cached_mem_end to reflect the memory
* just unmapped by boot_mem_release_unused().
*/
assert(va && va <= boot_cached_mem_end);
boot_cached_mem_end = va;

/* Initialize canaries around the stacks */
thread_init_canaries();
thread_init_per_cpu();
@@ -1083,9 +1125,7 @@ void __weak boot_init_primary_runtime(void)
~THREAD_EXCP_NATIVE_INTR);
init_tee_runtime();
}

if (!IS_ENABLED(CFG_WITH_PAGER))
boot_mem_release_tmp_alloc();
boot_mem_release_tmp_alloc();
}

void __weak boot_init_primary_final(void)
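As a reading aid before the assembly changes, here is a standalone C model of the initial-pool sizing added to init_primary() above. It is a sketch only: the MALLOC_INITIAL_POOL_MIN_SIZE value is illustrative, and the real code additionally rounds the result up to the boot_mem_alloc() alignment.

#include <stddef.h>

#define MALLOC_INITIAL_POOL_MIN_SIZE	1024	/* illustrative value */

/*
 * heap_sz is the configured (nexus) heap size, obj_sz the room for
 * the thread_core_local array and, without virtualization, the
 * thread contexts now served from the heap. Large linker-map padding
 * chunks are handed to the allocator as extra pools, so they reduce
 * the carve-out, but never below the smallest usable initial pool.
 */
static size_t initial_pool_size(size_t heap_sz, size_t obj_sz,
				size_t tot_padding_sz)
{
	size_t sz = heap_sz + obj_sz;

	if (tot_padding_sz > sz - MALLOC_INITIAL_POOL_MIN_SIZE)
		return MALLOC_INITIAL_POOL_MIN_SIZE;
	return sz - tot_padding_sz;
}
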
21 changes: 11 additions & 10 deletions core/arch/arm/kernel/entry_a32.S
@@ -451,8 +451,12 @@ shadow_stack_access_ok:
#endif

#if defined(CFG_DYN_CONFIG)
#ifdef CFG_WITH_PAGER
ldr r0, =__vcore_free_end
#else
ldr r0, =boot_embdata_ptr
ldr r0, [r0]
#endif
sub r1, r0, #THREAD_BOOT_INIT_TMP_ALLOC

/* Clear the allocated struct thread_core_local */
@@ -534,23 +538,20 @@ shadow_stack_access_ok:
bl boot_save_args
add sp, sp, #(2 * 4)

ldr r0, =__vcore_free_start
ldr r2, =__vcore_free_end
#ifdef CFG_WITH_PAGER
ldr r0, =__init_end /* pointer to boot_embdata */
ldr r1, [r0] /* struct boot_embdata::total_len */
add r0, r0, r1
mov_imm r1, 0xfff
add r0, r0, r1 /* round up */
bic r0, r0, r1 /* to next page */
mov_imm r1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
mov r2, r1
#ifdef CFG_DYN_CONFIG
sub r1, r2, #THREAD_BOOT_INIT_TMP_ALLOC
#else
mov r1, r2
#endif
#else
ldr r0, =__vcore_free_start
ldr r1, =boot_embdata_ptr
ldr r1, [r1]
#ifdef CFG_DYN_CONFIG
sub r1, r1, #THREAD_BOOT_INIT_TMP_ALLOC
#endif
ldr r2, =__vcore_free_end
#endif
bl boot_mem_init

20 changes: 11 additions & 9 deletions core/arch/arm/kernel/entry_a64.S
@@ -316,8 +316,12 @@ clear_nex_bss:
* Point SP_EL1 a temporary struct thread_core_local before the
* temporary stack.
*/
#ifdef CFG_WITH_PAGER
adr_l x0, __vcore_free_end
#else
adr_l x0, boot_embdata_ptr
ldr x0, [x0]
#endif
sub x1, x0, #THREAD_BOOT_INIT_TMP_ALLOC

/* Clear the allocated struct thread_core_local */
@@ -388,22 +392,20 @@
mov x4, xzr
bl boot_save_args

adr_l x0, __vcore_free_start
adr_l x2, __vcore_free_end
#ifdef CFG_WITH_PAGER
adr_l x0, __init_end /* pointer to boot_embdata */
ldr w1, [x0] /* struct boot_embdata::total_len */
add x0, x0, x1
add x0, x0, #0xfff /* round up */
bic x0, x0, #0xfff /* to next page */
mov_imm x1, (TEE_RAM_PH_SIZE + TEE_RAM_START)
mov x2, x1
#ifdef CFG_DYN_CONFIG
sub x1, x2, #THREAD_BOOT_INIT_TMP_ALLOC
#else
mov x1, x2
#endif
#else
adr_l x0, __vcore_free_start
adr_l x1, boot_embdata_ptr
ldr x1, [x1]
#ifdef CFG_DYN_CONFIG
sub x1, x1, #THREAD_BOOT_INIT_TMP_ALLOC
#endif
adr_l x2, __vcore_free_end
#endif
bl boot_mem_init

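Both entry_a32.S and entry_a64.S now set up the same three arguments; here is an equivalent C model as a reading aid. It is a sketch under assumptions: the boot_mem_init(start, end, orig_end) argument mapping is inferred from the register usage, and SMALL_PAGE_SIZE, TEE_RAM_START, TEE_RAM_PH_SIZE and THREAD_BOOT_INIT_TMP_ALLOC come from platform_config.h and the generated asm-defines.

#include <stdint.h>

typedef uintptr_t vaddr_t;

struct boot_embdata { uint32_t total_len; /* ... */ };

/* Symbols assumed to match the link script and boot code */
extern uint8_t __init_end[], __vcore_free_start[], __vcore_free_end[];
extern void *boot_embdata_ptr;
void boot_mem_init(vaddr_t start, vaddr_t end, vaddr_t orig_end);

static void model_boot_mem_init(void)
{
	vaddr_t start = 0;
	vaddr_t end = 0;
	vaddr_t orig_end = 0;

#ifdef CFG_WITH_PAGER
	/*
	 * Free memory starts at the first page past the boot_embdata
	 * blob that follows __init_end and runs to the end of TEE RAM.
	 */
	start = (vaddr_t)__init_end +
		((struct boot_embdata *)__init_end)->total_len;
	start = (start + SMALL_PAGE_SIZE - 1) &
		~(vaddr_t)(SMALL_PAGE_SIZE - 1);
	orig_end = TEE_RAM_START + TEE_RAM_PH_SIZE;
	end = orig_end;
#else
	start = (vaddr_t)__vcore_free_start;
	end = (vaddr_t)boot_embdata_ptr;
	orig_end = (vaddr_t)__vcore_free_end;
#endif
#ifdef CFG_DYN_CONFIG
	/* Keep the temporary boot allocation above the free area */
	end -= THREAD_BOOT_INIT_TMP_ALLOC;
#endif
	boot_mem_init(start, end, orig_end);
}
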
74 changes: 65 additions & 9 deletions core/arch/arm/kernel/kern.ld.S
@@ -49,6 +49,8 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#include <generated/asm-defines.h>
#include <kernel/thread_private_arch.h>
#include <mm/core_mmu.h>
#include <platform_config.h>
#include <util.h>
@@ -187,6 +189,7 @@
__nex_bss_end = .;
}

#ifndef CFG_DYN_CONFIG
/*
* We want to keep all nexus memory in one place, because
* it should be always mapped and it is easier to map one
@@ -212,6 +215,7 @@
ASSERT(!(ABSOLUTE(.) & (16 * 1024 - 1)), "align nozi to 16kB");
KEEP(*(.nozi.mmu.base_table .nozi.mmu.l2))
}
#endif

. = ALIGN(SMALL_PAGE_SIZE);

@@ -242,6 +246,12 @@
__bss_end = .;
}

#if defined(CFG_DYN_CONFIG) && !defined(CFG_WITH_PAGER)
. = ALIGN(SMALL_PAGE_SIZE);
__flatmap_free_start = .;
#endif

#ifndef CFG_DYN_CONFIG
.heap1 (NOLOAD) : {
/*
* We're keeping track of the padding added before the
@@ -279,11 +289,7 @@
. = ALIGN(8);
__nozi_stack_end = .;
}
#ifndef CFG_WITH_PAGER
. = ALIGN(SMALL_PAGE_SIZE);
__flatmap_free_start = .;
__flatmap_unpg_rw_size = __flatmap_free_start - __flatmap_unpg_rw_start;
#else
#ifdef CFG_WITH_PAGER
.heap2 (NOLOAD) : {
__heap2_start = .;
/*
@@ -295,6 +301,53 @@
. = ALIGN(SMALL_PAGE_SIZE);
__heap2_end = .;
}
#endif
#ifndef CFG_WITH_PAGER
. = ALIGN(SMALL_PAGE_SIZE);
__flatmap_free_start = .;
#endif
#endif /*!CFG_DYN_CONFIG*/

#ifdef CFG_WITH_PAGER
/*
* This memory is used by the boot_mem*() functions during boot.
* Enough memory must be carved out to support the worst case
* memory allocation, but the remaining unused memory will be
* returned to be managed by the pager at end of boot. The tradeoff
* is that the init code follows this section and must also fit in
* the physical memory.
*/
.pager_boot_mem (NOLOAD): {
#ifdef CFG_DYN_CONFIG
#ifdef CFG_WITH_LPAE
XLAT_TABLE_SIZE = SMALL_PAGE_SIZE;
#ifndef MAX_XLAT_TABLES
MAX_XLAT_TABLES = 3 /* ASLR_EXTRA */ + 5 /* TEE_EXTRA */;
#endif
/* Base tables */
. += CORE_MMU_BASE_TABLE_OFFSET * CFG_TEE_CORE_NB_CORE;
/* Per thread EL0 tables */
. += CFG_NUM_THREADS * XLAT_TABLE_SIZE;
#else
XLAT_TABLE_SIZE = 1024;
#ifndef MAX_XLAT_TABLES
MAX_XLAT_TABLES = 6 + 2 /* ASLR_EXTRA */;
#endif
. += 16 * 1024; /* Main L1 table */
. += 64 * 4 * CFG_NUM_THREADS; /* L1 table for TAs */
#endif
. += MAX_XLAT_TABLES * XLAT_TABLE_SIZE;
. += CFG_CORE_HEAP_SIZE;
. += CFG_NUM_THREADS * THREAD_CTX_SIZE;
. += CFG_TEE_CORE_NB_CORE * THREAD_CORE_LOCAL_SIZE;
. += THREAD_BOOT_INIT_TMP_ALLOC;
#else
. += SMALL_PAGE_SIZE;
#endif
. = ALIGN(SMALL_PAGE_SIZE);
}
__flatmap_free_start = ADDR(.pager_boot_mem);
__flatmap_free_size = SIZEOF(.pager_boot_mem);

/* Start page aligned read-only memory */
__flatmap_unpg_rw_size = . - __flatmap_unpg_rw_start;
Expand Down Expand Up @@ -376,15 +429,19 @@ SECTIONS
"Load address before start of physical memory")
ASSERT(TEE_LOAD_ADDR < (TEE_RAM_START + TEE_RAM_PH_SIZE),
"Load address after end of physical memory")
ASSERT(TEE_RAM_START + TEE_RAM_PH_SIZE > __init_end,
"TEE_RAM_PH_SIZE too small")
ASSERT((TEE_RAM_START + TEE_RAM_PH_SIZE - __init_end) >
SMALL_PAGE_SIZE * 2 +
(__pageable_end - __pageable_start) / 4096 * 32 +
SIZEOF(.rel) / 2 + SIZEOF(.rela) / 3 ,
"Too few free pages to initialize paging")


#endif /*CFG_WITH_PAGER*/

#ifndef CFG_WITH_PAGER
__flatmap_free_start = .;
#endif

#ifdef CFG_CORE_SANITIZE_KADDRESS
/*
* Guard against moving the location counter backwards in the assignment
@@ -426,6 +483,7 @@
#ifndef CFG_CORE_SANITIZE_KADDRESS
__flatmap_free_size = _end_of_ram - __flatmap_free_start;
#endif
__flatmap_unpg_rw_size = __flatmap_free_start - __flatmap_unpg_rw_start;
#endif

/*
@@ -476,11 +534,9 @@ __vcore_unpg_rw_start = __flatmap_unpg_rw_start;
__vcore_unpg_rw_size = __flatmap_unpg_rw_size;
__vcore_unpg_rw_end = __vcore_unpg_rw_start + __vcore_unpg_rw_size;

#ifndef CFG_WITH_PAGER
__vcore_free_start = __flatmap_free_start;
__vcore_free_size = __flatmap_free_size;
__vcore_free_end = __flatmap_free_start + __flatmap_free_size;
#endif

#ifdef CFG_NS_VIRTUALIZATION
/* Nexus read-write memory */
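To cross-check the new .pager_boot_mem arithmetic, here is the same worst-case tally written out in C for the LPAE + CFG_DYN_CONFIG case. Every constant marked as assumed or example is illustrative; the real values come from platform_config.h and the generated asm-defines.

/* Illustrative worst-case tally of .pager_boot_mem (LPAE, dyn config) */
#define SMALL_PAGE_SIZE			4096
#define XLAT_TABLE_SIZE			SMALL_PAGE_SIZE
#define MAX_XLAT_TABLES			(3 /* ASLR extra */ + 5 /* TEE extra */)
#define CORE_MMU_BASE_TABLE_OFFSET	SMALL_PAGE_SIZE		/* assumed */
#define CFG_TEE_CORE_NB_CORE		4			/* example */
#define CFG_NUM_THREADS			8			/* example */
#define CFG_CORE_HEAP_SIZE		(64 * 1024)		/* example */
#define THREAD_CTX_SIZE			1024			/* assumed */
#define THREAD_CORE_LOCAL_SIZE		256			/* assumed */
#define THREAD_BOOT_INIT_TMP_ALLOC	(4 * SMALL_PAGE_SIZE)	/* assumed */

/* Sum of the ". +=" lines in .pager_boot_mem, before page alignment */
static const unsigned long pager_boot_mem_sz =
	CORE_MMU_BASE_TABLE_OFFSET * CFG_TEE_CORE_NB_CORE + /* base tables */
	CFG_NUM_THREADS * XLAT_TABLE_SIZE +	/* per-thread EL0 tables */
	MAX_XLAT_TABLES * XLAT_TABLE_SIZE +
	CFG_CORE_HEAP_SIZE +
	CFG_NUM_THREADS * THREAD_CTX_SIZE +
	CFG_TEE_CORE_NB_CORE * THREAD_CORE_LOCAL_SIZE +
	THREAD_BOOT_INIT_TMP_ALLOC;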