diff --git a/code/amcssth.c b/code/amcssth.c index 35bd6a14ed..56011acfc4 100644 --- a/code/amcssth.c +++ b/code/amcssth.c @@ -108,19 +108,22 @@ static mps_res_t area_scan(mps_ss_t ss, void *base, void *limit, void *closure) static void churn(mps_ap_t ap, size_t roots_count) { - size_t i; - size_t r; + size_t i, j, r; ++objs; r = (size_t)rnd(); if (r & 1) { + mps_addr_t root; i = (r >> 1) % exactRootsCOUNT; - if (exactRoots[i] != objNULL) - cdie(dylan_check(exactRoots[i]), "dying root check"); - exactRoots[i] = make(ap, roots_count); - if (exactRoots[(exactRootsCOUNT-1) - i] != objNULL) - dylan_write(exactRoots[(exactRootsCOUNT-1) - i], - exactRoots, exactRootsCOUNT); + atomic_load(&exactRoots[i], &root); + if (root != objNULL) + cdie(dylan_check(root), "dying root check"); + root = make(ap, roots_count); + atomic_store(&exactRoots[i], &root); + j = exactRootsCOUNT - i - 1; + atomic_load(&exactRoots[j], &root); + if (root != objNULL) + dylan_write(root, exactRoots, exactRootsCOUNT); } else { i = (r >> 1) % ambigRootsCOUNT; ambigRoots[(ambigRootsCOUNT-1) - i] = make(ap, roots_count); @@ -221,9 +224,11 @@ static void test_pool(const char *name, mps_pool_t pool, size_t roots_count) (unsigned long)collections, objs, (unsigned long)mps_arena_committed(arena)); - for (i = 0; i < exactRootsCOUNT; ++i) - cdie(exactRoots[i] == objNULL || dylan_check(exactRoots[i]), - "all roots check"); + for (i = 0; i < exactRootsCOUNT; ++i) { + mps_addr_t root; + atomic_load(&exactRoots[i], &root); + cdie(root == objNULL || dylan_check(root), "all roots check"); + } if (collections >= collectionsCOUNT / 2 && !walked) { @@ -248,9 +253,12 @@ static void test_pool(const char *name, mps_pool_t pool, size_t roots_count) ramping = 0; /* kill half of the roots */ for(i = 0; i < exactRootsCOUNT; i += 2) { - if (exactRoots[i] != objNULL) { - cdie(dylan_check(exactRoots[i]), "ramp kill check"); - exactRoots[i] = objNULL; + mps_addr_t root; + atomic_load(&exactRoots[i], &root); + if (root != 
objNULL) { + cdie(dylan_check(root), "ramp kill check"); + root = objNULL; + atomic_store(&exactRoots[i], &root); } } } @@ -298,6 +306,8 @@ static void test_arena(void) mps_pool_t amc_pool, amcz_pool; void *marker = &marker; + die(dylan_make_wrappers(), "make wrappers"); + MPS_ARGS_BEGIN(args) { MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, testArenaSIZE); MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, rnd_grain(testArenaSIZE)); diff --git a/code/config.h b/code/config.h index aa9b70c7cc..987f4a8034 100644 --- a/code/config.h +++ b/code/config.h @@ -710,6 +710,23 @@ #define WB_DEFER_HIT 1 /* boring scans after barrier hit */ +/* Apple Hardened Runtime + * + * .hardened-runtime: On Apple Silicon, applications may be compiled + * with Hardened Runtime enabled. These applications have restricted + * capabilities: in particular, unless the "Allow Unsigned Executable + * Memory Entitlement" is enabled, these applications cannot create + * memory that is simultaneously writable and executable. Attempts to + * do so using mmap() and mprotect() fail with EACCES. + * + * See <https://developer.apple.com/documentation/security/hardened_runtime> + */ +#if defined(MPS_OS_XC) && defined(MPS_ARCH_A6) +#define MAYBE_HARDENED_RUNTIME 1 +#else +#define MAYBE_HARDENED_RUNTIME 0 +#endif + + #endif /* config_h */ diff --git a/code/fmtdytst.c b/code/fmtdytst.c index 6a48860fe9..97b1f30e8e 100644 --- a/code/fmtdytst.c +++ b/code/fmtdytst.c @@ -68,7 +68,7 @@ mps_res_t dylan_make_wrappers(void) return MPS_RES_OK; } -/* dylan_init -- turn raw memory into initialised dylan-vector (or pad) +/* dylan_init -- turn raw memory into initialised dylan-vector * * If the raw memory is large enough, initialises it to a dylan-vector, * whose slots are initialised to either dylan-ints, or valid refs, at @@ -78,8 +78,6 @@ mps_res_t dylan_make_wrappers(void) * and "nr_refs" arguments. If "nr_refs" is 0, all slots are * initialized to dylan-ints: this may be useful for making leaf * objects. 
- * - * (Makes a pad if the raw memory is too small to hold a dylan-vector) */ mps_res_t dylan_init(mps_addr_t addr, size_t size, @@ -93,8 +91,7 @@ mps_res_t dylan_init(mps_addr_t addr, size_t size, if (res != MPS_RES_OK) return res; - /* If there is enough room, make a vector, otherwise just */ - /* make a padding object. */ + /* If there is enough room, make a vector. */ if(size >= sizeof(mps_word_t) * 2) { mps_word_t *p = (mps_word_t *)addr; mps_word_t i, t = (size / sizeof(mps_word_t)) - 2; @@ -110,7 +107,7 @@ mps_res_t dylan_init(mps_addr_t addr, size_t size, p[2+i] = (mps_word_t)refs[(r >> 1) % nr_refs]; /* random ptr */ } } else { - dylan_pad(addr, size); + return MPS_RES_UNIMPL; } return MPS_RES_OK; diff --git a/code/protix.c b/code/protix.c index 3c9f520398..2f1ad1f62e 100644 --- a/code/protix.c +++ b/code/protix.c @@ -41,13 +41,21 @@ #include "vm.h" +#include <errno.h> #include <limits.h> +#include <signal.h> /* sig_atomic_t */ #include <stddef.h> #include <stdlib.h> #include <sys/mman.h> SRCID(protix, "$Id$"); + +/* Value for memory protection corresponding to AccessSetEMPTY. */ + +static sig_atomic_t prot_all = PROT_READ | PROT_WRITE | PROT_EXEC; + + /* ProtSet -- set protection * * This is just a thin veneer on top of mprotect(2). @@ -55,7 +63,7 @@ SRCID(protix, "$Id$"); void ProtSet(Addr base, Addr limit, AccessSet mode) { - int flags; + int flags, result; AVER(sizeof(size_t) == sizeof(Addr)); AVER(base < limit); @@ -82,7 +90,7 @@ void ProtSet(Addr base, Addr limit, AccessSet mode) flags = PROT_READ | PROT_EXEC; break; case AccessSetEMPTY: - flags = PROT_READ | PROT_WRITE | PROT_EXEC; + flags = prot_all; break; default: NOTREACHED; @@ -90,7 +98,15 @@ void ProtSet(Addr base, Addr limit, AccessSet mode) } /* .assume.mprotect.base */ - if(mprotect((void *)base, (size_t)AddrOffset(base, limit), flags) != 0) + result = mprotect((void *)base, (size_t)AddrOffset(base, limit), flags); + if (MAYBE_HARDENED_RUNTIME && result != 0 && errno == EACCES + && (flags & PROT_WRITE) && (flags & PROT_EXEC)) + { + /* See .hardened-runtime in config.h. 
*/ + prot_all = PROT_READ | PROT_WRITE; + result = mprotect((void *)base, (size_t)AddrOffset(base, limit), flags & prot_all); + } + if (result != 0) NOTREACHED; } diff --git a/code/testlib.h b/code/testlib.h index f140b0a9e1..f86635bc71 100644 --- a/code/testlib.h +++ b/code/testlib.h @@ -292,6 +292,25 @@ extern void randomize(int argc, char *argv[]); extern void testlib_init(int argc, char *argv[]); +/* Memory-model-aware operations */ + +#if defined(MPS_BUILD_GC) || defined(MPS_BUILD_LL) + +/* See <https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html> + * and <https://clang.llvm.org/docs/LanguageExtensions.html> */ +#define atomic_load(SRC, DEST) __atomic_load(SRC, DEST, __ATOMIC_ACQUIRE) +#define atomic_store(DEST, SRC) __atomic_store(DEST, SRC, __ATOMIC_RELEASE) + +#elif defined(MPS_BUILD_MV) + +/* Microsoft Visual C/C++ does not need memory-model-aware load and store as + * loads and stores of register-sized values are atomic on Intel. */ +#define atomic_load(SRC, DEST) (*(DEST) = *(SRC)) +#define atomic_store(DEST, SRC) (*(DEST) = *(SRC)) + +#endif + + #endif /* testlib_h */ diff --git a/code/vmix.c b/code/vmix.c index 418c26f700..941939f170 100644 --- a/code/vmix.c +++ b/code/vmix.c @@ -47,6 +47,7 @@ #include "vm.h" #include <errno.h> /* errno */ +#include <signal.h> /* sig_atomic_t */ #include <stdlib.h> /* see .feature.li in config.h */ #include <sys/mman.h> /* mmap, munmap */ #include <unistd.h> /* getpagesize */ @@ -156,11 +157,17 @@ void VMFinish(VM vm) } +/* Value to use for protection of newly allocated pages. 
*/ + +static sig_atomic_t vm_prot = PROT_READ | PROT_WRITE | PROT_EXEC; + + /* VMMap -- map the given range of memory */ Res VMMap(VM vm, Addr base, Addr limit) { Size size; + void *result; AVERT(VM, vm); AVER(sizeof(void *) == sizeof(Addr)); @@ -172,11 +179,19 @@ Res VMMap(VM vm, Addr base, Addr limit) size = AddrOffset(base, limit); - if(mmap((void *)base, (size_t)size, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_ANON | MAP_PRIVATE | MAP_FIXED, - -1, 0) - == MAP_FAILED) { + result = mmap((void *)base, (size_t)size, vm_prot, + MAP_ANON | MAP_PRIVATE | MAP_FIXED, + -1, 0); + if (MAYBE_HARDENED_RUNTIME && result == MAP_FAILED && errno == EACCES + && (vm_prot & PROT_WRITE) && (vm_prot & PROT_EXEC)) + { + /* See .hardened-runtime in config.h. */ + vm_prot = PROT_READ | PROT_WRITE; + result = mmap((void *)base, (size_t)size, vm_prot, + MAP_ANON | MAP_PRIVATE | MAP_FIXED, + -1, 0); + } + if (result == MAP_FAILED) { AVER(errno == ENOMEM); /* .assume.mmap.err */ return ResMEMORY; } diff --git a/test/function/136.c b/test/function/136.c index 5ad00dcbf4..a326797ed9 100644 --- a/test/function/136.c +++ b/test/function/136.c @@ -6,7 +6,7 @@ TEST_HEADER link = testlib.o OUTPUT_SPEC assert = true - limit < 160000 + limit_grains < 40 END_HEADER */ @@ -155,22 +155,36 @@ static void test(void *stack_pointer) { mps_thr_t thread; int symm; - size_t grainSize = 4096; + size_t grainSize, smallSize = 8; size_t comlimit; mps_bool_t slotHigh, arenaHigh, firstFit; + mps_pool_t pool; + mps_addr_t addr; MPS_ARGS_BEGIN(args) { MPS_ARGS_ADD(args, MPS_KEY_ARENA_SIZE, 1024*1024*50); - MPS_ARGS_ADD(args, MPS_KEY_ARENA_GRAIN_SIZE, grainSize); cdie(mps_arena_create_k(&arena, mps_arena_class_vm(), args), "create arena"); } MPS_ARGS_END(args); + /* Deduce arena grain size by creating an MVFF pool, allocating a + * small object, and querying the total size. 
*/ + MPS_ARGS_BEGIN(args) { + MPS_ARGS_ADD(args, MPS_KEY_EXTEND_BY, smallSize); + MPS_ARGS_ADD(args, MPS_KEY_MEAN_SIZE, smallSize); + die(mps_pool_create_k(&pool, arena, mps_class_mvff(), args), + "create MVFF pool"); + } MPS_ARGS_END(args); + die(mps_alloc(&addr, pool, smallSize), "allocate small object"); + grainSize = mps_pool_total_size(pool); + mps_free(pool, addr, smallSize); + mps_pool_destroy(pool); + cdie(mps_thread_reg(&thread, arena), "register thread"); for (comlimit = 128 * grainSize; comlimit > 0; comlimit -= grainSize) { mps_arena_commit_limit_set(arena, comlimit); - report("limit", "%d", comlimit); + report("limit_grains", "%d", comlimit / grainSize); symm = ranint(8); slotHigh = (symm >> 2) & 1; arenaHigh = (symm >> 1) & 1;