diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
index 1ab63c1159..f90e3e0dcf 100644
--- a/.github/workflows/build-and-test.yml
+++ b/.github/workflows/build-and-test.yml
@@ -66,6 +66,16 @@ jobs:
nmake /f w3i6mv.nmk all testci testansi testpollnone
shell: cmd
+ # See design.mps.tests.ci.run.other.deep.
+ deep-check:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - run: ./configure
+ - run: make test-make-build-deep
+
# A. REFERENCES
#
diff --git a/Makefile.in b/Makefile.in
index a1e8759406..a8b468fa00 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -79,6 +79,9 @@ test-make-build:
$(MAKE) -C code -f anan$(MPS_BUILD_NAME).gmk VARIETY=cool clean testansi
$(MAKE) -C code -f anan$(MPS_BUILD_NAME).gmk VARIETY=cool CFLAGS="-DCONFIG_POLL_NONE" clean testpollnone
+test-make-build-deep:
+ $(MAKE) $(TARGET_OPTS) VARIETY=cool CFLAGS='-DCHECKLEVEL=CheckLevelDEEP' testci
+
test-xcode-build:
$(XCODEBUILD) -config Debug -target testci
$(XCODEBUILD) -config Release -target testci
@@ -88,7 +91,7 @@ test: @TEST_TARGET@
# C. COPYRIGHT AND LICENSE
#
-# Copyright (C) 2012-2020 Ravenbrook Limited .
+# Copyright (C) 2012-2023 Ravenbrook Limited .
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
diff --git a/code/dbgpool.c b/code/dbgpool.c
index 54ab6864bb..306bcf8177 100644
--- a/code/dbgpool.c
+++ b/code/dbgpool.c
@@ -356,12 +356,12 @@ static void debugPoolSegIterate(Arena arena, Addr base, Addr limit,
static void debugPoolShieldExpose(Arena arena, Seg seg)
{
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
}
static void debugPoolShieldCover(Arena arena, Seg seg)
{
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
}
diff --git a/code/global.c b/code/global.c
index 6c7cd39b94..12223f342a 100644
--- a/code/global.c
+++ b/code/global.c
@@ -78,7 +78,7 @@ void GlobalsReleaseAll(void)
static void arenaReinitLock(Arena arena)
{
AVERT(Arena, arena);
- ShieldLeave(arena);
+ ShieldLeave(ArenaShield(arena));
LockInit(ArenaGlobals(arena)->lock);
}
@@ -579,7 +579,7 @@ void ArenaEnterLock(Arena arena, Bool recursive)
if(recursive) {
/* already in shield */
} else {
- ShieldEnter(arena);
+ ShieldEnter(ArenaShield(arena));
}
}
@@ -611,7 +611,7 @@ void ArenaLeaveLock(Arena arena, Bool recursive)
if(recursive) {
/* no need to leave shield */
} else {
- ShieldLeave(arena);
+ ShieldLeave(ArenaShield(arena));
}
ProtSync(arena); /* */
if(recursive) {
@@ -917,9 +917,9 @@ Ref ArenaPeekSeg(Arena arena, Seg seg, Ref *p)
/* We don't need to update the Seg Summary as in PoolSingleAccess
* because we are not changing it after it has been scanned. */
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
ref = *p;
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
return ref;
}
@@ -953,12 +953,12 @@ void ArenaPokeSeg(Arena arena, Seg seg, Ref *p, Ref ref)
/* TODO: Consider checking p's alignment using seg->pool->alignment */
/* ref is arbitrary and can't be checked */
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
*p = ref;
summary = SegSummary(seg);
summary = RefSetAdd(arena, summary, (Addr)ref);
SegSetSummary(seg, summary);
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
}
/* ArenaRead -- like ArenaPeek, but reference known to be owned by arena */
diff --git a/code/ld.c b/code/ld.c
index 483c2f90cb..51342e786f 100644
--- a/code/ld.c
+++ b/code/ld.c
@@ -149,11 +149,11 @@ void LDReset(mps_ld_t ld, Arena arena)
b = SegOfAddr(&seg, arena, (Addr)ld);
if (b)
- ShieldExpose(arena, seg); /* .ld.access */
+ ShieldExpose(ArenaShield(arena), seg); /* .ld.access */
ld->_epoch = ArenaHistory(arena)->epoch;
ld->_rs = RefSetEMPTY;
if (b)
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
}
diff --git a/code/mpm.h b/code/mpm.h
index 9979d20180..a5941ec4c7 100644
--- a/code/mpm.h
+++ b/code/mpm.h
@@ -872,37 +872,38 @@ extern ZoneSet ZoneSetBlacklist(Arena arena);
/* Shield Interface -- see */
+#define ShieldArena(shield) PARENT(ArenaStruct, shieldStruct, shield)
extern void ShieldInit(Shield shield);
extern void ShieldFinish(Shield shield);
extern Bool ShieldCheck(Shield shield);
extern Res ShieldDescribe(Shield shield, mps_lib_FILE *stream, Count depth);
extern void ShieldDestroyQueue(Shield shield, Arena arena);
-extern void (ShieldRaise)(Arena arena, Seg seg, AccessSet mode);
-extern void (ShieldLower)(Arena arena, Seg seg, AccessSet mode);
-extern void (ShieldEnter)(Arena arena);
-extern void (ShieldLeave)(Arena arena);
-extern void (ShieldExpose)(Arena arena, Seg seg);
-extern void (ShieldCover)(Arena arena, Seg seg);
-extern void (ShieldHold)(Arena arena);
-extern void (ShieldRelease)(Arena arena);
-extern void (ShieldFlush)(Arena arena);
+extern void (ShieldRaise)(Shield shield, Seg seg, AccessSet mode);
+extern void (ShieldLower)(Shield shield, Seg seg, AccessSet mode);
+extern void (ShieldEnter)(Shield shield);
+extern void (ShieldLeave)(Shield shield);
+extern void (ShieldExpose)(Shield shield, Seg seg);
+extern void (ShieldCover)(Shield shield, Seg seg);
+extern void (ShieldHold)(Shield shield);
+extern void (ShieldRelease)(Shield shield);
+extern void (ShieldFlush)(Shield shield);
#if defined(SHIELD)
/* Nothing to do: functions declared in all shield configurations. */
#elif defined(SHIELD_NONE)
-#define ShieldRaise(arena, seg, mode) \
- BEGIN UNUSED(arena); UNUSED(seg); UNUSED(mode); END
-#define ShieldLower(arena, seg, mode) \
- BEGIN UNUSED(arena); UNUSED(seg); UNUSED(mode); END
-#define ShieldEnter(arena) BEGIN UNUSED(arena); END
-#define ShieldLeave(arena) AVER(arena->busyTraces == TraceSetEMPTY)
-#define ShieldExpose(arena, seg) \
- BEGIN UNUSED(arena); UNUSED(seg); END
-#define ShieldCover(arena, seg) \
- BEGIN UNUSED(arena); UNUSED(seg); END
-#define ShieldHold(arena) BEGIN UNUSED(arena); END
-#define ShieldRelease(arena) BEGIN UNUSED(arena); END
-#define ShieldFlush(arena) BEGIN UNUSED(arena); END
+#define ShieldRaise(shield, seg, mode) \
+ BEGIN UNUSED(shield); UNUSED(seg); UNUSED(mode); END
+#define ShieldLower(shield, seg, mode) \
+ BEGIN UNUSED(shield); UNUSED(seg); UNUSED(mode); END
+#define ShieldEnter(shield) BEGIN UNUSED(shield); END
+#define ShieldLeave(shield) AVER(ShieldArena(shield)->busyTraces == TraceSetEMPTY)
+#define ShieldExpose(shield, seg) \
+ BEGIN UNUSED(shield); UNUSED(seg); END
+#define ShieldCover(shield, seg) \
+ BEGIN UNUSED(shield); UNUSED(seg); END
+#define ShieldHold(shield) BEGIN UNUSED(shield); END
+#define ShieldRelease(shield) BEGIN UNUSED(shield); END
+#define ShieldFlush(shield) BEGIN UNUSED(shield); END
#else
#error "No shield configuration."
#endif /* SHIELD */
diff --git a/code/mpmst.h b/code/mpmst.h
index e03033ae99..0e8800ffb3 100644
--- a/code/mpmst.h
+++ b/code/mpmst.h
@@ -669,13 +669,14 @@ typedef struct ShieldStruct {
Sig sig; /* design.mps.sig.field */
BOOLFIELD(inside); /* */
BOOLFIELD(suspended); /* mutator suspended? */
- BOOLFIELD(queuePending); /* queue insertion pending? */
Seg *queue; /* queue of unsynced segs */
Count length; /* number of elements in shield queue */
Index next; /* next free element in shield queue */
Index limit; /* high water mark for cache usage */
Count depth; /* sum of depths of all segs */
Count unsynced; /* number of unsynced segments */
+ Count unqueued; /* number of unsynced unqueued segments */
+ Count synced; /* number of synced queued segments */
Count holds; /* number of holds */
SortStruct sortStruct; /* workspace for queue sort */
} ShieldStruct;
diff --git a/code/poolamc.c b/code/poolamc.c
index 8a29aee552..a22ef0e2e6 100644
--- a/code/poolamc.c
+++ b/code/poolamc.c
@@ -985,9 +985,9 @@ static Res AMCBufferFill(Addr *baseReturn, Addr *limitReturn,
AVER(SizeIsAligned(padSize, PoolAlignment(pool)));
AVER(AddrAdd(limit, padSize) == SegLimit(seg));
if(padSize > 0) {
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
(*pool->format->pad)(limit, padSize);
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
}
}
@@ -1032,9 +1032,9 @@ static void amcSegBufferEmpty(Seg seg, Buffer buffer)
/* */
if (init < limit) {
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
(*pool->format->pad)(init, AddrOffset(init, limit));
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
}
/* Any allocation in the buffer (including the padding object just
@@ -1508,9 +1508,9 @@ static Res amcSegFixEmergency(Seg seg, ScanState ss, Ref *refIO)
if(ss->rank == RankAMBIG)
goto fixInPlace;
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
newRef = (*pool->format->isMoved)(*refIO);
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
if(newRef != (Addr)0) {
/* Object has been forwarded already, so snap-out pointer. */
/* TODO: Implement weak pointer semantics in emergency fixing. This
@@ -1592,7 +1592,7 @@ static Res amcSegFix(Seg seg, ScanState ss, Ref *refIO)
/* .exposed.seg: Statements tagged ".exposed.seg" below require */
/* that "seg" (that is: the 'from' seg) has been ShieldExposed. */
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
newRef = (*format->isMoved)(ref); /* .exposed.seg */
if(newRef == (Addr)0) {
@@ -1637,7 +1637,7 @@ static Res amcSegFix(Seg seg, ScanState ss, Ref *refIO)
newRef = AddrAdd(newBase, headerSize);
toSeg = BufferSeg(buffer);
- ShieldExpose(arena, toSeg);
+ ShieldExpose(ArenaShield(arena), toSeg);
/* Since we're moving an object from one segment to another, */
/* union the greyness and the summaries together. */
@@ -1653,7 +1653,7 @@ static Res amcSegFix(Seg seg, ScanState ss, Ref *refIO)
/* */
(void)AddrCopy(newBase, base, length); /* .exposed.seg */
- ShieldCover(arena, toSeg);
+ ShieldCover(ArenaShield(arena), toSeg);
} while (!BUFFER_COMMIT(buffer, newBase, length));
STATISTIC(ss->copiedSize += length);
@@ -1675,7 +1675,7 @@ static Res amcSegFix(Seg seg, ScanState ss, Ref *refIO)
res = ResOK;
returnRes:
- ShieldCover(arena, seg); /* .exposed.seg */
+ ShieldCover(ArenaShield(arena), seg); /* .exposed.seg */
return res;
}
@@ -1706,7 +1706,7 @@ static void amcSegReclaimNailed(Pool pool, Trace trace, Seg seg)
/* see for improvements */
headerSize = format->headerSize;
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
p = SegBase(seg);
limit = SegBufferScanLimit(seg);
padBase = p;
@@ -1753,7 +1753,7 @@ static void amcSegReclaimNailed(Pool pool, Trace trace, Seg seg)
(*format->pad)(padBase, padLength);
STATISTIC(bytesReclaimed += padLength);
}
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
SegSetNailed(seg, TraceSetDel(SegNailed(seg), trace));
SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
@@ -1887,9 +1887,9 @@ static void amcWalkAll(Pool pool, FormattedObjectsVisitor f, void *p, size_t s)
RING_FOR(node, ring, next) {
Seg seg = SegOfPoolRing(node);
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
amcSegWalk(seg, format, f, p, s);
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
}
}
diff --git a/code/poolams.c b/code/poolams.c
index a76c5a86d8..7a24da0fca 100644
--- a/code/poolams.c
+++ b/code/poolams.c
@@ -1491,10 +1491,11 @@ static Res amsSegFix(Seg seg, ScanState ss, Ref *refIO)
if (SegRankSet(seg) == RankSetEMPTY && ss->rank != RankAMBIG) {
/* */
Addr clientNext, next;
+ Shield shield = ArenaShield(PoolArena(pool));
- ShieldExpose(PoolArena(pool), seg);
+ ShieldExpose(shield, seg);
clientNext = (*pool->format->skip)(clientRef);
- ShieldCover(PoolArena(pool), seg);
+ ShieldCover(shield, seg);
next = AddrSub(clientNext, format->headerSize);
/* Part of the object might be grey, because of ambiguous */
/* fixes, but that's OK, because scan will ignore that. */
diff --git a/code/poolawl.c b/code/poolawl.c
index 17347d7af5..4e60d1bc9c 100644
--- a/code/poolawl.c
+++ b/code/poolawl.c
@@ -869,7 +869,7 @@ static Res awlScanObject(Arena arena, AWL awl, ScanState ss,
dependent = SegOfAddr(&dependentSeg, arena, dependentObject);
if (dependent) {
/* */
- ShieldExpose(arena, dependentSeg);
+ ShieldExpose(ArenaShield(arena), dependentSeg);
/* */
SegSetSummary(dependentSeg, RefSetUNIV);
}
@@ -877,7 +877,7 @@ static Res awlScanObject(Arena arena, AWL awl, ScanState ss,
res = TraceScanFormat(ss, base, limit);
if (dependent)
- ShieldCover(arena, dependentSeg);
+ ShieldCover(ArenaShield(arena), dependentSeg);
return res;
}
diff --git a/code/poolmrg.c b/code/poolmrg.c
index 426aaf8667..631fd64a07 100644
--- a/code/poolmrg.c
+++ b/code/poolmrg.c
@@ -829,13 +829,13 @@ static Res MRGDescribe(Inst inst, mps_lib_FILE *stream, Count depth)
Bool outsideShield = !ArenaShield(arena)->inside;
refPart = MRGRefPartOfLink(linkOfRing(node), arena);
if (outsideShield) {
- ShieldEnter(arena);
+ ShieldEnter(ArenaShield(arena));
}
res = WriteF(stream, depth + 2, "at $A Ref $A\n",
(WriteFA)refPart, (WriteFA)MRGRefPartRef(arena, refPart),
NULL);
if (outsideShield) {
- ShieldLeave(arena);
+ ShieldLeave(ArenaShield(arena));
}
if (res != ResOK)
return res;
diff --git a/code/poolsnc.c b/code/poolsnc.c
index 6b9d87f21e..75b75b7063 100644
--- a/code/poolsnc.c
+++ b/code/poolsnc.c
@@ -287,9 +287,9 @@ static void sncRecordFreeSeg(Arena arena, SNC snc, Seg seg)
SegSetRankAndSummary(seg, RankSetEMPTY, RefSetEMPTY);
/* Pad the whole segment so we don't try to walk it. */
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
(*SNCPool(snc)->format->pad)(SegBase(seg), SegSize(seg));
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
sncSegSetNext(seg, snc->freeSegs);
snc->freeSegs = seg;
@@ -498,9 +498,9 @@ static void sncSegBufferEmpty(Seg seg, Buffer buffer)
/* Pad the unused space at the end of the segment */
if (init < limit) {
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
(*pool->format->pad)(init, AddrOffset(init, limit));
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
}
}
diff --git a/code/protan.c b/code/protan.c
index c83a25197c..c2335c320d 100644
--- a/code/protan.c
+++ b/code/protan.c
@@ -60,9 +60,9 @@ void ProtSync(Arena arena)
if (SegFirst(&seg, arena)) {
do {
if (SegPM(seg) != AccessSetEMPTY) { /* */
- ShieldEnter(arena);
+ ShieldEnter(ArenaShield(arena));
TraceSegAccess(arena, seg, SegPM(seg));
- ShieldLeave(arena);
+ ShieldLeave(ArenaShield(arena));
synced = FALSE;
}
} while(SegNext(&seg, arena, seg));
diff --git a/code/seg.c b/code/seg.c
index 9782e43b3c..441fc6020d 100644
--- a/code/seg.c
+++ b/code/seg.c
@@ -185,16 +185,17 @@ static void segAbsFinish(Inst inst)
AVERT(Seg, seg);
- RingRemove(SegPoolRing(seg));
-
arena = PoolArena(SegPool(seg));
+ /* Don't finish exposed segments (design.mps.shield.def.depth). */
+ AVER(seg->depth == 0);
+
/* TODO: It would be good to avoid deprotecting segments eagerly
when we free them, especially if they're going to be
unmapped. This would require tracking of protection independent
of the existence of a SegStruct. */
if (seg->sm != AccessSetEMPTY) {
- ShieldLower(arena, seg, seg->sm);
+ ShieldLower(ArenaShield(arena), seg, seg->sm);
}
seg->rankSet = RankSetEMPTY;
@@ -202,11 +203,17 @@ static void segAbsFinish(Inst inst)
/* See */
AVER(seg->depth == 0);
if (seg->queued)
- ShieldFlush(PoolArena(SegPool(seg)));
+ ShieldFlush(ArenaShield(PoolArena(SegPool(seg))));
AVER(!seg->queued);
- limit = SegLimit(seg);
+ /* Ensure spare committed pages are not protected
+ (design.mps.arena.spare-committed). */
+ AVER(seg->sm == AccessSetEMPTY);
+ AVER(seg->pm == AccessSetEMPTY);
+ RingRemove(SegPoolRing(seg));
+
+ limit = SegLimit(seg);
TRACT_TRACT_FOR(tract, addr, arena, seg->firstTract, limit) {
AVERT(Tract, tract);
TRACT_UNSET_SEG(tract);
@@ -215,14 +222,6 @@ static void segAbsFinish(Inst inst)
RingFinish(SegPoolRing(seg));
- /* Check that the segment is not exposed, or in the shield */
- /* cache . */
- AVER(seg->depth == 0);
- /* Check not shielded or protected (so that pages in hysteresis */
- /* fund are not protected) */
- AVER(seg->sm == AccessSetEMPTY);
- AVER(seg->pm == AccessSetEMPTY);
-
seg->sig = SigInvalid;
InstFinish(CouldBeA(Inst, seg));
}
@@ -624,7 +623,7 @@ Res SegMerge(Seg *mergedSegReturn, Seg segLo, Seg segHi)
arena = PoolArena(SegPool(segLo));
if (segLo->queued || segHi->queued)
- ShieldFlush(arena); /* see */
+ ShieldFlush(ArenaShield(arena)); /* see */
/* Invoke class-specific methods to do the merge */
res = Method(Seg, segLo, merge)(segLo, segHi, base, mid, limit);
@@ -678,7 +677,7 @@ Res SegSplit(Seg *segLoReturn, Seg *segHiReturn, Seg seg, Addr at)
AVER(!SegBuffer(&buffer, seg) || BufferLimit(buffer) <= at);
if (seg->queued)
- ShieldFlush(arena); /* see */
+ ShieldFlush(ArenaShield(arena)); /* see */
AVER(SegSM(seg) == SegPM(seg));
/* Allocate the new segment object from the control pool */
@@ -1297,7 +1296,7 @@ Res SegSingleAccess(Seg seg, Arena arena, Addr addr,
Ref ref;
Res res;
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
if(mode & SegSM(seg) & AccessREAD) {
/* Read access. */
@@ -1325,7 +1324,7 @@ Res SegSingleAccess(Seg seg, Arena arena, Addr addr,
* this is conservative. */
SegSetSummary(seg, RefSetAdd(arena, SegSummary(seg), ref));
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
return ResOK;
} else {
@@ -1608,10 +1607,10 @@ static void mutatorSegSetGrey(Seg seg, TraceSet grey)
flippedTraces = arena->flippedTraces;
if (TraceSetInter(oldGrey, flippedTraces) == TraceSetEMPTY) {
if (TraceSetInter(grey, flippedTraces) != TraceSetEMPTY)
- ShieldRaise(arena, seg, AccessREAD);
+ ShieldRaise(ArenaShield(arena), seg, AccessREAD);
} else {
if (TraceSetInter(grey, flippedTraces) == TraceSetEMPTY)
- ShieldLower(arena, seg, AccessREAD);
+ ShieldLower(ArenaShield(arena), seg, AccessREAD);
}
}
@@ -1631,7 +1630,7 @@ static void mutatorSegFlip(Seg seg, Trace trace)
/* Raise the read barrier if the segment was not grey for any
currently flipped trace. */
if (TraceSetInter(SegGrey(seg), flippedTraces) == TraceSetEMPTY) {
- ShieldRaise(arena, seg, AccessREAD);
+ ShieldRaise(ArenaShield(arena), seg, AccessREAD);
} else {
/* If the segment is grey for some currently flipped trace then
the read barrier must already have been raised, either in this
@@ -1702,12 +1701,12 @@ static void mutatorSegSetRankSet(Seg seg, RankSet rankSet)
if (oldRankSet == RankSetEMPTY) {
if (rankSet != RankSetEMPTY) {
AVER_CRITICAL(SegGCSeg(seg)->summary == RefSetEMPTY);
- ShieldRaise(PoolArena(SegPool(seg)), seg, AccessWRITE);
+ ShieldRaise(ArenaShield(PoolArena(SegPool(seg))), seg, AccessWRITE);
}
} else {
if (rankSet == RankSetEMPTY) {
AVER_CRITICAL(SegGCSeg(seg)->summary == RefSetEMPTY);
- ShieldLower(PoolArena(SegPool(seg)), seg, AccessWRITE);
+ ShieldLower(ArenaShield(PoolArena(SegPool(seg))), seg, AccessWRITE);
}
}
}
@@ -1727,9 +1726,9 @@ static void mutatorSegSyncWriteBarrier(Seg seg)
Arena arena = PoolArena(SegPool(seg));
/* Can't check seg -- this function enforces invariants tested by SegCheck. */
if (SegSummary(seg) == RefSetUNIV)
- ShieldLower(arena, seg, AccessWRITE);
+ ShieldLower(ArenaShield(arena), seg, AccessWRITE);
else
- ShieldRaise(arena, seg, AccessWRITE);
+ ShieldRaise(ArenaShield(arena), seg, AccessWRITE);
}
@@ -1888,7 +1887,7 @@ static Res gcSegMerge(Seg seg, Seg segHi,
/* This shield won't cope with a partially-protected segment, so
flush the shield queue to bring both halves in sync. See also
. */
- ShieldFlush(PoolArena(SegPool(seg)));
+ ShieldFlush(ArenaShield(PoolArena(SegPool(seg))));
}
/* Merge the superclass fields via next-method call */
diff --git a/code/shield.c b/code/shield.c
index 180374cfb8..8cf0edc812 100644
--- a/code/shield.c
+++ b/code/shield.c
@@ -1,7 +1,7 @@
/* shield.c: SHIELD IMPLEMENTATION
*
* $Id$
- * Copyright (c) 2001-2020 Ravenbrook Limited. See end of file for license.
+ * Copyright (c) 2001-2023 Ravenbrook Limited. See end of file for license.
*
* See: idea.shield, .
*
@@ -19,13 +19,14 @@ void ShieldInit(Shield shield)
{
shield->inside = FALSE;
shield->suspended = FALSE;
- shield->queuePending = FALSE;
shield->queue = NULL;
shield->length = 0;
shield->next = 0;
shield->limit = 0;
shield->depth = 0;
shield->unsynced = 0;
+ shield->unqueued = 0;
+ shield->synced = 0;
shield->holds = 0;
shield->sig = ShieldSig;
}
@@ -55,6 +56,8 @@ void ShieldFinish(Shield shield)
AVER(shield->depth == 0);
AVER(shield->unsynced == 0);
+ AVER(shield->unqueued == 0);
+ AVER(shield->synced == 0);
AVER(shield->holds == 0);
shield->sig = SigInvalid;
}
@@ -69,6 +72,8 @@ Bool ShieldCheck(Shield shield)
CHECKL(shield->queue == NULL || shield->length > 0);
CHECKL(shield->limit <= shield->length);
CHECKL(shield->next <= shield->limit);
+ CHECKL(shield->unqueued <= shield->unsynced);
+ CHECKL(shield->synced <= shield->limit);
/* The mutator is not suspended while outside the shield
. */
@@ -85,11 +90,11 @@ Bool ShieldCheck(Shield shield)
/* There are no unsynced segments when we're outside the shield. */
CHECKL(shield->inside || shield->unsynced == 0);
- /* Every unsynced segment should be on the queue, because we have to
- remember to sync it before we return to the mutator. */
- CHECKL(shield->limit + shield->queuePending >= shield->unsynced);
+ /* The queue contains exactly the unsynced segments that have been
+ queued, plus the synced segments that are still on the queue. */
+ CHECKL(shield->limit == shield->unsynced - shield->unqueued + shield->synced);
- /* The mutator is suspeneded if there are any holds. */
+ /* The mutator is suspended if there are any holds. */
CHECKL(shield->holds == 0 || shield->suspended);
/* This is too expensive to check all the time since we have an
@@ -105,7 +110,8 @@ Bool ShieldCheck(Shield shield)
if (!SegIsSynced(seg))
++unsynced;
}
- CHECKL(unsynced + shield->queuePending == shield->unsynced);
+ CHECKL(unsynced == shield->limit - shield->synced);
+ CHECKL(unsynced + shield->unqueued == shield->unsynced);
}
#endif
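
To make the new queue accounting concrete, here is a minimal, self-contained sketch (hypothetical code, not part of MPS or of this change) of the invariant checked above: the queue length ("limit") must equal the number of unsynced segments, minus those that are unsynced but not queued, plus the synced segments still sitting on the queue.

/* Hypothetical standalone sketch of the queue accounting.  FakeSeg
   and checkCounts are illustrative names, not MPS types. */

#include <assert.h>
#include <stddef.h>

typedef struct { int synced; int queued; } FakeSeg;

static void checkCounts(const FakeSeg *segs, size_t n)
{
  size_t limit = 0, unsynced = 0, unqueued = 0, synced = 0, i;
  for (i = 0; i < n; ++i) {
    if (segs[i].queued) ++limit;                        /* queue length */
    if (!segs[i].synced) ++unsynced;                    /* cf. shield->unsynced */
    if (!segs[i].synced && !segs[i].queued) ++unqueued; /* cf. shield->unqueued */
    if (segs[i].synced && segs[i].queued) ++synced;     /* cf. shield->synced */
  }
  /* The relationship CHECKLed above. */
  assert(limit == unsynced - unqueued + synced);
}

int main(void)
{
  FakeSeg segs[] = { {1,0}, {0,1}, {0,0}, {1,1} };
  checkCounts(segs, sizeof segs / sizeof segs[0]);
  return 0;
}

In the MPS itself the same relationship is maintained incrementally by shieldSetSM, shieldSetPM, shieldQueue and shieldDequeue rather than recomputed, which is what the counter updates later in this file implement.
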
@@ -125,6 +131,8 @@ Res ShieldDescribe(Shield shield, mps_lib_FILE *stream, Count depth)
" next $U\n", (WriteFU)shield->next,
" length $U\n", (WriteFU)shield->length,
" unsynced $U\n", (WriteFU)shield->unsynced,
+ " unqueued $U\n", (WriteFU)shield->unqueued,
+ " synced $U\n", (WriteFU)shield->synced,
" holds $U\n", (WriteFU)shield->holds,
"} Shield $P\n", (WriteFP)shield,
NULL);
@@ -168,11 +176,22 @@ static void shieldSetSM(Shield shield, Seg seg, AccessSet mode)
if (SegIsSynced(seg)) {
SegSetSM(seg, mode);
++shield->unsynced;
+ if (seg->queued) {
+ AVER(shield->synced > 0);
+ --shield->synced;
+ } else
+ ++shield->unqueued;
} else {
SegSetSM(seg, mode);
if (SegIsSynced(seg)) {
AVER(shield->unsynced > 0);
--shield->unsynced;
+ if (seg->queued)
+ ++shield->synced;
+ else {
+ AVER(shield->unqueued > 0);
+ --shield->unqueued;
+ }
}
}
}
@@ -187,11 +206,22 @@ static void shieldSetPM(Shield shield, Seg seg, AccessSet mode)
if (SegIsSynced(seg)) {
SegSetPM(seg, mode);
++shield->unsynced;
+ if (seg->queued) {
+ AVER(shield->synced > 0);
+ --shield->synced;
+ } else
+ ++shield->unqueued;
} else {
SegSetPM(seg, mode);
if (SegIsSynced(seg)) {
AVER(shield->unsynced > 0);
--shield->unsynced;
+ if (seg->queued)
+ ++shield->synced;
+ else {
+ AVER(shield->unqueued > 0);
+ --shield->unqueued;
+ }
}
}
}
@@ -220,9 +250,11 @@ static void shieldSync(Shield shield, Seg seg)
SHIELD_AVERT_CRITICAL(Seg, seg);
if (!SegIsSynced(seg)) {
+ /* TODO: Could assert something about the unsync count going down. */
shieldSetPM(shield, seg, SegSM(seg));
ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg));
}
+ AVER_CRITICAL(SegIsSynced(seg));
}
@@ -233,15 +265,13 @@ static void shieldSync(Shield shield, Seg seg)
* .inv.unsynced.suspended.
*/
-static void shieldSuspend(Arena arena)
+static void shieldSuspend(Shield shield)
{
- Shield shield;
-
- AVERT(Arena, arena);
- shield = ArenaShield(arena);
+ AVERT(Shield, shield);
AVER(shield->inside);
if (!shield->suspended) {
+ Arena arena = ShieldArena(shield);
ThreadRingSuspend(ArenaThreadRing(arena), ArenaDeadRing(arena));
shield->suspended = TRUE;
}
@@ -255,11 +285,11 @@ static void shieldSuspend(Arena arena)
* when we must scan all thread registers at once.
*/
-void (ShieldHold)(Arena arena)
+void (ShieldHold)(Shield shield)
{
- AVERT(Arena, arena);
- shieldSuspend(arena);
- ++ArenaShield(arena)->holds;
+ AVERT(Shield, shield);
+ shieldSuspend(shield);
+ ++shield->holds;
}
@@ -269,15 +299,11 @@ void (ShieldHold)(Arena arena)
* this marks the earliest point at which we could resume.
*/
-void (ShieldRelease)(Arena arena)
+void (ShieldRelease)(Shield shield)
{
- Shield shield;
-
- AVERT(Arena, arena);
- shield = ArenaShield(arena);
+ AVERT(Shield, shield);
AVER(shield->inside);
AVER(shield->suspended);
-
AVER(shield->holds > 0);
--shield->holds;
@@ -318,6 +344,11 @@ static Seg shieldDequeue(Shield shield, Index i)
AVER(seg->queued);
shield->queue[i] = NULL; /* to ensure it can't get re-used */
seg->queued = FALSE;
+ if (SegIsSynced(seg)) {
+ AVER(shield->synced > 0);
+ --shield->synced;
+ } else
+ ++shield->unqueued;
return seg;
}
@@ -405,12 +436,16 @@ static void shieldFlushEntries(Shield shield)
shieldQueueEntryCompare, UNUSED_POINTER,
&shield->sortStruct);
+ /* Coalesce runs of segments that need the same protection mode. */
mode = AccessSetEMPTY;
limit = NULL;
for (i = 0; i < shield->limit; ++i) {
Seg seg = shieldDequeue(shield, i);
if (!SegIsSynced(seg)) {
shieldSetPM(shield, seg, SegSM(seg));
+ /* If the segment has a different mode from the previous run, or
+ it's not contiguous with the previous run, then set the OS
+ protection on the previous run, and start a new run. */
if (SegSM(seg) != mode || SegBase(seg) != limit) {
if (base != NULL) {
AVER(base < limit);
@@ -422,6 +457,7 @@ static void shieldFlushEntries(Shield shield)
limit = SegLimit(seg);
}
}
+ /* Set the OS protection on the last run, if there is one. */
if (base != NULL) {
AVER(base < limit);
ProtSet(base, limit, mode);
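
For readers unfamiliar with the coalescing described in the comments above, the following is a self-contained sketch (hypothetical names, not the MPS API) of the same idea: walk address-sorted ranges and issue one protection call per maximal run of contiguous ranges that share a protection mode.

/* Hypothetical sketch of run coalescing; Range and protect are
   illustrative names only. */

#include <stdio.h>
#include <stddef.h>

typedef struct { unsigned base, limit, mode; } Range;

static void protect(unsigned base, unsigned limit, unsigned mode)
{
  printf("protect [%u, %u) mode %u\n", base, limit, mode);
}

static void flushRuns(const Range *r, size_t n)
{
  unsigned base = 0, limit = 0, mode = 0;
  int open = 0;
  size_t i;
  for (i = 0; i < n; ++i) {
    if (!open || r[i].mode != mode || r[i].base != limit) {
      if (open)
        protect(base, limit, mode);   /* finish the previous run */
      base = r[i].base;               /* start a new run */
      mode = r[i].mode;
      open = 1;
    }
    limit = r[i].limit;               /* extend the current run */
  }
  if (open)
    protect(base, limit, mode);       /* finish the last run */
}

int main(void)
{
  Range rs[] = { {0,4,1}, {4,8,1}, {8,12,2}, {16,20,2} };
  flushRuns(rs, sizeof rs / sizeof rs[0]);
  return 0;
}

The real loop above additionally dequeues each segment and updates its protection-mode bookkeeping as it goes, but the run-detection condition (same mode and contiguous with the previous segment) is the same.
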
@@ -437,24 +473,24 @@ static void shieldFlushEntries(Shield shield)
* queued and the mutator is suspended.
*/
-static void shieldQueue(Arena arena, Seg seg)
+static void shieldQueue(Shield shield, Seg seg)
{
- Shield shield;
-
/* */
- AVERT_CRITICAL(Arena, arena);
- shield = ArenaShield(arena);
+ AVERT_CRITICAL(Shield, shield);
SHIELD_AVERT_CRITICAL(Seg, seg);
if (SegIsSynced(seg) || seg->queued)
return;
+ /* This segment is unsynced and not queued. */
+ AVER(shield->unqueued > 0);
+
if (SegIsExposed(seg)) {
/* This can occur if the mutator isn't suspended, we expose a
segment, then raise the shield on it. In this case, the
mutator isn't allowed to see the segment, but we don't need to
queue it until it's covered. */
- shieldSuspend(arena);
+ shieldSuspend(shield);
return;
}
@@ -471,7 +507,7 @@ static void shieldQueue(Arena arena, Seg seg)
else
length = shield->length * 2;
- res = ControlAlloc(&p, arena, length * sizeof shield->queue[0]);
+ res = ControlAlloc(&p, ShieldArena(shield), length * sizeof shield->queue[0]);
if (res != ResOK) {
AVER(ResIsAllocFailure(res));
/* Carry on with the existing queue. */
@@ -480,7 +516,7 @@ static void shieldQueue(Arena arena, Seg seg)
Size oldSize = shield->length * sizeof shield->queue[0];
AVER(shield->queue != NULL);
mps_lib_memcpy(p, shield->queue, oldSize);
- ControlFree(arena, shield->queue, oldSize);
+ ControlFree(ShieldArena(shield), shield->queue, oldSize);
}
shield->queue = p;
shield->length = length;
@@ -519,6 +555,7 @@ static void shieldQueue(Arena arena, Seg seg)
shield->queue[shield->next] = seg;
++shield->next;
seg->queued = TRUE;
+ --shield->unqueued;
if (shield->next >= shield->limit)
shield->limit = shield->next;
@@ -531,39 +568,30 @@ static void shieldQueue(Arena arena, Seg seg)
* covered and the shield queue is unavailable.
*/
-void (ShieldRaise)(Arena arena, Seg seg, AccessSet mode)
+void (ShieldRaise)(Shield shield, Seg seg, AccessSet mode)
{
- Shield shield;
-
- SHIELD_AVERT(Arena, arena);
+ SHIELD_AVERT(Shield, shield);
SHIELD_AVERT(Seg, seg);
AVERT(AccessSet, mode);
- shield = ArenaShield(arena);
- AVER(!shield->queuePending);
- shield->queuePending = TRUE;
/* preserved */
- shieldSetSM(ArenaShield(arena), seg, BS_UNION(SegSM(seg), mode));
+ shieldSetSM(shield, seg, BS_UNION(SegSM(seg), mode));
/* Ensure and
*/
- shieldQueue(arena, seg);
- shield->queuePending = FALSE;
+ shieldQueue(shield, seg);
/* Check queue and segment consistency. */
- AVERT(Arena, arena);
+ AVERT(Shield, shield);
AVERT(Seg, seg);
}
/* ShieldLower -- declare segment may be accessed by mutator */
-void (ShieldLower)(Arena arena, Seg seg, AccessSet mode)
+void (ShieldLower)(Shield shield, Seg seg, AccessSet mode)
{
- Shield shield;
-
- AVERT(Arena, arena);
- shield = ArenaShield(arena);
+ AVERT(Shield, shield);
SHIELD_AVERT(Seg, seg);
AVERT(AccessSet, mode);
@@ -574,23 +602,20 @@ void (ShieldLower)(Arena arena, Seg seg, AccessSet mode)
/* TODO: Do we need to promptly call shieldProtLower here? It
loses the opportunity to coalesce the protection call. It would
violate . */
- /* shieldQueue(arena, seg); */
+ /* shieldQueue(shield, seg); */
shieldProtLower(shield, seg, mode);
/* Check queue and segment consistency. */
- AVERT(Arena, arena);
+ AVERT(Shield, shield);
AVERT(Seg, seg);
}
/* ShieldEnter -- enter the shield, allowing exposes */
-void (ShieldEnter)(Arena arena)
+void (ShieldEnter)(Shield shield)
{
- Shield shield;
-
- AVERT(Arena, arena);
- shield = ArenaShield(arena);
+ AVERT(Shield, shield);
AVER(!shield->inside);
AVER(shield->depth == 0);
AVER(!shield->suspended);
@@ -614,34 +639,46 @@ void (ShieldEnter)(Arena arena)
*/
#if defined(SHIELD_DEBUG)
-static void shieldDebugCheck(Arena arena)
+static void shieldDebugCheck(Shield shield)
{
- Shield shield;
+ Arena arena;
Seg seg;
Count queued = 0;
Count depth = 0;
+ Count unqueued = 0;
+ Count unsynced = 0;
+ Count synced = 0;
- AVERT(Arena, arena);
- shield = ArenaShield(arena);
+ AVERT(Shield, shield);
AVER(shield->inside || shield->limit == 0);
- if (SegFirst(&seg, arena))
+ arena = ShieldArena(shield);
+ if (SegFirst(&seg, arena)) {
do {
depth += SegDepth(seg);
if (shield->limit == 0) {
AVER(!seg->queued);
- AVER(SegIsSynced(seg));
+ AVER(shield->unsynced > 0 || SegIsSynced(seg));
/* You can directly set protections here to see if it makes a
difference. */
/* ProtSet(SegBase(seg), SegLimit(seg), SegPM(seg)); */
- } else {
- if (seg->queued)
- ++queued;
}
+ if (seg->queued)
+ ++queued;
+ if (!SegIsSynced(seg))
+ ++unsynced;
+ if (seg->queued && SegIsSynced(seg))
+ ++synced;
+ if (!seg->queued && !SegIsSynced(seg))
+ ++unqueued;
} while(SegNext(&seg, arena, seg));
+ }
AVER(depth == shield->depth);
AVER(queued == shield->limit);
+ AVER(unsynced == shield->unsynced);
+ AVER(unqueued == shield->unqueued);
+ AVER(synced == shield->synced);
}
#endif
@@ -662,42 +699,37 @@ static void shieldDebugCheck(Arena arena)
* .
*/
-void (ShieldFlush)(Arena arena)
+void (ShieldFlush)(Shield shield)
{
- Shield shield;
-
- AVERT(Arena, arena);
- shield = ArenaShield(arena);
+ AVERT(Shield, shield);
#ifdef SHIELD_DEBUG
- shieldDebugCheck(arena);
+ shieldDebugCheck(shield);
#endif
shieldFlushEntries(shield);
AVER(shield->unsynced == 0); /* everything back in sync */
#ifdef SHIELD_DEBUG
- shieldDebugCheck(arena);
+ shieldDebugCheck(shield);
#endif
}
/* ShieldLeave -- leave the shield, protect segs from mutator */
-void (ShieldLeave)(Arena arena)
+void (ShieldLeave)(Shield shield)
{
- Shield shield;
-
- AVERT(Arena, arena);
- shield = ArenaShield(arena);
+ AVERT(Shield, shield);
AVER(shield->inside);
AVER(shield->depth == 0); /* no pending covers */
AVER(shield->holds == 0);
- ShieldFlush(arena);
+ ShieldFlush(shield);
AVER(shield->unsynced == 0); /* everything back in sync */
/* Ensuring the mutator is running at this point guarantees
.inv.outside.running */
if (shield->suspended) {
+ Arena arena = ShieldArena(shield);
ThreadRingResume(ArenaThreadRing(arena), ArenaDeadRing(arena));
shield->suspended = FALSE;
}
@@ -713,14 +745,12 @@ void (ShieldLeave)(Arena arena)
* ensure the MPS has exclusive access.
*/
-void (ShieldExpose)(Arena arena, Seg seg)
+void (ShieldExpose)(Shield shield, Seg seg)
{
- Shield shield;
AccessSet mode = AccessREAD | AccessWRITE;
/* */
- AVERT_CRITICAL(Arena, arena);
- shield = ArenaShield(arena);
+ AVERT_CRITICAL(Shield, shield);
AVER_CRITICAL(shield->inside);
SegSetDepth(seg, SegDepth(seg) + 1);
@@ -729,7 +759,7 @@ void (ShieldExpose)(Arena arena, Seg seg)
AVER_CRITICAL(shield->depth > 0); /* overflow */
if (BS_INTER(SegPM(seg), mode) != AccessSetEMPTY)
- shieldSuspend(arena);
+ shieldSuspend(shield);
/* Ensure . */
/* TODO: Mass exposure -- see
@@ -740,13 +770,10 @@ void (ShieldExpose)(Arena arena, Seg seg)
/* ShieldCover -- declare MPS no longer needs access to seg */
-void (ShieldCover)(Arena arena, Seg seg)
+void (ShieldCover)(Shield shield, Seg seg)
{
- Shield shield;
-
/* */
- AVERT_CRITICAL(Arena, arena);
- shield = ArenaShield(arena);
+ AVERT_CRITICAL(Shield, shield);
AVERT_CRITICAL(Seg, seg);
AVER_CRITICAL(SegPM(seg) == AccessSetEMPTY);
@@ -756,13 +783,13 @@ void (ShieldCover)(Arena arena, Seg seg)
--shield->depth;
/* Ensure . */
- shieldQueue(arena, seg);
+ shieldQueue(shield, seg);
}
/* C. COPYRIGHT AND LICENSE
*
- * Copyright (C) 2001-2020 Ravenbrook Limited .
+ * Copyright (C) 2001-2023 Ravenbrook Limited .
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
diff --git a/code/trace.c b/code/trace.c
index 9507eb8f72..faccb5b727 100644
--- a/code/trace.c
+++ b/code/trace.c
@@ -191,20 +191,20 @@ Bool TraceCheck(Trace trace)
break;
case TraceUNFLIPPED:
- CHECKL(!RingIsSingle(&trace->genRing));
+ /* CHECKL(!RingIsSingle(&trace->genRing)); FIXME: Not true for walks and probably transforms */
CHECKL(!TraceSetIsMember(trace->arena->flippedTraces, trace));
/* @@@@ Assert that mutator is grey for trace. */
break;
case TraceFLIPPED:
- CHECKL(!RingIsSingle(&trace->genRing));
+ /* CHECKL(!RingIsSingle(&trace->genRing)); FIXME: Not true for walks and probably transforms */
CHECKL(TraceSetIsMember(trace->arena->flippedTraces, trace));
CHECKL(RankCheck(trace->band));
/* @@@@ Assert that mutator is black for trace. */
break;
case TraceRECLAIM:
- CHECKL(!RingIsSingle(&trace->genRing));
+ /* CHECKL(!RingIsSingle(&trace->genRing)); FIXME: Not true for walks and probably transforms */
CHECKL(TraceSetIsMember(trace->arena->flippedTraces, trace));
/* @@@@ Assert that grey set is empty for trace. */
break;
@@ -454,7 +454,7 @@ Res TraceCondemnEnd(double *mortalityReturn, Trace trace)
AVER(trace->state == TraceINIT);
AVER(trace->white == ZoneSetEMPTY);
- ShieldHold(trace->arena);
+ ShieldHold(ArenaShield(trace->arena));
RING_FOR(genNode, &trace->genRing, genNext) {
Size condemnedBefore, condemnedGen;
Ring segNode, segNext;
@@ -472,7 +472,7 @@ Res TraceCondemnEnd(double *mortalityReturn, Trace trace)
condemnedGen = trace->condemned - condemnedBefore;
casualtySize += (Size)((double)condemnedGen * gen->mortality);
}
- ShieldRelease(trace->arena);
+ ShieldRelease(ArenaShield(trace->arena));
if (TraceIsEmpty(trace))
return ResFAIL;
@@ -489,7 +489,7 @@ Res TraceCondemnEnd(double *mortalityReturn, Trace trace)
triggered. In that case, we'll have to recover here by blackening
the segments again. */
AVER(TraceIsEmpty(trace));
- ShieldRelease(trace->arena);
+ ShieldRelease(ArenaShield(trace->arena));
return res;
}
@@ -624,7 +624,7 @@ static Res traceFlip(Trace trace)
arena = trace->arena;
rfc.arena = arena;
- ShieldHold(arena);
+ ShieldHold(ArenaShield(arena));
AVER(trace->state == TraceUNFLIPPED);
AVER(!TraceSetIsMember(arena->flippedTraces, trace));
@@ -681,11 +681,11 @@ static Res traceFlip(Trace trace)
EVENT2(TraceFlipEnd, trace, arena);
- ShieldRelease(arena);
+ ShieldRelease(ArenaShield(arena));
return ResOK;
failRootFlip:
- ShieldRelease(arena);
+ ShieldRelease(ArenaShield(arena));
return res;
}
@@ -814,7 +814,7 @@ static void traceDestroyCommon(Trace trace)
GenDesc gen = GenDescOfTraceRing(node, trace);
GenDescEndTrace(gen, trace);
}
- RingFinish(&trace->genRing);
+ AVER(RingIsSingle(&trace->genRing));
/* Ensure that address space is returned to the operating system for
* traces that don't have any condemned objects (there might be
@@ -828,6 +828,8 @@ static void traceDestroyCommon(Trace trace)
* violating . */
ArenaSetEmergency(trace->arena, FALSE);
+ RingFinish(&trace->genRing);
+
trace->sig = SigInvalid;
trace->arena->busyTraces = TraceSetDel(trace->arena->busyTraces, trace);
trace->arena->flippedTraces = TraceSetDel(trace->arena->flippedTraces, trace);
@@ -1200,10 +1202,10 @@ static Res traceScanSegRes(TraceSet ts, Rank rank, Arena arena, Seg seg)
ScanStateInitSeg(ss, ts, arena, rank, white, seg);
/* Expose the segment to make sure we can scan it. */
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
res = SegScan(&wasTotal, seg, ss);
/* Cover, regardless of result */
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
traceSetUpdateCounts(ts, arena, ss, traceAccountingPhaseSegScan);
/* Count segments scanned pointlessly */
@@ -1471,7 +1473,7 @@ static Res traceScanSingleRefRes(TraceSet ts, Rank rank, Arena arena,
}
ScanStateInit(&ss, ts, arena, rank, white);
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
TRACE_SCAN_BEGIN(&ss) {
res = TRACE_FIX12(&ss, refIO);
@@ -1481,7 +1483,7 @@ static Res traceScanSingleRefRes(TraceSet ts, Rank rank, Arena arena,
summary = SegSummary(seg);
summary = RefSetAdd(arena, summary, *refIO);
SegSetSummary(seg, summary);
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
traceSetUpdateCounts(ts, arena, &ss, traceAccountingPhaseSingleScan);
ScanStateFinish(&ss);
diff --git a/code/tract.c b/code/tract.c
index baa01ef611..979916aa4b 100644
--- a/code/tract.c
+++ b/code/tract.c
@@ -324,7 +324,11 @@ Bool ChunkOfAddr(Chunk *chunkReturn, Arena arena, Addr addr)
Tree tree;
AVER_CRITICAL(chunkReturn != NULL);
- AVERT_CRITICAL(Arena, arena);
+
+ /* Avoid AVERT on arena: ChunkOfAddr is used by ArenaCheck, so an
+ AVERT here would recurse without bound under deep checking. */
+ AVER_CRITICAL(TESTT(Arena, arena));
+
/* addr is arbitrary */
if (TreeFind(&tree, ArenaChunkTree(arena), TreeKeyOfAddrVar(addr),
diff --git a/code/walk.c b/code/walk.c
index c1738d91a0..2af6d56f24 100644
--- a/code/walk.c
+++ b/code/walk.c
@@ -75,9 +75,9 @@ static void ArenaFormattedObjectsWalk(Arena arena, FormattedObjectsVisitor f,
if (SegFirst(&seg, arena)) {
do {
if (PoolFormat(&format, SegPool(seg))) {
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
SegWalk(seg, format, f, p, s);
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
}
} while(SegNext(&seg, arena, seg));
}
@@ -305,9 +305,12 @@ static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f,
/* operations of its own. */
res = TraceCreate(&trace, arena, TraceStartWhyWALK);
- /* Have to fail if no trace available. Unlikely due to .assume.parked. */
- if (res != ResOK)
+ if (res != ResOK) {
+ NOTREACHED; /* .assume.parked means a trace should be available */
return res;
+ }
+
+ /* Whiten everything. This step is equivalent to TraceCondemn. */
/* .roots-walk.first-stage: In order to fool MPS_FIX12 into calling
_mps_fix2 for a reference in a root, the reference must pass the
@@ -325,14 +328,17 @@ static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f,
} while (SegNext(&seg, arena, seg));
}
+ /* Start the trace. This step is equivalent to TraceStart. */
+
/* Make the roots grey so that they are scanned */
res = RootsIterate(arenaGlobals, rootWalkGrey, trace);
- /* Make this trace look like any other trace. */
- arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace);
+ trace->state = TraceUNFLIPPED;
+
+ /* Scan the roots. This step is equivalent to traceFlip. */
+
rootsStepClosureInit(rsc, arenaGlobals, trace, RootsWalkFix, f, p, s);
ss = rootsStepClosure2ScanState(rsc);
-
for(rank = RankMIN; rank < RankLIMIT; ++rank) {
ss->rank = rank;
AVERT(ScanState, ss);
@@ -340,16 +346,24 @@ static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f,
if (res != ResOK)
break;
}
+ rootsStepClosureFinish(rsc);
- /* Turn segments black again. */
+ trace->state = TraceFLIPPED;
+ arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace);
+
+ /* At this point in a normal trace we'd search and scan grey
+ segments, but there aren't any, so we can proceed directly to
+ reclaim. */
+ trace->state = TraceRECLAIM;
+
+ /* Turn segments black again. This is equivalent to traceReclaim,
+ but in this case nothing is reclaimed. */
if (SegFirst(&seg, arena)) {
do {
SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
} while (SegNext(&seg, arena, seg));
}
- rootsStepClosureFinish(rsc);
- /* Make this trace look like any other finished trace. */
trace->state = TraceFINISHED;
TraceDestroyFinished(trace);
AVER(!ArenaEmergency(arena)); /* There was no allocation. */
@@ -438,9 +452,10 @@ static Res poolWalk(Arena arena, Pool pool, mps_area_scan_t area_scan, void *clo
* white set means that the MPS_FIX1 test will always fail and
* _mps_fix2 will never be called. */
res = TraceCreate(&trace, arena, TraceStartWhyWALK);
- /* Fail if no trace available. Unlikely due to .assume.parked. */
- if (res != ResOK)
+ if (res != ResOK) {
+ NOTREACHED; /* .assume.parked means a trace should be available */
return res;
+ }
trace->white = ZoneSetEMPTY;
trace->state = TraceFLIPPED;
arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace);
@@ -461,9 +476,9 @@ static Res poolWalk(Arena arena, Pool pool, mps_area_scan_t area_scan, void *clo
ScanStateSetSummary(&ss, RefSetEMPTY);
/* Expose the segment to make sure we can scan it. */
- ShieldExpose(arena, seg);
+ ShieldExpose(ArenaShield(arena), seg);
res = SegScan(&wasTotal, seg, &ss);
- ShieldCover(arena, seg);
+ ShieldCover(ArenaShield(arena), seg);
if (needSummary)
ScanStateUpdateSummary(&ss, seg, res == ResOK && wasTotal);
diff --git a/design/tests.txt b/design/tests.txt
index eed0c4fc85..0c860702d0 100644
--- a/design/tests.txt
+++ b/design/tests.txt
@@ -311,6 +311,14 @@ least::
as defined by the `.ci.github.config`_.
+_`.ci.run.other.deep`: CI services also run the test-make-build-deep
+target (defined in `Makefile.in`_) in order to exercise
+`design.mps.check.level.deep`_ and ensure that it remains usable. To
+save resources, this target is run only on GitHub's "ubuntu-latest"
+platform, but it could be extended to other platforms.
+
+.. _design.mps.check.level.deep: check.txt#level-deep
+
_`.ci.run.other.targets`: On some platforms we arrange to run the testansi,
testpollnone, testratio, and testscheme targets. [Need to explain
why, where, etc. RB 2023-01-15]
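
To make deep checking a little more concrete for readers of this change, here is a hedged, self-contained sketch of how a compile-time check level such as CheckLevelDEEP can switch a checking function from a cheap signature test to one that follows pointers into substructures. Apart from CHECKLEVEL and CheckLevelDEEP, which appear in the test-make-build-deep target above, all names and values here are illustrative assumptions, not the MPS check interface.

/* Illustrative sketch only: the dispatch below is an assumption for
   the purpose of explanation.  CHECKLEVEL and CheckLevelDEEP are the
   symbols passed via CFLAGS by test-make-build-deep. */

#include <assert.h>
#include <stddef.h>

#define CheckLevelMINIMAL 0   /* assumed values, for illustration */
#define CheckLevelSHALLOW 1
#define CheckLevelDEEP    2

#ifndef CHECKLEVEL
#define CHECKLEVEL CheckLevelSHALLOW
#endif

typedef struct Node { unsigned sig; struct Node *next; } Node;
#define NodeSig 0x519B0DE5u

static int NodeCheck(const Node *node)
{
  /* Cheap check: the structure carries a valid signature. */
  assert(node != NULL && node->sig == NodeSig);
#if CHECKLEVEL >= CheckLevelDEEP
  /* Deep check: follow pointers and check substructures too.  A
     cyclic structure would recurse without bound here, which is the
     kind of problem the ChunkOfAddr change in code/tract.c above
     works around by avoiding AVERT on the arena. */
  if (node->next != NULL)
    (void)NodeCheck(node->next);
#endif
  return 1;
}

int main(void)
{
  Node b = { NodeSig, NULL }, a = { NodeSig, &b };
  return NodeCheck(&a) ? 0 : 1;
}
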