Skip to content

Commit 2793728

Browse files
ioworker0, thepudds
authored and committed
runtime: avoid zeroing scavenged memory
On Linux, memory returned to the kernel via MADV_DONTNEED is guaranteed to be zero-filled on its next use. This commit leverages this kernel behavior to avoid a redundant software zeroing pass in the runtime, improving performance.

Change-Id: Ia14343b447a2cec7af87644fe8050e23e983c787
GitHub-Last-Rev: 6c8df32
GitHub-Pull-Request: #76063
Reviewed-on: https://go-review.googlesource.com/c/go/+/715160
LUCI-TryBot-Result: Go LUCI <[email protected]>
Reviewed-by: Michael Knyszek <[email protected]>
Reviewed-by: David Chase <[email protected]>
1 parent 89dee70 commit 2793728

File tree

9 files changed

+44
-4
lines changed

9 files changed

+44
-4
lines changed

src/runtime/arena.go

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1051,7 +1051,11 @@ func (h *mheap) allocUserArenaChunk() *mspan {
10511051

10521052
// Model the user arena as a heap span for a large object.
10531053
spc := makeSpanClass(0, false)
1054-
h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
1054+
// A user arena chunk is always fresh from the OS. It's either newly allocated
1055+
// via sysAlloc() or reused from the readyList after a sysFault(). The memory is
1056+
// then re-mapped via sysMap(), so we can safely treat it as scavenged; the
1057+
// kernel guarantees it will be zero-filled on its next use.
1058+
h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages, userArenaChunkBytes)
10551059
s.isUserArenaChunk = true
10561060
s.elemsize -= userArenaChunkReserveBytes()
10571061
s.freeindex = 1

src/runtime/mem.go

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,12 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
7070
sysUnusedOS(v, n)
7171
}
7272

73+
// needZeroAfterSysUnused reports whether memory returned by sysUnused must be
74+
// zeroed for use.
75+
func needZeroAfterSysUnused() bool {
76+
return needZeroAfterSysUnusedOS()
77+
}
78+
7379
// sysUsed transitions a memory region from Prepared to Ready. It notifies the
7480
// operating system that the memory region is needed and ensures that the region
7581
// may be safely accessed. This is typically a no-op on systems that don't have

src/runtime/mem_aix.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -79,3 +79,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
7979
throw("runtime: cannot map pages in arena address space")
8080
}
8181
}
82+
83+
// needZeroAfterSysUnusedOS reports whether memory returned by sysUnused must
// be zeroed for use. On this platform the answer is unconditionally yes.
func needZeroAfterSysUnusedOS() bool {
	// No OS-level zero-fill shortcut is assumed here; callers must zero.
	const mustZero = true
	return mustZero
}

src/runtime/mem_bsd.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,3 +85,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
8585
throw("runtime: cannot map pages in arena address space")
8686
}
8787
}
88+
89+
// needZeroAfterSysUnusedOS reports whether memory returned by sysUnused must
// be zeroed for use. On this platform the answer is unconditionally yes.
func needZeroAfterSysUnusedOS() bool {
	// Always require explicit zeroing on this platform.
	return !false
}

src/runtime/mem_darwin.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,3 +74,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
7474
throw("runtime: cannot map pages in arena address space")
7575
}
7676
}
77+
78+
// needZeroAfterSysUnusedOS reports whether memory returned by sysUnused must
// be zeroed for use. On this platform the answer is unconditionally yes.
func needZeroAfterSysUnusedOS() bool {
	// The runtime always zeroes previously-unused memory here.
	const needsZeroing = true
	return needsZeroing
}

src/runtime/mem_linux.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -188,3 +188,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, vmaName string) {
188188
sysNoHugePageOS(v, n)
189189
}
190190
}
191+
192+
func needZeroAfterSysUnusedOS() bool {
193+
return debug.madvdontneed == 0
194+
}

src/runtime/mem_sbrk.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -296,3 +296,7 @@ func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) {
296296
})
297297
return unsafe.Pointer(p), size
298298
}
299+
300+
// needZeroAfterSysUnusedOS reports whether memory returned by sysUnused must
// be zeroed for use. On this platform the answer is unconditionally yes.
func needZeroAfterSysUnusedOS() bool {
	// sbrk-based memory management offers no zero-fill guarantee to rely on.
	const mustZero = true
	return mustZero
}

src/runtime/mem_windows.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -132,3 +132,7 @@ func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
132132

133133
func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
134134
}
135+
136+
// needZeroAfterSysUnusedOS reports whether memory returned by sysUnused must
// be zeroed for use. On this platform the answer is unconditionally yes.
func needZeroAfterSysUnusedOS() bool {
	needsZero := true
	return needsZero
}

src/runtime/mheap.go

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1394,7 +1394,7 @@ HaveSpan:
13941394
}
13951395

13961396
// Initialize the span.
1397-
h.initSpan(s, typ, spanclass, base, npages)
1397+
h.initSpan(s, typ, spanclass, base, npages, scav)
13981398

13991399
if valgrindenabled {
14001400
valgrindMempoolMalloc(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base), npages*pageSize)
@@ -1440,11 +1440,17 @@ HaveSpan:
14401440

14411441
// initSpan initializes a blank span s which will represent the range
14421442
// [base, base+npages*pageSize). typ is the type of span being allocated.
1443-
func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages uintptr) {
1443+
func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages, scav uintptr) {
14441444
// At this point, both s != nil and base != 0, and the heap
14451445
// lock is no longer held. Initialize the span.
14461446
s.init(base, npages)
1447-
if h.allocNeedsZero(base, npages) {
1447+
// Always call allocNeedsZero to update the arena's zeroedBase watermark
1448+
// and determine if the memory is considered dirty.
1449+
needZero := h.allocNeedsZero(base, npages)
1450+
// If these pages were scavenged (returned to the OS), the kernel guarantees
1451+
// they will be zero-filled on next use (fault-in), so we can treat them as
1452+
// already zeroed and skip explicit clearing.
1453+
if (needZeroAfterSysUnused() || scav != npages*pageSize) && needZero {
14481454
s.needzero = 1
14491455
}
14501456
nbytes := npages * pageSize

0 commit comments

Comments
 (0)