diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index 52a2a99d6cae87..2095bfa8e02d15 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -1051,7 +1051,11 @@ func (h *mheap) allocUserArenaChunk() *mspan {
 
 	// Model the user arena as a heap span for a large object.
 	spc := makeSpanClass(0, false)
-	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
+	// A user arena chunk is always fresh from the OS. It's either newly allocated
+	// via sysAlloc() or reused from the readyList after a sysFault(). The memory is
+	// then re-mapped via sysMap(), so we can safely treat it as scavenged; the
+	// kernel guarantees it will be zero-filled on its next use.
+	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages, userArenaChunkBytes)
 	s.isUserArenaChunk = true
 	s.elemsize -= userArenaChunkReserveBytes()
 	s.freeindex = 1
diff --git a/src/runtime/mem.go b/src/runtime/mem.go
index cd06ea323d8a7c..f0b00c77152c53 100644
--- a/src/runtime/mem.go
+++ b/src/runtime/mem.go
@@ -70,6 +70,12 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 	sysUnusedOS(v, n)
 }
 
+// needZeroAfterSysUnused reports whether memory returned by sysUnused must be
+// zeroed for use.
+func needZeroAfterSysUnused() bool {
+	return needZeroAfterSysUnusedOS()
+}
+
 // sysUsed transitions a memory region from Prepared to Ready. It notifies the
 // operating system that the memory region is needed and ensures that the region
 // may be safely accessed. This is typically a no-op on systems that don't have
diff --git a/src/runtime/mem_aix.go b/src/runtime/mem_aix.go
index c5e4710dacfb0f..1203af579726ad 100644
--- a/src/runtime/mem_aix.go
+++ b/src/runtime/mem_aix.go
@@ -79,3 +79,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 		throw("runtime: cannot map pages in arena address space")
 	}
 }
+
+func needZeroAfterSysUnusedOS() bool {
+	return true
+}
diff --git a/src/runtime/mem_bsd.go b/src/runtime/mem_bsd.go
index 0c05b44c08f9c3..70375615da3fe0 100644
--- a/src/runtime/mem_bsd.go
+++ b/src/runtime/mem_bsd.go
@@ -85,3 +85,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 		throw("runtime: cannot map pages in arena address space")
 	}
 }
+
+func needZeroAfterSysUnusedOS() bool {
+	return true
+}
diff --git a/src/runtime/mem_darwin.go b/src/runtime/mem_darwin.go
index 9d4de516228bf5..100512f5cdfa31 100644
--- a/src/runtime/mem_darwin.go
+++ b/src/runtime/mem_darwin.go
@@ -74,3 +74,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 		throw("runtime: cannot map pages in arena address space")
 	}
 }
+
+func needZeroAfterSysUnusedOS() bool {
+	return true
+}
diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go
index 24e006debca94a..ce25537611400e 100644
--- a/src/runtime/mem_linux.go
+++ b/src/runtime/mem_linux.go
@@ -188,3 +188,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, vmaName string) {
 		sysNoHugePageOS(v, n)
 	}
 }
+
+func needZeroAfterSysUnusedOS() bool {
+	return debug.madvdontneed == 0
+}
diff --git a/src/runtime/mem_sbrk.go b/src/runtime/mem_sbrk.go
index 5284bbd0009865..9e752df2c338b4 100644
--- a/src/runtime/mem_sbrk.go
+++ b/src/runtime/mem_sbrk.go
@@ -296,3 +296,7 @@ func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) {
 	})
 	return unsafe.Pointer(p), size
 }
+
+func needZeroAfterSysUnusedOS() bool {
+	return true
+}
diff --git a/src/runtime/mem_windows.go b/src/runtime/mem_windows.go
index 3db6fc2ba408fb..afc2dee19ff261 100644
--- a/src/runtime/mem_windows.go
+++ b/src/runtime/mem_windows.go
@@ -132,3 +132,7 @@ func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
 
 func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
 }
+
+func needZeroAfterSysUnusedOS() bool {
+	return true
+}
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index f2dc3717b1bd31..711c7790eb76bb 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1394,7 +1394,7 @@ HaveSpan:
 	}
 
 	// Initialize the span.
-	h.initSpan(s, typ, spanclass, base, npages)
+	h.initSpan(s, typ, spanclass, base, npages, scav)
 
 	if valgrindenabled {
 		valgrindMempoolMalloc(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base), npages*pageSize)
@@ -1440,11 +1440,17 @@ HaveSpan:
 
 // initSpan initializes a blank span s which will represent the range
 // [base, base+npages*pageSize). typ is the type of span being allocated.
-func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages uintptr) {
+func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages, scav uintptr) {
 	// At this point, both s != nil and base != 0, and the heap
 	// lock is no longer held. Initialize the span.
 	s.init(base, npages)
-	if h.allocNeedsZero(base, npages) {
+	// Always call allocNeedsZero to update the arena's zeroedBase watermark
+	// and determine if the memory is considered dirty.
+	needZero := h.allocNeedsZero(base, npages)
+	// If these pages were scavenged (returned to the OS), the kernel guarantees
+	// they will be zero-filled on next use (fault-in), so we can treat them as
+	// already zeroed and skip explicit clearing.
+	if (needZeroAfterSysUnused() || scav != npages*pageSize) && needZero {
 		s.needzero = 1
 	}
 	nbytes := npages * pageSize
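
The following standalone sketch, which is not part of the patch, illustrates the new needzero decision that initSpan makes above. The names needZeroAfterSysUnused, scav, and npages mirror the patch; pageSize, spanNeedsZero, and the hard-coded return value of the stand-in hook are assumptions made purely for illustration.

// Illustrative sketch of the patched initSpan condition; not runtime code.
package main

import "fmt"

const pageSize = 8192 // stand-in for the runtime's page size

// needZeroAfterSysUnused is a stand-in for the per-OS hook added by the patch.
// Per the patch it returns true on every OS except Linux, where it returns
// debug.madvdontneed == 0 (i.e. false when MADV_DONTNEED is in effect and the
// kernel zero-fills released pages on their next fault-in).
func needZeroAfterSysUnused() bool { return false }

// spanNeedsZero mirrors the new condition in initSpan: the "dirty" answer from
// allocNeedsZero only forces explicit zeroing if the OS gives no zero-fill
// guarantee after sysUnused, or if the span was not entirely scavenged.
func spanNeedsZero(dirty bool, scav, npages uintptr) bool {
	return (needZeroAfterSysUnused() || scav != npages*pageSize) && dirty
}

func main() {
	// Fully scavenged span on an OS that zero-fills on fault-in: skip zeroing.
	fmt.Println(spanNeedsZero(true, 4*pageSize, 4)) // false
	// Partially scavenged span: the unscavenged part may hold stale data, so zero it.
	fmt.Println(spanNeedsZero(true, 2*pageSize, 4)) // true
}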