Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion src/runtime/arena.go
Original file line number Diff line number Diff line change
Expand Up @@ -1051,7 +1051,11 @@ func (h *mheap) allocUserArenaChunk() *mspan {

// Model the user arena as a heap span for a large object.
spc := makeSpanClass(0, false)
h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
// A user arena chunk is always fresh from the OS. It's either newly allocated
// via sysAlloc() or reused from the readyList after a sysFault(). The memory is
// then re-mapped via sysMap(), so we can safely treat it as scavenged; the
// kernel guarantees it will be zero-filled on its next use.
h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages, userArenaChunkBytes)
s.isUserArenaChunk = true
s.elemsize -= userArenaChunkReserveBytes()
s.freeindex = 1
Expand Down
6 changes: 6 additions & 0 deletions src/runtime/mem.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,12 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
sysUnusedOS(v, n)
}

// needZeroAfterSysUnused reports whether memory returned by sysUnused must be
// zeroed for use.
//
// This is a thin platform-independent wrapper; the actual answer is
// OS-specific and comes from needZeroAfterSysUnusedOS. On platforms where the
// kernel guarantees that pages released by sysUnused are zero-filled when
// next touched, this returns false and callers may skip explicit clearing.
func needZeroAfterSysUnused() bool {
	return needZeroAfterSysUnusedOS()
}

// sysUsed transitions a memory region from Prepared to Ready. It notifies the
// operating system that the memory region is needed and ensures that the region
// may be safely accessed. This is typically a no-op on systems that don't have
Expand Down
4 changes: 4 additions & 0 deletions src/runtime/mem_aix.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,3 +79,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
throw("runtime: cannot map pages in arena address space")
}
}

// needZeroAfterSysUnusedOS reports whether memory returned by sysUnused must
// be zeroed before reuse. Always true here: on AIX we do not assume that
// pages released by sysUnused come back zero-filled on the next touch.
func needZeroAfterSysUnusedOS() bool {
	return true
}
4 changes: 4 additions & 0 deletions src/runtime/mem_bsd.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,3 +85,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
throw("runtime: cannot map pages in arena address space")
}
}

// needZeroAfterSysUnusedOS reports whether memory returned by sysUnused must
// be zeroed before reuse. Always true here: on the BSDs we do not assume that
// pages released by sysUnused come back zero-filled on the next touch.
func needZeroAfterSysUnusedOS() bool {
	return true
}
4 changes: 4 additions & 0 deletions src/runtime/mem_darwin.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,3 +74,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
throw("runtime: cannot map pages in arena address space")
}
}

// needZeroAfterSysUnusedOS reports whether memory returned by sysUnused must
// be zeroed before reuse. Always true here: on Darwin we do not assume that
// pages released by sysUnused come back zero-filled on the next touch.
func needZeroAfterSysUnusedOS() bool {
	return true
}
4 changes: 4 additions & 0 deletions src/runtime/mem_linux.go
Original file line number Diff line number Diff line change
Expand Up @@ -188,3 +188,7 @@ func sysMapOS(v unsafe.Pointer, n uintptr, vmaName string) {
sysNoHugePageOS(v, n)
}
}

// needZeroAfterSysUnusedOS reports whether memory returned by sysUnused must
// be zeroed before reuse.
//
// On Linux this depends on the madvise mode selected via the GODEBUG
// madvdontneed setting. When debug.madvdontneed == 0 the runtime presumably
// uses MADV_FREE (see sysUnusedOS — not visible here, confirm), and
// MADV_FREE'd pages may retain their old contents until the kernel actually
// reclaims them, so explicit zeroing is required. With MADV_DONTNEED
// (madvdontneed != 0), the kernel zero-fills the pages on the next fault,
// so no explicit zeroing is needed.
func needZeroAfterSysUnusedOS() bool {
	return debug.madvdontneed == 0
}
4 changes: 4 additions & 0 deletions src/runtime/mem_sbrk.go
Original file line number Diff line number Diff line change
Expand Up @@ -296,3 +296,7 @@ func sysReserveAlignedSbrk(size, align uintptr) (unsafe.Pointer, uintptr) {
})
return unsafe.Pointer(p), size
}

// needZeroAfterSysUnusedOS reports whether memory returned by sysUnused must
// be zeroed before reuse. Always true here: sbrk-based platforms have no
// mechanism that guarantees released pages are zero-filled when next used.
func needZeroAfterSysUnusedOS() bool {
	return true
}
4 changes: 4 additions & 0 deletions src/runtime/mem_windows.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,3 +132,7 @@ func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {

func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
}

// needZeroAfterSysUnusedOS reports whether memory returned by sysUnused must
// be zeroed before reuse. Always true here: on Windows we do not assume that
// pages released by sysUnused come back zero-filled on the next touch.
func needZeroAfterSysUnusedOS() bool {
	return true
}
12 changes: 9 additions & 3 deletions src/runtime/mheap.go
Original file line number Diff line number Diff line change
Expand Up @@ -1394,7 +1394,7 @@ HaveSpan:
}

// Initialize the span.
h.initSpan(s, typ, spanclass, base, npages)
h.initSpan(s, typ, spanclass, base, npages, scav)

if valgrindenabled {
valgrindMempoolMalloc(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base), npages*pageSize)
Expand Down Expand Up @@ -1440,11 +1440,17 @@ HaveSpan:

// initSpan initializes a blank span s which will represent the range
// [base, base+npages*pageSize). typ is the type of span being allocated.
func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages uintptr) {
func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages, scav uintptr) {
// At this point, both s != nil and base != 0, and the heap
// lock is no longer held. Initialize the span.
s.init(base, npages)
if h.allocNeedsZero(base, npages) {
// Always call allocNeedsZero to update the arena's zeroedBase watermark
// and determine if the memory is considered dirty.
needZero := h.allocNeedsZero(base, npages)
// If these pages were scavenged (returned to the OS), the kernel guarantees
// they will be zero-filled on next use (fault-in), so we can treat them as
// already zeroed and skip explicit clearing.
if (needZeroAfterSysUnused() || scav != npages*pageSize) && needZero {
s.needzero = 1
}
nbytes := npages * pageSize
Expand Down