diff --git a/api/next/73794.txt b/api/next/73794.txt new file mode 100644 index 00000000000000..4018c149ecbecd --- /dev/null +++ b/api/next/73794.txt @@ -0,0 +1 @@ +pkg bytes, method (*Buffer) Peek(int) ([]uint8, error) #73794 diff --git a/doc/next/6-stdlib/99-minor/bytes/73794.md b/doc/next/6-stdlib/99-minor/bytes/73794.md new file mode 100644 index 00000000000000..a44dfc10e693a6 --- /dev/null +++ b/doc/next/6-stdlib/99-minor/bytes/73794.md @@ -0,0 +1,2 @@ +The new [Buffer.Peek] method returns the next n bytes from the buffer without +advancing it. diff --git a/src/bytes/buffer.go b/src/bytes/buffer.go index 9684513942da88..3eb5b350c382c6 100644 --- a/src/bytes/buffer.go +++ b/src/bytes/buffer.go @@ -77,6 +77,18 @@ func (b *Buffer) String() string { return string(b.buf[b.off:]) } +// Peek returns the next n bytes without advancing the buffer. +// If Peek returns fewer than n bytes, it also returns [io.EOF]. +// The slice is only valid until the next call to a read or write method. +// The slice aliases the buffer content at least until the next buffer modification, +// so immediate changes to the slice will affect the result of future reads. +func (b *Buffer) Peek(n int) ([]byte, error) { + if b.Len() < n { + return b.buf[b.off:], io.EOF + } + return b.buf[b.off : b.off+n], nil +} + // empty reports whether the unread portion of the buffer is empty. 
func (b *Buffer) empty() bool { return len(b.buf) <= b.off } diff --git a/src/bytes/buffer_test.go b/src/bytes/buffer_test.go index b46ba1204eb806..5f5cc483b03f2d 100644 --- a/src/bytes/buffer_test.go +++ b/src/bytes/buffer_test.go @@ -531,6 +531,34 @@ func TestReadString(t *testing.T) { } } +var peekTests = []struct { + buffer string + n int + expected string + err error +}{ + {"", 0, "", nil}, + {"aaa", 3, "aaa", nil}, + {"foobar", 2, "fo", nil}, + {"a", 2, "a", io.EOF}, +} + +func TestPeek(t *testing.T) { + for _, test := range peekTests { + buf := NewBufferString(test.buffer) + bytes, err := buf.Peek(test.n) + if string(bytes) != test.expected { + t.Errorf("expected %q, got %q", test.expected, bytes) + } + if err != test.err { + t.Errorf("expected error %v, got %v", test.err, err) + } + if buf.Len() != len(test.buffer) { + t.Errorf("bad length after peek: %d, want %d", buf.Len(), len(test.buffer)) + } + } +} + func BenchmarkReadString(b *testing.B) { const n = 32 << 10 diff --git a/src/bytes/bytes_test.go b/src/bytes/bytes_test.go index f18915c879e097..9547ede312fc0f 100644 --- a/src/bytes/bytes_test.go +++ b/src/bytes/bytes_test.go @@ -1224,7 +1224,7 @@ func TestMap(t *testing.T) { // Run a couple of awful growth/shrinkage tests a := tenRunes('a') - // 1. Grow. This triggers two reallocations in Map. + // 1. Grow. This triggers two reallocations in Map. 
maxRune := func(r rune) rune { return unicode.MaxRune } m := Map(maxRune, []byte(a)) expect := tenRunes(unicode.MaxRune) diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s index 109a3d8316678b..ae10f347bba101 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64.s +++ b/src/cmd/asm/internal/asm/testdata/arm64.s @@ -630,6 +630,8 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 FMOVS F1, 0x44332211(R2) // FMOVS F1, 1144201745(R2) FMOVD F1, 0x1007000(R2) // FMOVD F1, 16805888(R2) FMOVD F1, 0x44332211(R2) // FMOVD F1, 1144201745(R2) + FMOVQ F1, 0x1003000(R2) // FMOVQ F1, 16789504(R2) + FMOVQ F1, 0x44332211(R2) // FMOVQ F1, 1144201745(R2) MOVB 0x1000000(R1), R2 // MOVB 16777216(R1), R2 MOVB 0x44332211(R1), R2 // MOVB 1144201745(R1), R2 @@ -643,6 +645,8 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 FMOVS 0x44332211(R1), F2 // FMOVS 1144201745(R1), F2 FMOVD 0x1000000(R1), F2 // FMOVD 16777216(R1), F2 FMOVD 0x44332211(R1), F2 // FMOVD 1144201745(R1), F2 + FMOVQ 0x1000000(R1), F2 // FMOVQ 16777216(R1), F2 + FMOVQ 0x44332211(R1), F2 // FMOVQ 1144201745(R1), F2 // shifted or extended register offset. MOVD (R2)(R6.SXTW), R4 // 44c866f8 diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s index 6e2a86969d54cb..e0619f8ecddd4c 100644 --- a/src/cmd/asm/internal/asm/testdata/loong64enc1.s +++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s @@ -533,12 +533,18 @@ lable2: XVMOVQ X28.V[3], X8 // 88ef0377 XVMOVQ X27.V[0], X9 // 69e30377 - //Move vector element to vector. + // Move vector element to vector. VMOVQ V1.B[3], V9.B16 // 298cf772 VMOVQ V2.H[2], V8.H8 // 48c8f772 VMOVQ V3.W[1], V7.W4 // 67e4f772 VMOVQ V4.V[0], V6.V2 // 86f0f772 + // Move vector register to vector register. + VMOVQ V1, V9 // 29002d73 + VMOVQ V2, V8 // 48002d73 + XVMOVQ X3, X7 // 67002d77 + XVMOVQ X4, X6 // 86002d77 + // Load data from memory and broadcast to each element of a vector register: VMOVQ offset(Rj), . 
VMOVQ (R4), V0.B16 // 80008030 VMOVQ 1(R4), V0.B16 // 80048030 @@ -1017,6 +1023,12 @@ lable2: XVSHUF4IV $8, X1, X2 // 22209c77 XVSHUF4IV $15, X1, X2 // 223c9c77 + // VPERMIW, XVPERMI{W,V,Q} instructions + VPERMIW $0x1B, V1, V2 // VPERMIW $27, V1, V2 // 226ce473 + XVPERMIW $0x2B, X1, X2 // XVPERMIW $43, X1, X2 // 22ace477 + XVPERMIV $0x3B, X1, X2 // XVPERMIV $59, X1, X2 // 22ece877 + XVPERMIQ $0x4B, X1, X2 // XVPERMIQ $75, X1, X2 // 222ced77 + // [X]VSETEQZ.V, [X]VSETNEZ.V VSETEQV V1, FCC0 // 20989c72 VSETNEV V1, FCC0 // 209c9c72 diff --git a/src/cmd/cgo/ast.go b/src/cmd/cgo/ast.go index 861479db7acf5b..97b18cd22d4d4c 100644 --- a/src/cmd/cgo/ast.go +++ b/src/cmd/cgo/ast.go @@ -363,7 +363,8 @@ func (f *File) walk(x interface{}, context astContext, visit func(*File, interfa // everything else just recurs default: - f.walkUnexpected(x, context, visit) + error_(token.NoPos, "unexpected type %T in walk", x) + panic("unexpected type") case nil: @@ -396,6 +397,9 @@ func (f *File) walk(x interface{}, context astContext, visit func(*File, interfa case *ast.IndexExpr: f.walk(&n.X, ctxExpr, visit) f.walk(&n.Index, ctxExpr, visit) + case *ast.IndexListExpr: + f.walk(&n.X, ctxExpr, visit) + f.walk(n.Indices, ctxExpr, visit) case *ast.SliceExpr: f.walk(&n.X, ctxExpr, visit) if n.Low != nil { @@ -434,8 +438,8 @@ func (f *File) walk(x interface{}, context astContext, visit func(*File, interfa case *ast.StructType: f.walk(n.Fields, ctxField, visit) case *ast.FuncType: - if tparams := funcTypeTypeParams(n); tparams != nil { - f.walk(tparams, ctxParam, visit) + if n.TypeParams != nil { + f.walk(n.TypeParams, ctxParam, visit) } f.walk(n.Params, ctxParam, visit) if n.Results != nil { @@ -524,8 +528,8 @@ func (f *File) walk(x interface{}, context astContext, visit func(*File, interfa f.walk(n.Values, ctxExpr, visit) } case *ast.TypeSpec: - if tparams := typeSpecTypeParams(n); tparams != nil { - f.walk(tparams, ctxParam, visit) + if n.TypeParams != nil { + f.walk(n.TypeParams, ctxParam, 
visit) } f.walk(&n.Type, ctxType, visit) diff --git a/src/cmd/cgo/ast_go1.go b/src/cmd/cgo/ast_go1.go deleted file mode 100644 index 2f65f0f718356f..00000000000000 --- a/src/cmd/cgo/ast_go1.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build compiler_bootstrap - -package main - -import ( - "go/ast" - "go/token" -) - -func (f *File) walkUnexpected(x interface{}, context astContext, visit func(*File, interface{}, astContext)) { - error_(token.NoPos, "unexpected type %T in walk", x) - panic("unexpected type") -} - -func funcTypeTypeParams(n *ast.FuncType) *ast.FieldList { - return nil -} - -func typeSpecTypeParams(n *ast.TypeSpec) *ast.FieldList { - return nil -} diff --git a/src/cmd/cgo/ast_go118.go b/src/cmd/cgo/ast_go118.go deleted file mode 100644 index ced30728dc9a79..00000000000000 --- a/src/cmd/cgo/ast_go118.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !compiler_bootstrap - -package main - -import ( - "go/ast" - "go/token" -) - -func (f *File) walkUnexpected(x interface{}, context astContext, visit func(*File, interface{}, astContext)) { - switch n := x.(type) { - default: - error_(token.NoPos, "unexpected type %T in walk", x) - panic("unexpected type") - - case *ast.IndexListExpr: - f.walk(&n.X, ctxExpr, visit) - f.walk(n.Indices, ctxExpr, visit) - } -} - -func funcTypeTypeParams(n *ast.FuncType) *ast.FieldList { - return n.TypeParams -} - -func typeSpecTypeParams(n *ast.TypeSpec) *ast.FieldList { - return n.TypeParams -} diff --git a/src/cmd/cgo/doc.go b/src/cmd/cgo/doc.go index ef5272299bbf07..7e8486874ef142 100644 --- a/src/cmd/cgo/doc.go +++ b/src/cmd/cgo/doc.go @@ -127,7 +127,7 @@ environment variable when running the go tool: set it to 1 to enable the use of cgo, and to 0 to disable it. The go tool will set the build constraint "cgo" if cgo is enabled. The special import "C" implies the "cgo" build constraint, as though the file also said -"//go:build cgo". Therefore, if cgo is disabled, files that import +"//go:build cgo". Therefore, if cgo is disabled, files that import "C" will not be built by the go tool. (For more about build constraints see https://golang.org/pkg/go/build/#hdr-Build_Constraints). diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index 886ddf2d461b65..d1b629057ab4a8 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -1056,7 +1056,7 @@ func (p *Package) rewriteCall(f *File, call *Call) (string, bool) { func (p *Package) needsPointerCheck(f *File, t ast.Expr, arg ast.Expr) bool { // An untyped nil does not need a pointer check, and when // _cgoCheckPointer returns the untyped nil the type assertion we - // are going to insert will fail. Easier to just skip nil arguments. + // are going to insert will fail. Easier to just skip nil arguments. // TODO: Note that this fails if nil is shadowed. 
if id, ok := arg.(*ast.Ident); ok && id.Name == "nil" { return false @@ -3010,7 +3010,7 @@ func (c *typeConv) FuncType(dtype *dwarf.FuncType, pos token.Pos) *FuncType { for i, f := range dtype.ParamType { // gcc's DWARF generator outputs a single DotDotDotType parameter for // function pointers that specify no parameters (e.g. void - // (*__cgo_0)()). Treat this special case as void. This case is + // (*__cgo_0)()). Treat this special case as void. This case is // invalid according to ISO C anyway (i.e. void (*__cgo_1)(...) is not // legal). if _, ok := f.(*dwarf.DotDotDotType); ok && i == 0 { @@ -3081,7 +3081,7 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct off := int64(0) // Rename struct fields that happen to be named Go keywords into - // _{keyword}. Create a map from C ident -> Go ident. The Go ident will + // _{keyword}. Create a map from C ident -> Go ident. The Go ident will // be mangled. Any existing identifier that already has the same name on // the C-side will cause the Go-mangled version to be prefixed with _. // (e.g. in a struct with fields '_type' and 'type', the latter would be @@ -3309,7 +3309,7 @@ func godefsFields(fld []*ast.Field) { // fieldPrefix returns the prefix that should be removed from all the // field names when generating the C or Go code. For generated // C, we leave the names as is (tv_sec, tv_usec), since that's what -// people are used to seeing in C. For generated Go code, such as +// people are used to seeing in C. For generated Go code, such as // package syscall's data structures, we drop a common prefix // (so sec, usec, which will get turned into Sec, Usec for exporting). 
func fieldPrefix(fld []*ast.Field) string { @@ -3456,7 +3456,7 @@ func (c *typeConv) badCFType(dt *dwarf.TypedefType) bool { // Tagged pointer support // Low-bit set means tagged object, next 3 bits (currently) // define the tagged object class, next 4 bits are for type -// information for the specific tagged object class. Thus, +// information for the specific tagged object class. Thus, // the low byte is for type info, and the rest of a pointer // (32 or 64-bit) is for payload, whatever the tagged class. // diff --git a/src/cmd/cgo/internal/test/buildid_linux.go b/src/cmd/cgo/internal/test/buildid_linux.go index 84d3edb664eb25..7e0fd0fd126a02 100644 --- a/src/cmd/cgo/internal/test/buildid_linux.go +++ b/src/cmd/cgo/internal/test/buildid_linux.go @@ -4,9 +4,9 @@ package cgotest -// Test that we have no more than one build ID. In the past we used +// Test that we have no more than one build ID. In the past we used // to generate a separate build ID for each package using cgo, and the -// linker concatenated them all. We don't want that--we only want +// linker concatenated them all. We don't want that--we only want // one. import ( @@ -42,7 +42,7 @@ sections: for len(d) > 0 { // ELF standards differ as to the sizes in - // note sections. Both the GNU linker and + // note sections. Both the GNU linker and // gold always generate 32-bit sizes, so that // is what we assume here. diff --git a/src/cmd/cgo/internal/test/callback.go b/src/cmd/cgo/internal/test/callback.go index 478bf8294af3a5..8f8dd8fded6f15 100644 --- a/src/cmd/cgo/internal/test/callback.go +++ b/src/cmd/cgo/internal/test/callback.go @@ -40,7 +40,7 @@ func nestedCall(f func()) { callbackMutex.Unlock() // Pass the address of i because the C function was written to - // take a pointer. We could pass an int if we felt like + // take a pointer. We could pass an int if we felt like // rewriting the C code. 
C.callback(unsafe.Pointer(&i)) diff --git a/src/cmd/cgo/internal/test/gcc68255/a.go b/src/cmd/cgo/internal/test/gcc68255/a.go index e106dee3ec023d..cc4804b90bd122 100644 --- a/src/cmd/cgo/internal/test/gcc68255/a.go +++ b/src/cmd/cgo/internal/test/gcc68255/a.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Test that it's OK to have C code that does nothing other than -// initialize a global variable. This used to fail with gccgo. +// initialize a global variable. This used to fail with gccgo. package gcc68255 diff --git a/src/cmd/cgo/internal/teststdio/testdata/fib.go b/src/cmd/cgo/internal/teststdio/testdata/fib.go index 96173683353151..69147880c20df2 100644 --- a/src/cmd/cgo/internal/teststdio/testdata/fib.go +++ b/src/cmd/cgo/internal/teststdio/testdata/fib.go @@ -5,7 +5,7 @@ //go:build test_run // Compute Fibonacci numbers with two goroutines -// that pass integers back and forth. No actual +// that pass integers back and forth. No actual // concurrency, just threads and synchronization // and foreign code on multiple pthreads. diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go index 5e08427daf9cc2..955d64b9569406 100644 --- a/src/cmd/cgo/main.go +++ b/src/cmd/cgo/main.go @@ -72,8 +72,8 @@ type File struct { ExpFunc []*ExpFunc // exported functions for this file Name map[string]*Name // map from Go name to Name NamePos map[*Name]token.Pos // map from Name to position of the first reference - NoCallbacks map[string]bool // C function names that with #cgo nocallback directive - NoEscapes map[string]bool // C function names that with #cgo noescape directive + NoCallbacks map[string]bool // C function names with #cgo nocallback directive + NoEscapes map[string]bool // C function names with #cgo noescape directive Edit *edit.Buffer debugs []*debug // debug data from iterations of gccDebug. Initialized by File.loadDebug. 
diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go index a2bcdf89c5ad44..394e766d4e5328 100644 --- a/src/cmd/cgo/out.go +++ b/src/cmd/cgo/out.go @@ -1144,6 +1144,10 @@ func (p *Package) writeExports(fgo2, fm, fgcc, fgcch io.Writer) { if !p.hasPointer(nil, atype, false) { return } + + // Use the export'ed file/line in error messages. + pos := fset.Position(exp.Func.Pos()) + fmt.Fprintf(fgo2, "//line %s:%d\n", pos.Filename, pos.Line) fmt.Fprintf(fgo2, "\t_cgoCheckResult(a.r%d)\n", i) }) } diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules index 49bdbc875fc3cd..31829a5eed7d0f 100644 --- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules +++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules @@ -823,16 +823,16 @@ (F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z) // Test for -∞ (bit 0) using 64 bit classify instruction. -(FLTD x (FMOVDconst [c])) && float64ExactBits(c, -math.MaxFloat64) => (ANDI [1] (FCLASSD x)) -(FLED (FMOVDconst [c]) x) && float64ExactBits(c, -math.MaxFloat64) => (SNEZ (ANDI [0xff &^ 1] (FCLASSD x))) -(FEQD x (FMOVDconst [c])) && float64ExactBits(c, math.Inf(-1)) => (ANDI [1] (FCLASSD x)) -(FNED x (FMOVDconst [c])) && float64ExactBits(c, math.Inf(-1)) => (SEQZ (ANDI [1] (FCLASSD x))) +(FLTD x (FMOVDconst [-math.MaxFloat64])) => (ANDI [0b00_0000_0001] (FCLASSD x)) +(FLED (FMOVDconst [-math.MaxFloat64]) x) => (SNEZ (ANDI [0b00_1111_1110] (FCLASSD x))) +(FEQD x (FMOVDconst [math.Inf(-1)])) => (ANDI [0b00_0000_0001] (FCLASSD x)) +(FNED x (FMOVDconst [math.Inf(-1)])) => (SEQZ (ANDI [0b00_0000_0001] (FCLASSD x))) // Test for +∞ (bit 7) using 64 bit classify instruction. 
-(FLTD (FMOVDconst [c]) x) && float64ExactBits(c, math.MaxFloat64) => (SNEZ (ANDI [1<<7] (FCLASSD x))) -(FLED x (FMOVDconst [c])) && float64ExactBits(c, math.MaxFloat64) => (SNEZ (ANDI [0xff &^ (1<<7)] (FCLASSD x))) -(FEQD x (FMOVDconst [c])) && float64ExactBits(c, math.Inf(1)) => (SNEZ (ANDI [1<<7] (FCLASSD x))) -(FNED x (FMOVDconst [c])) && float64ExactBits(c, math.Inf(1)) => (SEQZ (ANDI [1<<7] (FCLASSD x))) +(FLTD (FMOVDconst [math.MaxFloat64]) x) => (SNEZ (ANDI [0b00_1000_0000] (FCLASSD x))) +(FLED x (FMOVDconst [math.MaxFloat64])) => (SNEZ (ANDI [0b00_0111_1111] (FCLASSD x))) +(FEQD x (FMOVDconst [math.Inf(1)])) => (SNEZ (ANDI [0b00_1000_0000] (FCLASSD x))) +(FNED x (FMOVDconst [math.Inf(1)])) => (SEQZ (ANDI [0b00_1000_0000] (FCLASSD x))) // // Optimisations for rva22u64 and above. diff --git a/src/cmd/compile/internal/ssa/_gen/Wasm.rules b/src/cmd/compile/internal/ssa/_gen/Wasm.rules index f632a01109f764..60281522539c73 100644 --- a/src/cmd/compile/internal/ssa/_gen/Wasm.rules +++ b/src/cmd/compile/internal/ssa/_gen/Wasm.rules @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +(Last ___) => v.Args[len(v.Args)-1] + // Lowering arithmetic (Add(64|32|16|8|Ptr) ...) => (I64Add ...) (Add(64|32)F ...) => (F(64|32)Add ...) @@ -44,6 +46,37 @@ (Not ...) => (I64Eqz ...) +(Avg64u x y) => (I64Add (I64ShrU (I64Sub x y) (I64Const [1])) y) + +// High word of multiply without carry bits; see Hacker's Delight, 2nd. ed, Figure 8-2, p. 174. +(Hmul64 x y) => + (Last + x0: (ZeroExt32to64 x) + x1: (I64ShrS x (I64Const [32])) + y0: (ZeroExt32to64 y) + y1: (I64ShrS y (I64Const [32])) + x0y0: (I64Mul x0 y0) + tt: (I64Add (I64Mul x1 y0) (I64ShrU x0y0 (I64Const [32]))) + w1: (I64Add (I64Mul x0 y1) (ZeroExt32to64 tt)) + w2: (I64ShrS tt (I64Const [32])) + (I64Add (I64Add (I64Mul x1 y1) w2) (I64ShrS w1 (I64Const [32])))) + +// Same as Hmul64 but signed shifts now unsigned. 
+(Hmul64u x y) => + (Last + x0: (ZeroExt32to64 x) + x1: (I64ShrU x (I64Const [32])) + y0: (ZeroExt32to64 y) + y1: (I64ShrU y (I64Const [32])) + w0: (I64Mul x0 y0) + tt: (I64Add (I64Mul x1 y0) (I64ShrU w0 (I64Const [32]))) + w1: (I64Add (I64Mul x0 y1) (ZeroExt32to64 tt)) + w2: (I64ShrU tt (I64Const [32])) + hi: (I64Add (I64Add (I64Mul x1 y1) w2) (I64ShrU w1 (I64Const [32])))) + +(Select0 (Mul64uhilo x y)) => (Hmul64u x y) +(Select1 (Mul64uhilo x y)) => (I64Mul x y) + // Lowering pointer arithmetic (OffPtr ...) => (I64AddConst ...) diff --git a/src/cmd/compile/internal/ssa/_gen/divmod.rules b/src/cmd/compile/internal/ssa/_gen/divmod.rules index 21e0a194068df8..7dd7d245bd0260 100644 --- a/src/cmd/compile/internal/ssa/_gen/divmod.rules +++ b/src/cmd/compile/internal/ssa/_gen/divmod.rules @@ -79,17 +79,9 @@ // The magic number m for c is ⌈2^k/c⌉, so we can use // (m+1)/2 = ⌈2^k/(c/2)⌉ instead. // -// 8. An unsigned divide on systems with an avg instruction. +// 8. A general unsigned divide using an avg instruction. // We noted above that (x*((1<>N>>s = ((x*m)>>N+x)>>s. // Let hi = (x*m)>>N, so we want (hi+x) >> s = avg(hi, x) >> (s-1). -// -// 9. Unsigned 64-bit divide by 16-bit constant on 32-bit systems. -// Use long division with 16-bit digits. -// -// Note: All systems have Hmul and Avg except for wasm, and the -// wasm JITs may well apply all these optimizations already anyway, -// so it may be worth looking into avoiding this pass entirely on wasm -// and dropping all the useAvg useHmul uncertainty. // Case 1. Signed divides where 2N ≤ register size. (Div8 x (Const8 [c])) && smagicOK8(c) => @@ -112,13 +104,13 @@ (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) // Case 2. Signed divides where m is even. 
-(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul => +(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 => (Sub32 (Rsh32x64 (Hmul32 x (Const32 [int32(smagic32(c).m/2)])) (Const64 [smagic32(c).s - 1])) (Rsh32x64 x (Const64 [31]))) -(Div64 x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul => +(Div64 x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 == 0 => (Sub64 (Rsh64x64 (Hmul64 x (Const64 [int64(smagic64(c).m/2)])) @@ -126,13 +118,13 @@ (Rsh64x64 x (Const64 [63]))) // Case 3. Signed divides where m is odd. -(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul => +(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 => (Sub32 (Rsh32x64 (Add32 x (Hmul32 x (Const32 [int32(smagic32(c).m)]))) (Const64 [smagic32(c).s])) (Rsh32x64 x (Const64 [31]))) -(Div64 x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul => +(Div64 x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 != 0 => (Sub64 (Rsh64x64 (Add64 x (Hmul64 x (Const64 [int64(smagic64(c).m)]))) @@ -149,11 +141,11 @@ (Rsh64Ux64 (Mul64 (SignExt32to64 x) (Const64 [int64(smagic32(c).m)])) (Const64 [32 + smagic32(c).s])) -(Div32u x (Const32 [c])) && t.IsSigned() && smagicOK32(c) && config.RegSize == 4 && config.useHmul => +(Div32u x (Const32 [c])) && t.IsSigned() && smagicOK32(c) && config.RegSize == 4 => (Rsh32Ux64 (Hmul32u x (Const32 [int32(smagic32(c).m)])) (Const64 [smagic32(c).s])) -(Div64u x (Const64 [c])) && t.IsSigned() && smagicOK64(c) && config.useHmul => +(Div64u x (Const64 [c])) && t.IsSigned() && smagicOK64(c) => (Rsh64Ux64 (Hmul64u x (Const64 [int64(smagic64(c).m)])) (Const64 [smagic64(c).s])) @@ -181,11 +173,11 @@ (Rsh64Ux64 (Mul64 (ZeroExt32to64 x) (Const64 [int64(1<<31 + umagic32(c).m/2)])) (Const64 [32 + umagic32(c).s - 1]))) -(Div32u x (Const32 [c])) && umagicOK32(c) && 
umagic32(c).m&1 == 0 && config.RegSize == 4 && config.useHmul => +(Div32u x (Const32 [c])) && umagicOK32(c) && umagic32(c).m&1 == 0 && config.RegSize == 4 => (Rsh32Ux64 (Hmul32u x (Const32 [int32(1<<31 + umagic32(c).m/2)])) (Const64 [umagic32(c).s - 1])) -(Div64u x (Const64 [c])) && umagicOK64(c) && umagic64(c).m&1 == 0 && config.useHmul => +(Div64u x (Const64 [c])) && umagicOK64(c) && umagic64(c).m&1 == 0 => (Rsh64Ux64 (Hmul64u x (Const64 [int64(1<<63 + umagic64(c).m/2)])) (Const64 [umagic64(c).s - 1])) @@ -205,39 +197,39 @@ (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1])) (Const64 [int64(1<<31 + (umagic32(c).m+1)/2)])) (Const64 [32 + umagic32(c).s - 2]))) -(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul => +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && c&1 == 0 => (Rsh32Ux64 (Hmul32u (Rsh32Ux64 x (Const64 [1])) (Const32 [int32(1<<31 + (umagic32(c).m+1)/2)])) (Const64 [umagic32(c).s - 2])) -(Div64u x (Const64 [c])) && umagicOK64(c) && c&1 == 0 && config.useHmul => +(Div64u x (Const64 [c])) && umagicOK64(c) && c&1 == 0 => (Rsh64Ux64 (Hmul64u (Rsh64Ux64 x (Const64 [1])) (Const64 [int64(1<<63 + (umagic64(c).m+1)/2)])) (Const64 [umagic64(c).s - 2])) -// Case 8. Unsigned divide on systems with avg. -(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && config.useAvg => +// Case 8. Unsigned divide using avg. 
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 => (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) (Mul32 (ZeroExt16to32 x) (Const32 [int32(umagic16(c).m)]))) (Const64 [16 + umagic16(c).s - 1]))) -(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && config.useAvg => +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 => (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) (Mul64 (ZeroExt32to64 x) (Const64 [int64(umagic32(c).m)]))) (Const64 [32 + umagic32(c).s - 1]))) -(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul => +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 => (Rsh32Ux64 (Avg32u x (Hmul32u x (Const32 [int32(umagic32(c).m)]))) (Const64 [umagic32(c).s - 1])) -(Div64u x (Const64 [c])) && umagicOK64(c) && config.useAvg && config.useHmul => +(Div64u x (Const64 [c])) && umagicOK64(c) => (Rsh64Ux64 (Avg64u x (Hmul64u x (Const64 [int64(umagic64(c).m)]))) (Const64 [umagic64(c).s - 1])) diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 819d77e420a783..ec0240941cfa21 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -41,8 +41,6 @@ type Config struct { hasGReg bool // has hardware g register ctxt *obj.Link // Generic arch information optimize bool // Do optimization - useAvg bool // Use optimizations that need Avg* operations - useHmul bool // Use optimizations that need Hmul* operations SoftFloat bool // Race bool // race detector enabled BigEndian bool // @@ -168,8 +166,6 @@ type Frontend interface { // NewConfig returns a new configuration object for the given architecture. 
func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat bool) *Config { c := &Config{arch: arch, Types: types} - c.useAvg = true - c.useHmul = true switch arch { case "amd64": c.PtrSize = 8 @@ -359,8 +355,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo c.FPReg = framepointerRegWasm c.LinkReg = linkRegWasm c.hasGReg = true - c.useAvg = false - c.useHmul = false c.unalignedOK = true c.haveCondSelect = true default: diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index d0adff788c0a4f..cdf290e2aa8e4f 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -203,9 +203,27 @@ func (sr shadowRange) merge(lo, hi int64) shadowRange { // reaches stores then we delete all the stores. The other operations will then // be eliminated by the dead code elimination pass. func elimDeadAutosGeneric(f *Func) { - addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches - elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is - var used ir.NameSet // used autos that must be kept + addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches + elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is + move := make(map[*ir.Name]ir.NameSet) // for a (Move &y &x _) and y is unused, move[y].Add(x) + var used ir.NameSet // used autos that must be kept + + // Adds a name to used and, when it is the target of a move, also + // propagates the used state to its source. 
+ var usedAdd func(n *ir.Name) bool + usedAdd = func(n *ir.Name) bool { + if used.Has(n) { + return false + } + used.Add(n) + if s := move[n]; s != nil { + delete(move, n) + for n := range s { + usedAdd(n) + } + } + return true + } // visit the value and report whether any of the maps are updated visit := func(v *Value) (changed bool) { @@ -244,10 +262,7 @@ func elimDeadAutosGeneric(f *Func) { if !ok || (n.Class != ir.PAUTO && !isABIInternalParam(f, n)) { return } - if !used.Has(n) { - used.Add(n) - changed = true - } + changed = usedAdd(n) || changed return case OpStore, OpMove, OpZero: // v should be eliminated if we eliminate the auto. @@ -279,10 +294,22 @@ func elimDeadAutosGeneric(f *Func) { if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil { for _, a := range args { if n, ok := addr[a]; ok { - if !used.Has(n) { - used.Add(n) - changed = true + // If the addr of n is used by an OpMove as its source arg, + // and the OpMove's target arg is the addr of a unused name, + // then temporarily treat n as unused, and record in move map. + if nam, ok := elim[v]; ok && v.Op == OpMove && !used.Has(nam) { + if used.Has(n) { + continue + } + s := move[nam] + if s == nil { + s = ir.NameSet{} + move[nam] = s + } + s.Add(n) + continue } + changed = usedAdd(n) || changed } } return @@ -291,17 +318,21 @@ func elimDeadAutosGeneric(f *Func) { // Propagate any auto addresses through v. var node *ir.Name for _, a := range args { - if n, ok := addr[a]; ok && !used.Has(n) { + if n, ok := addr[a]; ok { if node == nil { - node = n - } else if node != n { + if !used.Has(n) { + node = n + } + } else { + if node == n { + continue + } // Most of the time we only see one pointer // reaching an op, but some ops can take // multiple pointers (e.g. NeqPtr, Phi etc.). // This is rare, so just propagate the first // value to keep things simple. 
- used.Add(n) - changed = true + changed = usedAdd(n) || changed } } } @@ -316,8 +347,7 @@ func elimDeadAutosGeneric(f *Func) { } if addr[v] != node { // This doesn't happen in practice, but catch it just in case. - used.Add(node) - changed = true + changed = usedAdd(node) || changed } return } @@ -336,9 +366,8 @@ func elimDeadAutosGeneric(f *Func) { } // keep the auto if its address reaches a control value for _, c := range b.ControlValues() { - if n, ok := addr[c]; ok && !used.Has(n) { - used.Add(n) - changed = true + if n, ok := addr[c]; ok { + changed = usedAdd(n) || changed } } } diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 0009de4fa69608..b5174acbc99ccc 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -119,10 +119,12 @@ import ( "cmd/compile/internal/types" "cmd/internal/src" "cmd/internal/sys" + "cmp" "fmt" "internal/buildcfg" "math" "math/bits" + "slices" "unsafe" ) @@ -1013,9 +1015,11 @@ func (s *regAllocState) regalloc(f *Func) { // Initialize regValLiveSet and uses fields for this block. // Walk backwards through the block doing liveness analysis. regValLiveSet.clear() - for _, e := range s.live[b.ID] { - s.addUse(e.ID, int32(len(b.Values))+e.dist, e.pos) // pseudo-uses from beyond end of block - regValLiveSet.add(e.ID) + if s.live != nil { + for _, e := range s.live[b.ID] { + s.addUse(e.ID, int32(len(b.Values))+e.dist, e.pos) // pseudo-uses from beyond end of block + regValLiveSet.add(e.ID) + } } for _, v := range b.ControlValues() { if s.values[v.ID].needReg { @@ -1335,7 +1339,9 @@ func (s *regAllocState) regalloc(f *Func) { } // Load static desired register info at the end of the block. - desired.copy(&s.desired[b.ID]) + if s.desired != nil { + desired.copy(&s.desired[b.ID]) + } // Check actual assigned registers at the start of the next block(s). 
// Dynamically assigned registers will trump the static @@ -1377,7 +1383,7 @@ func (s *regAllocState) regalloc(f *Func) { } } // Walk values backwards computing desired register info. - // See computeLive for more comments. + // See computeDesired for more comments. for i := len(oldSched) - 1; i >= 0; i-- { v := oldSched[i] prefs := desired.remove(v.ID) @@ -2044,8 +2050,10 @@ func (s *regAllocState) regalloc(f *Func) { if checkEnabled { regValLiveSet.clear() - for _, x := range s.live[b.ID] { - regValLiveSet.add(x.ID) + if s.live != nil { + for _, x := range s.live[b.ID] { + regValLiveSet.add(x.ID) + } } for r := register(0); r < s.numRegs; r++ { v := s.regs[r].v @@ -2062,37 +2070,39 @@ func (s *regAllocState) regalloc(f *Func) { // isn't in a register, generate a use for the spill location. // We need to remember this information so that // the liveness analysis in stackalloc is correct. - for _, e := range s.live[b.ID] { - vi := &s.values[e.ID] - if vi.regs != 0 { - // in a register, we'll use that source for the merge. - continue - } - if vi.rematerializeable { - // we'll rematerialize during the merge. - continue - } - if s.f.pass.debug > regDebug { - fmt.Printf("live-at-end spill for %s at %s\n", s.orig[e.ID], b) + if s.live != nil { + for _, e := range s.live[b.ID] { + vi := &s.values[e.ID] + if vi.regs != 0 { + // in a register, we'll use that source for the merge. + continue + } + if vi.rematerializeable { + // we'll rematerialize during the merge. + continue + } + if s.f.pass.debug > regDebug { + fmt.Printf("live-at-end spill for %s at %s\n", s.orig[e.ID], b) + } + spill := s.makeSpill(s.orig[e.ID], b) + s.spillLive[b.ID] = append(s.spillLive[b.ID], spill.ID) } - spill := s.makeSpill(s.orig[e.ID], b) - s.spillLive[b.ID] = append(s.spillLive[b.ID], spill.ID) - } - // Clear any final uses. - // All that is left should be the pseudo-uses added for values which - // are live at the end of b. 
- for _, e := range s.live[b.ID] { - u := s.values[e.ID].uses - if u == nil { - f.Fatalf("live at end, no uses v%d", e.ID) - } - if u.next != nil { - f.Fatalf("live at end, too many uses v%d", e.ID) + // Clear any final uses. + // All that is left should be the pseudo-uses added for values which + // are live at the end of b. + for _, e := range s.live[b.ID] { + u := s.values[e.ID].uses + if u == nil { + f.Fatalf("live at end, no uses v%d", e.ID) + } + if u.next != nil { + f.Fatalf("live at end, too many uses v%d", e.ID) + } + s.values[e.ID].uses = nil + u.next = s.freeUseRecords + s.freeUseRecords = u } - s.values[e.ID].uses = nil - u.next = s.freeUseRecords - s.freeUseRecords = u } // allocReg may have dropped registers from startRegsMask that @@ -2192,8 +2202,8 @@ func (s *regAllocState) placeSpills() { best := v.Block bestArg := v var bestDepth int16 - if l := s.loopnest.b2l[best.ID]; l != nil { - bestDepth = l.depth + if s.loopnest != nil && s.loopnest.b2l[best.ID] != nil { + bestDepth = s.loopnest.b2l[best.ID].depth } b := best const maxSpillSearch = 100 @@ -2215,8 +2225,8 @@ func (s *regAllocState) placeSpills() { } var depth int16 - if l := s.loopnest.b2l[b.ID]; l != nil { - depth = l.depth + if s.loopnest != nil && s.loopnest.b2l[b.ID] != nil { + depth = s.loopnest.b2l[b.ID].depth } if depth > bestDepth { // Don't push the spill into a deeper loop. @@ -2796,16 +2806,18 @@ type liveInfo struct { // computeLive computes a map from block ID to a list of value IDs live at the end // of that block. Together with the value ID is a count of how many instructions // to the next use of that value. The resulting map is stored in s.live. -// computeLive also computes the desired register information at the end of each block. -// This desired register information is stored in s.desired. -// TODO: this could be quadratic if lots of variables are live across lots of -// basic blocks. 
Figure out a way to make this function (or, more precisely, the user -// of this function) require only linear size & time. func (s *regAllocState) computeLive() { f := s.f + // single block functions do not have variables that are live across + // branches + if len(f.Blocks) == 1 { + return + } + po := f.postorder() s.live = make([][]liveInfo, f.NumBlocks()) s.desired = make([]desiredState, f.NumBlocks()) - var phis []*Value + s.loopnest = f.loopnest() + rematIDs := make([]ID, 0, 64) live := f.newSparseMapPos(f.NumValues()) @@ -2813,31 +2825,63 @@ func (s *regAllocState) computeLive() { t := f.newSparseMapPos(f.NumValues()) defer f.retSparseMapPos(t) - // Keep track of which value we want in each register. - var desired desiredState - - // Instead of iterating over f.Blocks, iterate over their postordering. - // Liveness information flows backward, so starting at the end - // increases the probability that we will stabilize quickly. - // TODO: Do a better job yet. Here's one possibility: - // Calculate the dominator tree and locate all strongly connected components. - // If a value is live in one block of an SCC, it is live in all. - // Walk the dominator tree from end to beginning, just once, treating SCC - // components as single blocks, duplicated calculated liveness information - // out to all of them. - po := f.postorder() - s.loopnest = f.loopnest() s.loopnest.computeUnavoidableCalls() + + // Liveness analysis. + // This is an adapted version of the algorithm described in chapter 2.4.2 + // of Fabrice Rastello's On Sparse Intermediate Representations. + // https://web.archive.org/web/20240417212122if_/https://inria.hal.science/hal-00761555/file/habilitation.pdf#section.50 + // + // For our implementation, we fall back to a traditional iterative algorithm when we encounter + // Irreducible CFGs. 
They are very uncommon in Go code because they need to be constructed with + // gotos and our current loopnest definition does not compute all the information that + // we'd need to compute the loop ancestors for that step of the algorithm. + // + // Additionally, instead of only considering non-loop successors in the initial DFS phase, + // we compute the liveout as the union of all successors. This larger liveout set is a subset + // of the final liveout for the block and adding this information in the DFS phase means that + // we get slightly more accurate distance information. + var loopLiveIn map[*loop][]liveInfo + var numCalls []int32 + if len(s.loopnest.loops) > 0 && !s.loopnest.hasIrreducible { + loopLiveIn = make(map[*loop][]liveInfo) + numCalls = f.Cache.allocInt32Slice(f.NumBlocks()) + defer f.Cache.freeInt32Slice(numCalls) + } + for { changed := false for _, b := range po { // Start with known live values at the end of the block. - // Add len(b.Values) to adjust from end-of-block distance - // to beginning-of-block distance. live.clear() for _, e := range s.live[b.ID] { - live.set(e.ID, e.dist+int32(len(b.Values)), e.pos) + live.set(e.ID, e.dist, e.pos) + } + update := false + // arguments to phi nodes are live at this blocks out + for _, e := range b.Succs { + succ := e.b + delta := branchDistance(b, succ) + for _, v := range succ.Values { + if v.Op != OpPhi { + break + } + arg := v.Args[e.i] + if s.values[arg.ID].needReg && (!live.contains(arg.ID) || delta < live.get(arg.ID)) { + live.set(arg.ID, delta, v.Pos) + update = true + } + } + } + if update { + s.live[b.ID] = updateLive(live, s.live[b.ID]) + } + // Add len(b.Values) to adjust from end-of-block distance + // to beginning-of-block distance. 
+ c := live.contents() + for i := range c { + c[i].val += int32(len(b.Values)) } // Mark control values as live @@ -2847,18 +2891,16 @@ func (s *regAllocState) computeLive() { } } - // Propagate backwards to the start of the block - // Assumes Values have been scheduled. - phis = phis[:0] for i := len(b.Values) - 1; i >= 0; i-- { v := b.Values[i] live.remove(v.ID) if v.Op == OpPhi { - // save phi ops for later - phis = append(phis, v) continue } if opcodeTable[v.Op].call { + if numCalls != nil { + numCalls[b.ID]++ + } rematIDs = rematIDs[:0] c := live.contents() for i := range c { @@ -2881,7 +2923,207 @@ func (s *regAllocState) computeLive() { } } } - // Propagate desired registers backwards. + // This is a loop header, save our live-in so that + // we can use it to fill in the loop bodies later + if loopLiveIn != nil { + loop := s.loopnest.b2l[b.ID] + if loop != nil && loop.header.ID == b.ID { + loopLiveIn[loop] = updateLive(live, nil) + } + } + // For each predecessor of b, expand its list of live-at-end values. + // invariant: live contains the values live at the start of b + for _, e := range b.Preds { + p := e.b + delta := branchDistance(p, b) + + // Start t off with the previously known live values at the end of p. + t.clear() + for _, e := range s.live[p.ID] { + t.set(e.ID, e.dist, e.pos) + } + update := false + + // Add new live values from scanning this block. 
+ for _, e := range live.contents() { + d := e.val + delta + if !t.contains(e.key) || d < t.get(e.key) { + update = true + t.set(e.key, d, e.pos) + } + } + + if !update { + continue + } + s.live[p.ID] = updateLive(t, s.live[p.ID]) + changed = true + } + } + + // Doing a traditional iterative algorithm and have run + // out of changes + if !changed { + break + } + + // Doing a pre-pass and will fill in the liveness information + // later + if loopLiveIn != nil { + break + } + // For loopless code, we have full liveness info after a single + // iteration + if len(s.loopnest.loops) == 0 { + break + } + } + if f.pass.debug > regDebug { + s.debugPrintLive("after dfs walk", f, s.live, s.desired) + } + + // irreducible CFGs and functions without loops are already + // done, compute their desired registers and return + if loopLiveIn == nil { + s.computeDesired() + return + } + + // Walk the loopnest from outer to inner, adding + // all live-in values from their parent. Instead of + // a recursive algorithm, iterate in depth order. + // TODO(dmo): can we permute the loopnest? can we avoid this copy? + loops := slices.Clone(s.loopnest.loops) + slices.SortFunc(loops, func(a, b *loop) int { + return cmp.Compare(a.depth, b.depth) + }) + + loopset := f.newSparseMapPos(f.NumValues()) + defer f.retSparseMapPos(loopset) + for _, loop := range loops { + if loop.outer == nil { + continue + } + livein := loopLiveIn[loop] + loopset.clear() + for _, l := range livein { + loopset.set(l.ID, l.dist, l.pos) + } + update := false + for _, l := range loopLiveIn[loop.outer] { + if !loopset.contains(l.ID) { + loopset.set(l.ID, l.dist, l.pos) + update = true + } + } + if update { + loopLiveIn[loop] = updateLive(loopset, livein) + } + } + // unknownDistance is a sentinel value for when we know a variable + // is live at any given block, but we do not yet know how far until it's next + // use. The distance will be computed later. 
+ const unknownDistance = -1 + + // add live-in values of the loop headers to their children. + // This includes the loop headers themselves, since they can have values + // that die in the middle of the block and aren't live-out + for _, b := range po { + loop := s.loopnest.b2l[b.ID] + if loop == nil { + continue + } + headerLive := loopLiveIn[loop] + loopset.clear() + for _, l := range s.live[b.ID] { + loopset.set(l.ID, l.dist, l.pos) + } + update := false + for _, l := range headerLive { + if !loopset.contains(l.ID) { + loopset.set(l.ID, unknownDistance, src.NoXPos) + update = true + } + } + if update { + s.live[b.ID] = updateLive(loopset, s.live[b.ID]) + } + } + if f.pass.debug > regDebug { + s.debugPrintLive("after live loop prop", f, s.live, s.desired) + } + // Filling in liveness from loops leaves some blocks with no distance information + // Run over them and fill in the information from their successors. + // To stabilize faster, we quit when no block has missing values and we only + // look at blocks that still have missing values in subsequent iterations + unfinishedBlocks := f.Cache.allocBlockSlice(len(po)) + defer f.Cache.freeBlockSlice(unfinishedBlocks) + copy(unfinishedBlocks, po) + + for len(unfinishedBlocks) > 0 { + n := 0 + for _, b := range unfinishedBlocks { + live.clear() + unfinishedValues := 0 + for _, l := range s.live[b.ID] { + if l.dist == unknownDistance { + unfinishedValues++ + } + live.set(l.ID, l.dist, l.pos) + } + update := false + for _, e := range b.Succs { + succ := e.b + for _, l := range s.live[succ.ID] { + if !live.contains(l.ID) || l.dist == unknownDistance { + continue + } + dist := int32(len(succ.Values)) + l.dist + branchDistance(b, succ) + dist += numCalls[succ.ID] * unlikelyDistance + val := live.get(l.ID) + switch { + case val == unknownDistance: + unfinishedValues-- + fallthrough + case dist < val: + update = true + live.set(l.ID, dist, l.pos) + } + } + } + if update { + s.live[b.ID] = updateLive(live, s.live[b.ID]) + } 
+ if unfinishedValues > 0 { + unfinishedBlocks[n] = b + n++ + } + } + unfinishedBlocks = unfinishedBlocks[:n] + } + + s.computeDesired() + + if f.pass.debug > regDebug { + s.debugPrintLive("final", f, s.live, s.desired) + } +} + +// computeDesired computes the desired register information at the end of each block. +// It is essentially a liveness analysis on machine registers instead of SSA values +// The desired register information is stored in s.desired. +func (s *regAllocState) computeDesired() { + + // TODO: Can we speed this up using the liveness information we have already + // from computeLive? + // TODO: Since we don't propagate information through phi nodes, can we do + // this as a single dominator tree walk instead of the iterative solution? + var desired desiredState + f := s.f + po := f.postorder() + for { + changed := false + for _, b := range po { desired.copy(&s.desired[b.ID]) for i := len(b.Values) - 1; i >= 0; i-- { v := b.Values[i] @@ -2916,106 +3158,85 @@ func (s *regAllocState) computeLive() { desired.addList(v.Args[0].ID, prefs) } } - - // For each predecessor of b, expand its list of live-at-end values. - // invariant: live contains the values live at the start of b (excluding phi inputs) - for i, e := range b.Preds { + for _, e := range b.Preds { p := e.b - // Compute additional distance for the edge. - // Note: delta must be at least 1 to distinguish the control - // value use from the first user in a successor block. 
- delta := int32(normalDistance) - if len(p.Succs) == 2 { - if p.Succs[0].b == b && p.Likely == BranchLikely || - p.Succs[1].b == b && p.Likely == BranchUnlikely { - delta = likelyDistance - } - if p.Succs[0].b == b && p.Likely == BranchUnlikely || - p.Succs[1].b == b && p.Likely == BranchLikely { - delta = unlikelyDistance - } - } + changed = s.desired[p.ID].merge(&desired) || changed + } + } + if !changed || (!s.loopnest.hasIrreducible && len(s.loopnest.loops) == 0) { + break + } + } +} - // Update any desired registers at the end of p. - s.desired[p.ID].merge(&desired) +// updateLive updates a given liveInfo slice with the contents of t +func updateLive(t *sparseMapPos, live []liveInfo) []liveInfo { + live = live[:0] + if cap(live) < t.size() { + live = make([]liveInfo, 0, t.size()) + } + for _, e := range t.contents() { + live = append(live, liveInfo{e.key, e.val, e.pos}) + } + return live +} - // Start t off with the previously known live values at the end of p. - t.clear() - for _, e := range s.live[p.ID] { - t.set(e.ID, e.dist, e.pos) - } - update := false +// branchDistance calculates the distance between a block and a +// successor in pseudo-instructions. This is used to indicate +// likeliness +func branchDistance(b *Block, s *Block) int32 { + if len(b.Succs) == 2 { + if b.Succs[0].b == s && b.Likely == BranchLikely || + b.Succs[1].b == s && b.Likely == BranchUnlikely { + return likelyDistance + } + if b.Succs[0].b == s && b.Likely == BranchUnlikely || + b.Succs[1].b == s && b.Likely == BranchLikely { + return unlikelyDistance + } + } + // Note: the branch distance must be at least 1 to distinguish the control + // value use from the first user in a successor block. + return normalDistance +} - // Add new live values from scanning this block. - for _, e := range live.contents() { - d := e.val + delta - if !t.contains(e.key) || d < t.get(e.key) { - update = true - t.set(e.key, d, e.pos) - } - } - // Also add the correct arg from the saved phi values. 
- // All phis are at distance delta (we consider them - // simultaneously happening at the start of the block). - for _, v := range phis { - id := v.Args[i].ID - if s.values[id].needReg && (!t.contains(id) || delta < t.get(id)) { - update = true - t.set(id, delta, v.Pos) - } - } +func (s *regAllocState) debugPrintLive(stage string, f *Func, live [][]liveInfo, desired []desiredState) { + fmt.Printf("%s: live values at end of each block: %s\n", stage, f.Name) + for _, b := range f.Blocks { + s.debugPrintLiveBlock(b, live[b.ID], &desired[b.ID]) + } +} - if !update { +func (s *regAllocState) debugPrintLiveBlock(b *Block, live []liveInfo, desired *desiredState) { + fmt.Printf(" %s:", b) + slices.SortFunc(live, func(a, b liveInfo) int { + return cmp.Compare(a.ID, b.ID) + }) + for _, x := range live { + fmt.Printf(" v%d(%d)", x.ID, x.dist) + for _, e := range desired.entries { + if e.ID != x.ID { + continue + } + fmt.Printf("[") + first := true + for _, r := range e.regs { + if r == noRegister { continue } - // The live set has changed, update it. 
- l := s.live[p.ID][:0] - if cap(l) < t.size() { - l = make([]liveInfo, 0, t.size()) + if !first { + fmt.Printf(",") } - for _, e := range t.contents() { - l = append(l, liveInfo{e.key, e.val, e.pos}) - } - s.live[p.ID] = l - changed = true + fmt.Print(&s.registers[r]) + first = false } - } - - if !changed { - break + fmt.Printf("]") } } - if f.pass.debug > regDebug { - fmt.Println("live values at end of each block") - for _, b := range f.Blocks { - fmt.Printf(" %s:", b) - for _, x := range s.live[b.ID] { - fmt.Printf(" v%d(%d)", x.ID, x.dist) - for _, e := range s.desired[b.ID].entries { - if e.ID != x.ID { - continue - } - fmt.Printf("[") - first := true - for _, r := range e.regs { - if r == noRegister { - continue - } - if !first { - fmt.Printf(",") - } - fmt.Print(&s.registers[r]) - first = false - } - fmt.Printf("]") - } - } - if avoid := s.desired[b.ID].avoid; avoid != 0 { - fmt.Printf(" avoid=%v", s.RegMaskString(avoid)) - } - fmt.Println() - } + if avoid := desired.avoid; avoid != 0 { + fmt.Printf(" avoid=%v", s.RegMaskString(avoid)) } + fmt.Println() } // A desiredState represents desired register assignments. @@ -3131,14 +3352,17 @@ func (d *desiredState) remove(vid ID) [4]register { return [4]register{noRegister, noRegister, noRegister, noRegister} } -// merge merges another desired state x into d. -func (d *desiredState) merge(x *desiredState) { +// merge merges another desired state x into d. Returns whether the set has +// changed +func (d *desiredState) merge(x *desiredState) bool { + oldAvoid := d.avoid d.avoid |= x.avoid // There should only be a few desired registers, so // linear insert is ok. for _, e := range x.entries { d.addList(e.ID, e.regs) } + return oldAvoid != d.avoid } // computeUnavoidableCalls computes the containsUnavoidableCall fields in the loop nest. 
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index a822ebcbbd112e..07308973b15ec1 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -765,10 +765,6 @@ func arm64ConditionalParamsToAuxInt(v arm64ConditionalParams) int64 { return i } -func float64ExactBits(f float64, c float64) bool { - return math.Float64bits(f) == math.Float64bits(c) -} - func flagConstantToAuxInt(x flagConstant) int64 { return int64(x) } diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index 8a390eb85c8870..52870fe19921ce 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -3582,21 +3582,16 @@ func rewriteValueRISCV64_OpRISCV64FEQD(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (FEQD x (FMOVDconst [c])) - // cond: float64ExactBits(c, math.Inf(-1)) - // result: (ANDI [1] (FCLASSD x)) + // match: (FEQD x (FMOVDconst [math.Inf(-1)])) + // result: (ANDI [0b00_0000_0001] (FCLASSD x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpRISCV64FMOVDconst { - continue - } - c := auxIntToFloat64(v_1.AuxInt) - if !(float64ExactBits(c, math.Inf(-1))) { + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.Inf(-1) { continue } v.reset(OpRISCV64ANDI) - v.AuxInt = int64ToAuxInt(1) + v.AuxInt = int64ToAuxInt(0b00_0000_0001) v0 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v0.AddArg(x) v.AddArg(v0) @@ -3604,22 +3599,17 @@ func rewriteValueRISCV64_OpRISCV64FEQD(v *Value) bool { } break } - // match: (FEQD x (FMOVDconst [c])) - // cond: float64ExactBits(c, math.Inf(1)) - // result: (SNEZ (ANDI [1<<7] (FCLASSD x))) + // match: (FEQD x (FMOVDconst [math.Inf(1)])) + // result: (SNEZ (ANDI [0b00_1000_0000] (FCLASSD x))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if 
v_1.Op != OpRISCV64FMOVDconst { - continue - } - c := auxIntToFloat64(v_1.AuxInt) - if !(float64ExactBits(c, math.Inf(1))) { + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.Inf(1) { continue } v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) - v0.AuxInt = int64ToAuxInt(1 << 7) + v0.AuxInt = int64ToAuxInt(0b00_1000_0000) v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v1.AddArg(x) v0.AddArg(v1) @@ -3635,42 +3625,32 @@ func rewriteValueRISCV64_OpRISCV64FLED(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (FLED (FMOVDconst [c]) x) - // cond: float64ExactBits(c, -math.MaxFloat64) - // result: (SNEZ (ANDI [0xff &^ 1] (FCLASSD x))) + // match: (FLED (FMOVDconst [-math.MaxFloat64]) x) + // result: (SNEZ (ANDI [0b00_1111_1110] (FCLASSD x))) for { - if v_0.Op != OpRISCV64FMOVDconst { + if v_0.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != -math.MaxFloat64 { break } - c := auxIntToFloat64(v_0.AuxInt) x := v_1 - if !(float64ExactBits(c, -math.MaxFloat64)) { - break - } v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) - v0.AuxInt = int64ToAuxInt(0xff &^ 1) + v0.AuxInt = int64ToAuxInt(0b00_1111_1110) v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v1.AddArg(x) v0.AddArg(v1) v.AddArg(v0) return true } - // match: (FLED x (FMOVDconst [c])) - // cond: float64ExactBits(c, math.MaxFloat64) - // result: (SNEZ (ANDI [0xff &^ (1<<7)] (FCLASSD x))) + // match: (FLED x (FMOVDconst [math.MaxFloat64])) + // result: (SNEZ (ANDI [0b00_0111_1111] (FCLASSD x))) for { x := v_0 - if v_1.Op != OpRISCV64FMOVDconst { - break - } - c := auxIntToFloat64(v_1.AuxInt) - if !(float64ExactBits(c, math.MaxFloat64)) { + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.MaxFloat64 { break } v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) - v0.AuxInt = int64ToAuxInt(0xff &^ (1 << 7)) + v0.AuxInt = 
int64ToAuxInt(0b00_0111_1111) v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v1.AddArg(x) v0.AddArg(v1) @@ -3684,40 +3664,30 @@ func rewriteValueRISCV64_OpRISCV64FLTD(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (FLTD x (FMOVDconst [c])) - // cond: float64ExactBits(c, -math.MaxFloat64) - // result: (ANDI [1] (FCLASSD x)) + // match: (FLTD x (FMOVDconst [-math.MaxFloat64])) + // result: (ANDI [0b00_0000_0001] (FCLASSD x)) for { x := v_0 - if v_1.Op != OpRISCV64FMOVDconst { - break - } - c := auxIntToFloat64(v_1.AuxInt) - if !(float64ExactBits(c, -math.MaxFloat64)) { + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != -math.MaxFloat64 { break } v.reset(OpRISCV64ANDI) - v.AuxInt = int64ToAuxInt(1) + v.AuxInt = int64ToAuxInt(0b00_0000_0001) v0 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v0.AddArg(x) v.AddArg(v0) return true } - // match: (FLTD (FMOVDconst [c]) x) - // cond: float64ExactBits(c, math.MaxFloat64) - // result: (SNEZ (ANDI [1<<7] (FCLASSD x))) + // match: (FLTD (FMOVDconst [math.MaxFloat64]) x) + // result: (SNEZ (ANDI [0b00_1000_0000] (FCLASSD x))) for { - if v_0.Op != OpRISCV64FMOVDconst { + if v_0.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != math.MaxFloat64 { break } - c := auxIntToFloat64(v_0.AuxInt) x := v_1 - if !(float64ExactBits(c, math.MaxFloat64)) { - break - } v.reset(OpRISCV64SNEZ) v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) - v0.AuxInt = int64ToAuxInt(1 << 7) + v0.AuxInt = int64ToAuxInt(0b00_1000_0000) v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v1.AddArg(x) v0.AddArg(v1) @@ -4155,22 +4125,17 @@ func rewriteValueRISCV64_OpRISCV64FNED(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (FNED x (FMOVDconst [c])) - // cond: float64ExactBits(c, math.Inf(-1)) - // result: (SEQZ (ANDI [1] (FCLASSD x))) + // match: (FNED x (FMOVDconst [math.Inf(-1)])) + // result: (SEQZ (ANDI [0b00_0000_0001] (FCLASSD x))) for { 
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpRISCV64FMOVDconst { - continue - } - c := auxIntToFloat64(v_1.AuxInt) - if !(float64ExactBits(c, math.Inf(-1))) { + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.Inf(-1) { continue } v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) - v0.AuxInt = int64ToAuxInt(1) + v0.AuxInt = int64ToAuxInt(0b00_0000_0001) v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v1.AddArg(x) v0.AddArg(v1) @@ -4179,22 +4144,17 @@ func rewriteValueRISCV64_OpRISCV64FNED(v *Value) bool { } break } - // match: (FNED x (FMOVDconst [c])) - // cond: float64ExactBits(c, math.Inf(1)) - // result: (SEQZ (ANDI [1<<7] (FCLASSD x))) + // match: (FNED x (FMOVDconst [math.Inf(1)])) + // result: (SEQZ (ANDI [0b00_1000_0000] (FCLASSD x))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpRISCV64FMOVDconst { - continue - } - c := auxIntToFloat64(v_1.AuxInt) - if !(float64ExactBits(c, math.Inf(1))) { + if v_1.Op != OpRISCV64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != math.Inf(1) { continue } v.reset(OpRISCV64SEQZ) v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Int64) - v0.AuxInt = int64ToAuxInt(1 << 7) + v0.AuxInt = int64ToAuxInt(0b00_1000_0000) v1 := b.NewValue0(v.Pos, OpRISCV64FCLASSD, typ.Int64) v1.AddArg(x) v0.AddArg(v1) diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go index a164a6eee555b9..faba41b3e5e16b 100644 --- a/src/cmd/compile/internal/ssa/rewriteWasm.go +++ b/src/cmd/compile/internal/ssa/rewriteWasm.go @@ -48,6 +48,8 @@ func rewriteValueWasm(v *Value) bool { case OpAndB: v.Op = OpWasmI64And return true + case OpAvg64u: + return rewriteValueWasm_OpAvg64u(v) case OpBitLen16: return rewriteValueWasm_OpBitLen16(v) case OpBitLen32: @@ -228,6 +230,10 @@ func rewriteValueWasm(v *Value) bool { case OpGetClosurePtr: v.Op = OpWasmLoweredGetClosurePtr return true + case OpHmul64: + 
return rewriteValueWasm_OpHmul64(v) + case OpHmul64u: + return rewriteValueWasm_OpHmul64u(v) case OpInterCall: v.Op = OpWasmLoweredInterCall return true @@ -239,6 +245,8 @@ func rewriteValueWasm(v *Value) bool { case OpIsSliceInBounds: v.Op = OpWasmI64LeU return true + case OpLast: + return rewriteValueWasm_OpLast(v) case OpLeq16: return rewriteValueWasm_OpLeq16(v) case OpLeq16U: @@ -514,6 +522,10 @@ func rewriteValueWasm(v *Value) bool { return rewriteValueWasm_OpRsh8x64(v) case OpRsh8x8: return rewriteValueWasm_OpRsh8x8(v) + case OpSelect0: + return rewriteValueWasm_OpSelect0(v) + case OpSelect1: + return rewriteValueWasm_OpSelect1(v) case OpSignExt16to32: return rewriteValueWasm_OpSignExt16to32(v) case OpSignExt16to64: @@ -684,6 +696,27 @@ func rewriteValueWasm_OpAddr(v *Value) bool { return true } } +func rewriteValueWasm_OpAvg64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Avg64u x y) + // result: (I64Add (I64ShrU (I64Sub x y) (I64Const [1])) y) + for { + x := v_0 + y := v_1 + v.reset(OpWasmI64Add) + v0 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + v1 := b.NewValue0(v.Pos, OpWasmI64Sub, typ.Int64) + v1.AddArg2(x, y) + v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v2.AuxInt = int64ToAuxInt(1) + v0.AddArg2(v1, v2) + v.AddArg2(v0, y) + return true + } +} func rewriteValueWasm_OpBitLen16(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -1162,6 +1195,108 @@ func rewriteValueWasm_OpEq8(v *Value) bool { return true } } +func rewriteValueWasm_OpHmul64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul64 x y) + // result: (Last x0: (ZeroExt32to64 x) x1: (I64ShrS x (I64Const [32])) y0: (ZeroExt32to64 y) y1: (I64ShrS y (I64Const [32])) x0y0: (I64Mul x0 y0) tt: (I64Add (I64Mul x1 y0) (I64ShrU x0y0 (I64Const [32]))) w1: (I64Add (I64Mul x0 y1) (ZeroExt32to64 tt)) w2: (I64ShrS tt (I64Const [32])) (I64Add (I64Add (I64Mul x1 y1) 
w2) (I64ShrS w1 (I64Const [32])))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLast) + v.Type = t + x0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + x0.AddArg(x) + x1 := b.NewValue0(v.Pos, OpWasmI64ShrS, typ.Int64) + v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v2.AuxInt = int64ToAuxInt(32) + x1.AddArg2(x, v2) + y0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + y0.AddArg(y) + y1 := b.NewValue0(v.Pos, OpWasmI64ShrS, typ.Int64) + y1.AddArg2(y, v2) + x0y0 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + x0y0.AddArg2(x0, y0) + tt := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v7 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + v7.AddArg2(x1, y0) + v8 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + v8.AddArg2(x0y0, v2) + tt.AddArg2(v7, v8) + w1 := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v10 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + v10.AddArg2(x0, y1) + v11 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v11.AddArg(tt) + w1.AddArg2(v10, v11) + w2 := b.NewValue0(v.Pos, OpWasmI64ShrS, typ.Int64) + w2.AddArg2(tt, v2) + v13 := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v14 := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v15 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + v15.AddArg2(x1, y1) + v14.AddArg2(v15, w2) + v16 := b.NewValue0(v.Pos, OpWasmI64ShrS, typ.Int64) + v16.AddArg2(w1, v2) + v13.AddArg2(v14, v16) + v.AddArgs(x0, x1, y0, y1, x0y0, tt, w1, w2, v13) + return true + } +} +func rewriteValueWasm_OpHmul64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul64u x y) + // result: (Last x0: (ZeroExt32to64 x) x1: (I64ShrU x (I64Const [32])) y0: (ZeroExt32to64 y) y1: (I64ShrU y (I64Const [32])) w0: (I64Mul x0 y0) tt: (I64Add (I64Mul x1 y0) (I64ShrU w0 (I64Const [32]))) w1: (I64Add (I64Mul x0 y1) (ZeroExt32to64 tt)) w2: (I64ShrU tt (I64Const [32])) hi: (I64Add (I64Add (I64Mul x1 y1) w2) (I64ShrU w1 (I64Const [32])))) + for { + t := v.Type + x := 
v_0 + y := v_1 + v.reset(OpLast) + v.Type = t + x0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + x0.AddArg(x) + x1 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64) + v2.AuxInt = int64ToAuxInt(32) + x1.AddArg2(x, v2) + y0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + y0.AddArg(y) + y1 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + y1.AddArg2(y, v2) + w0 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + w0.AddArg2(x0, y0) + tt := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v7 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + v7.AddArg2(x1, y0) + v8 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + v8.AddArg2(w0, v2) + tt.AddArg2(v7, v8) + w1 := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v10 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + v10.AddArg2(x0, y1) + v11 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v11.AddArg(tt) + w1.AddArg2(v10, v11) + w2 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + w2.AddArg2(tt, v2) + hi := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v14 := b.NewValue0(v.Pos, OpWasmI64Add, typ.Int64) + v15 := b.NewValue0(v.Pos, OpWasmI64Mul, typ.Int64) + v15.AddArg2(x1, y1) + v14.AddArg2(v15, w2) + v16 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64) + v16.AddArg2(w1, v2) + hi.AddArg2(v14, v16) + v.AddArgs(x0, x1, y0, y1, w0, tt, w1, w2, hi) + return true + } +} func rewriteValueWasm_OpIsNonNil(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -1177,6 +1312,14 @@ func rewriteValueWasm_OpIsNonNil(v *Value) bool { return true } } +func rewriteValueWasm_OpLast(v *Value) bool { + // match: (Last ___) + // result: v.Args[len(v.Args)-1] + for { + v.copyOf(v.Args[len(v.Args)-1]) + return true + } +} func rewriteValueWasm_OpLeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -3199,6 +3342,40 @@ func rewriteValueWasm_OpRsh8x8(v *Value) bool { return true } } +func rewriteValueWasm_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + // match: (Select0 (Mul64uhilo x y)) + // 
result: (Hmul64u x y) + for { + t := v.Type + if v_0.Op != OpMul64uhilo { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpHmul64u) + v.Type = t + v.AddArg2(x, y) + return true + } + return false +} +func rewriteValueWasm_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + // match: (Select1 (Mul64uhilo x y)) + // result: (I64Mul x y) + for { + if v_0.Op != OpMul64uhilo { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpWasmI64Mul) + v.AddArg2(x, y) + return true + } + return false +} func rewriteValueWasm_OpSignExt16to32(v *Value) bool { v_0 := v.Args[0] // match: (SignExt16to32 x:(I64Load16S _ _)) diff --git a/src/cmd/compile/internal/ssa/rewritedivmod.go b/src/cmd/compile/internal/ssa/rewritedivmod.go index 02978075a8aede..ab5cf7d676abc5 100644 --- a/src/cmd/compile/internal/ssa/rewritedivmod.go +++ b/src/cmd/compile/internal/ssa/rewritedivmod.go @@ -212,7 +212,7 @@ func rewriteValuedivmod_OpDiv16u(v *Value) bool { return true } // match: (Div16u x (Const16 [c])) - // cond: umagicOK16(c) && config.RegSize == 4 && config.useAvg + // cond: umagicOK16(c) && config.RegSize == 4 // result: (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) (Mul32 (ZeroExt16to32 x) (Const32 [int32(umagic16(c).m)]))) (Const64 [16 + umagic16(c).s - 1]))) for { t := v.Type @@ -221,7 +221,7 @@ func rewriteValuedivmod_OpDiv16u(v *Value) bool { break } c := auxIntToInt16(v_1.AuxInt) - if !(umagicOK16(c) && config.RegSize == 4 && config.useAvg) { + if !(umagicOK16(c) && config.RegSize == 4) { break } v.reset(OpTrunc32to16) @@ -315,7 +315,7 @@ func rewriteValuedivmod_OpDiv32(v *Value) bool { return true } // match: (Div32 x (Const32 [c])) - // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul + // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 // result: (Sub32 (Rsh32x64 (Hmul32 x (Const32 [int32(smagic32(c).m/2)])) (Const64 [smagic32(c).s - 1])) (Rsh32x64 x (Const64 [31]))) for { t := v.Type 
@@ -324,7 +324,7 @@ func rewriteValuedivmod_OpDiv32(v *Value) bool { break } c := auxIntToInt32(v_1.AuxInt) - if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul) { + if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0) { break } v.reset(OpSub32) @@ -345,7 +345,7 @@ func rewriteValuedivmod_OpDiv32(v *Value) bool { return true } // match: (Div32 x (Const32 [c])) - // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul + // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 // result: (Sub32 (Rsh32x64 (Add32 x (Hmul32 x (Const32 [int32(smagic32(c).m)]))) (Const64 [smagic32(c).s])) (Rsh32x64 x (Const64 [31]))) for { t := v.Type @@ -354,7 +354,7 @@ func rewriteValuedivmod_OpDiv32(v *Value) bool { break } c := auxIntToInt32(v_1.AuxInt) - if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul) { + if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0) { break } v.reset(OpSub32) @@ -411,7 +411,7 @@ func rewriteValuedivmod_OpDiv32u(v *Value) bool { return true } // match: (Div32u x (Const32 [c])) - // cond: t.IsSigned() && smagicOK32(c) && config.RegSize == 4 && config.useHmul + // cond: t.IsSigned() && smagicOK32(c) && config.RegSize == 4 // result: (Rsh32Ux64 (Hmul32u x (Const32 [int32(smagic32(c).m)])) (Const64 [smagic32(c).s])) for { t := v.Type @@ -420,7 +420,7 @@ func rewriteValuedivmod_OpDiv32u(v *Value) bool { break } c := auxIntToInt32(v_1.AuxInt) - if !(t.IsSigned() && smagicOK32(c) && config.RegSize == 4 && config.useHmul) { + if !(t.IsSigned() && smagicOK32(c) && config.RegSize == 4) { break } v.reset(OpRsh32Ux64) @@ -463,7 +463,7 @@ func rewriteValuedivmod_OpDiv32u(v *Value) bool { return true } // match: (Div32u x (Const32 [c])) - // cond: umagicOK32(c) && umagic32(c).m&1 == 0 && config.RegSize == 4 && config.useHmul + // cond: umagicOK32(c) && umagic32(c).m&1 == 0 && config.RegSize == 4 // result: (Rsh32Ux64 (Hmul32u x 
(Const32 [int32(1<<31 + umagic32(c).m/2)])) (Const64 [umagic32(c).s - 1])) for { t := v.Type @@ -472,7 +472,7 @@ func rewriteValuedivmod_OpDiv32u(v *Value) bool { break } c := auxIntToInt32(v_1.AuxInt) - if !(umagicOK32(c) && umagic32(c).m&1 == 0 && config.RegSize == 4 && config.useHmul) { + if !(umagicOK32(c) && umagic32(c).m&1 == 0 && config.RegSize == 4) { break } v.reset(OpRsh32Ux64) @@ -519,7 +519,7 @@ func rewriteValuedivmod_OpDiv32u(v *Value) bool { return true } // match: (Div32u x (Const32 [c])) - // cond: umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul + // cond: umagicOK32(c) && config.RegSize == 4 && c&1 == 0 // result: (Rsh32Ux64 (Hmul32u (Rsh32Ux64 x (Const64 [1])) (Const32 [int32(1<<31 + (umagic32(c).m+1)/2)])) (Const64 [umagic32(c).s - 2])) for { t := v.Type @@ -528,7 +528,7 @@ func rewriteValuedivmod_OpDiv32u(v *Value) bool { break } c := auxIntToInt32(v_1.AuxInt) - if !(umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul) { + if !(umagicOK32(c) && config.RegSize == 4 && c&1 == 0) { break } v.reset(OpRsh32Ux64) @@ -547,7 +547,7 @@ func rewriteValuedivmod_OpDiv32u(v *Value) bool { return true } // match: (Div32u x (Const32 [c])) - // cond: umagicOK32(c) && config.RegSize == 8 && config.useAvg + // cond: umagicOK32(c) && config.RegSize == 8 // result: (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) (Mul64 (ZeroExt32to64 x) (Const64 [int64(umagic32(c).m)]))) (Const64 [32 + umagic32(c).s - 1]))) for { t := v.Type @@ -556,7 +556,7 @@ func rewriteValuedivmod_OpDiv32u(v *Value) bool { break } c := auxIntToInt32(v_1.AuxInt) - if !(umagicOK32(c) && config.RegSize == 8 && config.useAvg) { + if !(umagicOK32(c) && config.RegSize == 8) { break } v.reset(OpTrunc64to32) @@ -581,7 +581,7 @@ func rewriteValuedivmod_OpDiv32u(v *Value) bool { return true } // match: (Div32u x (Const32 [c])) - // cond: umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul + // cond: umagicOK32(c) && 
config.RegSize == 4 // result: (Rsh32Ux64 (Avg32u x (Hmul32u x (Const32 [int32(umagic32(c).m)]))) (Const64 [umagic32(c).s - 1])) for { t := v.Type @@ -590,7 +590,7 @@ func rewriteValuedivmod_OpDiv32u(v *Value) bool { break } c := auxIntToInt32(v_1.AuxInt) - if !(umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul) { + if !(umagicOK32(c) && config.RegSize == 4) { break } v.reset(OpRsh32Ux64) @@ -612,7 +612,6 @@ func rewriteValuedivmod_OpDiv64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - config := b.Func.Config typ := &b.Func.Config.Types // match: (Div64 n (Const64 [c])) // cond: isPowerOfTwo(c) @@ -644,7 +643,7 @@ func rewriteValuedivmod_OpDiv64(v *Value) bool { return true } // match: (Div64 x (Const64 [c])) - // cond: smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul + // cond: smagicOK64(c) && smagic64(c).m&1 == 0 // result: (Sub64 (Rsh64x64 (Hmul64 x (Const64 [int64(smagic64(c).m/2)])) (Const64 [smagic64(c).s - 1])) (Rsh64x64 x (Const64 [63]))) for { t := v.Type @@ -653,7 +652,7 @@ func rewriteValuedivmod_OpDiv64(v *Value) bool { break } c := auxIntToInt64(v_1.AuxInt) - if !(smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul) { + if !(smagicOK64(c) && smagic64(c).m&1 == 0) { break } v.reset(OpSub64) @@ -674,7 +673,7 @@ func rewriteValuedivmod_OpDiv64(v *Value) bool { return true } // match: (Div64 x (Const64 [c])) - // cond: smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul + // cond: smagicOK64(c) && smagic64(c).m&1 != 0 // result: (Sub64 (Rsh64x64 (Add64 x (Hmul64 x (Const64 [int64(smagic64(c).m)]))) (Const64 [smagic64(c).s])) (Rsh64x64 x (Const64 [63]))) for { t := v.Type @@ -683,7 +682,7 @@ func rewriteValuedivmod_OpDiv64(v *Value) bool { break } c := auxIntToInt64(v_1.AuxInt) - if !(smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul) { + if !(smagicOK64(c) && smagic64(c).m&1 != 0) { break } v.reset(OpSub64) @@ -711,10 +710,9 @@ func rewriteValuedivmod_OpDiv64u(v *Value) bool { v_1 := v.Args[1] 
v_0 := v.Args[0] b := v.Block - config := b.Func.Config typ := &b.Func.Config.Types // match: (Div64u x (Const64 [c])) - // cond: t.IsSigned() && smagicOK64(c) && config.useHmul + // cond: t.IsSigned() && smagicOK64(c) // result: (Rsh64Ux64 (Hmul64u x (Const64 [int64(smagic64(c).m)])) (Const64 [smagic64(c).s])) for { t := v.Type @@ -723,7 +721,7 @@ func rewriteValuedivmod_OpDiv64u(v *Value) bool { break } c := auxIntToInt64(v_1.AuxInt) - if !(t.IsSigned() && smagicOK64(c) && config.useHmul) { + if !(t.IsSigned() && smagicOK64(c)) { break } v.reset(OpRsh64Ux64) @@ -738,7 +736,7 @@ func rewriteValuedivmod_OpDiv64u(v *Value) bool { return true } // match: (Div64u x (Const64 [c])) - // cond: umagicOK64(c) && umagic64(c).m&1 == 0 && config.useHmul + // cond: umagicOK64(c) && umagic64(c).m&1 == 0 // result: (Rsh64Ux64 (Hmul64u x (Const64 [int64(1<<63 + umagic64(c).m/2)])) (Const64 [umagic64(c).s - 1])) for { t := v.Type @@ -747,7 +745,7 @@ func rewriteValuedivmod_OpDiv64u(v *Value) bool { break } c := auxIntToInt64(v_1.AuxInt) - if !(umagicOK64(c) && umagic64(c).m&1 == 0 && config.useHmul) { + if !(umagicOK64(c) && umagic64(c).m&1 == 0) { break } v.reset(OpRsh64Ux64) @@ -762,7 +760,7 @@ func rewriteValuedivmod_OpDiv64u(v *Value) bool { return true } // match: (Div64u x (Const64 [c])) - // cond: umagicOK64(c) && c&1 == 0 && config.useHmul + // cond: umagicOK64(c) && c&1 == 0 // result: (Rsh64Ux64 (Hmul64u (Rsh64Ux64 x (Const64 [1])) (Const64 [int64(1<<63 + (umagic64(c).m+1)/2)])) (Const64 [umagic64(c).s - 2])) for { t := v.Type @@ -771,7 +769,7 @@ func rewriteValuedivmod_OpDiv64u(v *Value) bool { break } c := auxIntToInt64(v_1.AuxInt) - if !(umagicOK64(c) && c&1 == 0 && config.useHmul) { + if !(umagicOK64(c) && c&1 == 0) { break } v.reset(OpRsh64Ux64) @@ -790,7 +788,7 @@ func rewriteValuedivmod_OpDiv64u(v *Value) bool { return true } // match: (Div64u x (Const64 [c])) - // cond: umagicOK64(c) && config.useAvg && config.useHmul + // cond: umagicOK64(c) // result: 
(Rsh64Ux64 (Avg64u x (Hmul64u x (Const64 [int64(umagic64(c).m)]))) (Const64 [umagic64(c).s - 1])) for { t := v.Type @@ -799,7 +797,7 @@ func rewriteValuedivmod_OpDiv64u(v *Value) bool { break } c := auxIntToInt64(v_1.AuxInt) - if !(umagicOK64(c) && config.useAvg && config.useHmul) { + if !(umagicOK64(c)) { break } v.reset(OpRsh64Ux64) diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go index bf9e71c1701d08..78a42351169f4a 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics.go +++ b/src/cmd/compile/internal/ssagen/intrinsics.go @@ -1219,11 +1219,11 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { alias("math/bits", "OnesCount", "math/bits", "OnesCount64", p8...) - addF("math/bits", "Mul64", + add("math/bits", "Mul64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1]) }, - sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.PPC64, sys.S390X, sys.MIPS64, sys.MIPS, sys.RISCV64, sys.Loong64) + all...) alias("math/bits", "Mul", "math/bits", "Mul64", p8...) alias("internal/runtime/math", "Mul64", "math/bits", "Mul64", p8...) addF("math/bits", "Add64", @@ -1603,10 +1603,10 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { }, sys.AMD64) - /******** crypto/subtle ********/ - // We implement a superset of the ConstantTimeSelect promise: - // ConstantTimeSelect returns x if v != 0 and y if v == 0. - add("crypto/subtle", "ConstantTimeSelect", + /******** crypto/internal/constanttime ********/ + // We implement a superset of the Select promise: + // Select returns x if v != 0 and y if v == 0. 
+ add("crypto/internal/constanttime", "Select", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { v, x, y := args[0], args[1], args[2] @@ -1627,7 +1627,7 @@ func initIntrinsics(cfg *intrinsicBuildConfig) { return s.newValue3(ssa.OpCondSelect, types.Types[types.TINT], x, y, check) }, sys.ArchAMD64, sys.ArchARM64, sys.ArchLoong64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchWasm) // all with CMOV support. - add("crypto/subtle", "constantTimeBoolToUint8", + add("crypto/internal/constanttime", "boolToUint8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCvtBoolToUint8, types.Types[types.TUINT8], args[0]) }, diff --git a/src/cmd/compile/internal/ssagen/intrinsics_test.go b/src/cmd/compile/internal/ssagen/intrinsics_test.go index 9311f843454c36..713adc0e8be83b 100644 --- a/src/cmd/compile/internal/ssagen/intrinsics_test.go +++ b/src/cmd/compile/internal/ssagen/intrinsics_test.go @@ -42,7 +42,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"386", "math/bits", "TrailingZeros8"}: struct{}{}, {"386", "runtime", "KeepAlive"}: struct{}{}, {"386", "runtime", "slicebytetostringtmp"}: struct{}{}, - {"386", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"386", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"amd64", "internal/runtime/atomic", "And"}: struct{}{}, {"amd64", "internal/runtime/atomic", "And32"}: struct{}{}, {"amd64", "internal/runtime/atomic", "And64"}: struct{}{}, @@ -189,8 +189,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"amd64", "sync/atomic", "SwapUint32"}: struct{}{}, {"amd64", "sync/atomic", "SwapUint64"}: struct{}{}, {"amd64", "sync/atomic", "SwapUintptr"}: struct{}{}, - {"amd64", "crypto/subtle", "ConstantTimeSelect"}: struct{}{}, - {"amd64", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"amd64", "crypto/internal/constanttime", "Select"}: struct{}{}, + {"amd64", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"arm", 
"internal/runtime/sys", "Bswap32"}: struct{}{}, {"arm", "internal/runtime/sys", "Bswap64"}: struct{}{}, {"arm", "internal/runtime/sys", "GetCallerPC"}: struct{}{}, @@ -219,7 +219,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"arm", "math/bits", "TrailingZeros8"}: struct{}{}, {"arm", "runtime", "KeepAlive"}: struct{}{}, {"arm", "runtime", "slicebytetostringtmp"}: struct{}{}, - {"arm", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"arm", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"arm64", "internal/runtime/atomic", "And"}: struct{}{}, {"arm64", "internal/runtime/atomic", "And32"}: struct{}{}, {"arm64", "internal/runtime/atomic", "And64"}: struct{}{}, @@ -364,8 +364,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"arm64", "sync/atomic", "SwapUint32"}: struct{}{}, {"arm64", "sync/atomic", "SwapUint64"}: struct{}{}, {"arm64", "sync/atomic", "SwapUintptr"}: struct{}{}, - {"arm64", "crypto/subtle", "ConstantTimeSelect"}: struct{}{}, - {"arm64", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"arm64", "crypto/internal/constanttime", "Select"}: struct{}{}, + {"arm64", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"loong64", "internal/runtime/atomic", "And"}: struct{}{}, {"loong64", "internal/runtime/atomic", "And32"}: struct{}{}, {"loong64", "internal/runtime/atomic", "And64"}: struct{}{}, @@ -512,8 +512,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"loong64", "sync/atomic", "SwapUint32"}: struct{}{}, {"loong64", "sync/atomic", "SwapUint64"}: struct{}{}, {"loong64", "sync/atomic", "SwapUintptr"}: struct{}{}, - {"loong64", "crypto/subtle", "ConstantTimeSelect"}: struct{}{}, - {"loong64", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"loong64", "crypto/internal/constanttime", "Select"}: struct{}{}, + {"loong64", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"mips", "internal/runtime/atomic", "And"}: struct{}{}, {"mips", "internal/runtime/atomic", "And8"}: 
struct{}{}, {"mips", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -585,7 +585,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"mips", "sync/atomic", "SwapInt32"}: struct{}{}, {"mips", "sync/atomic", "SwapUint32"}: struct{}{}, {"mips", "sync/atomic", "SwapUintptr"}: struct{}{}, - {"mips", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"mips", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"mips64", "internal/runtime/atomic", "And"}: struct{}{}, {"mips64", "internal/runtime/atomic", "And8"}: struct{}{}, {"mips64", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -674,7 +674,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"mips64", "sync/atomic", "SwapUint32"}: struct{}{}, {"mips64", "sync/atomic", "SwapUint64"}: struct{}{}, {"mips64", "sync/atomic", "SwapUintptr"}: struct{}{}, - {"mips64", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"mips64", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"mips64le", "internal/runtime/atomic", "And"}: struct{}{}, {"mips64le", "internal/runtime/atomic", "And8"}: struct{}{}, {"mips64le", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -763,7 +763,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"mips64le", "sync/atomic", "SwapUint32"}: struct{}{}, {"mips64le", "sync/atomic", "SwapUint64"}: struct{}{}, {"mips64le", "sync/atomic", "SwapUintptr"}: struct{}{}, - {"mips64le", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"mips64le", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"mipsle", "internal/runtime/atomic", "And"}: struct{}{}, {"mipsle", "internal/runtime/atomic", "And8"}: struct{}{}, {"mipsle", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -835,7 +835,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"mipsle", "sync/atomic", "SwapInt32"}: struct{}{}, {"mipsle", "sync/atomic", "SwapUint32"}: struct{}{}, {"mipsle", "sync/atomic", "SwapUintptr"}: struct{}{}, - {"mipsle", "crypto/subtle", 
"constantTimeBoolToUint8"}: struct{}{}, + {"mipsle", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"ppc64", "internal/runtime/atomic", "And"}: struct{}{}, {"ppc64", "internal/runtime/atomic", "And8"}: struct{}{}, {"ppc64", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -960,8 +960,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"ppc64", "sync/atomic", "SwapUint32"}: struct{}{}, {"ppc64", "sync/atomic", "SwapUint64"}: struct{}{}, {"ppc64", "sync/atomic", "SwapUintptr"}: struct{}{}, - {"ppc64", "crypto/subtle", "ConstantTimeSelect"}: struct{}{}, - {"ppc64", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"ppc64", "crypto/internal/constanttime", "Select"}: struct{}{}, + {"ppc64", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"ppc64le", "internal/runtime/atomic", "And"}: struct{}{}, {"ppc64le", "internal/runtime/atomic", "And8"}: struct{}{}, {"ppc64le", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -1086,8 +1086,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"ppc64le", "sync/atomic", "SwapUint32"}: struct{}{}, {"ppc64le", "sync/atomic", "SwapUint64"}: struct{}{}, {"ppc64le", "sync/atomic", "SwapUintptr"}: struct{}{}, - {"ppc64le", "crypto/subtle", "ConstantTimeSelect"}: struct{}{}, - {"ppc64le", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"ppc64le", "crypto/internal/constanttime", "Select"}: struct{}{}, + {"ppc64le", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"riscv64", "internal/runtime/atomic", "And"}: struct{}{}, {"riscv64", "internal/runtime/atomic", "And8"}: struct{}{}, {"riscv64", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -1208,7 +1208,7 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"riscv64", "sync/atomic", "SwapUint32"}: struct{}{}, {"riscv64", "sync/atomic", "SwapUint64"}: struct{}{}, {"riscv64", "sync/atomic", "SwapUintptr"}: struct{}{}, - {"riscv64", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"riscv64", 
"crypto/internal/constanttime", "boolToUint8"}: struct{}{}, {"s390x", "internal/runtime/atomic", "And"}: struct{}{}, {"s390x", "internal/runtime/atomic", "And8"}: struct{}{}, {"s390x", "internal/runtime/atomic", "Cas"}: struct{}{}, @@ -1327,7 +1327,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"s390x", "sync/atomic", "SwapUint32"}: struct{}{}, {"s390x", "sync/atomic", "SwapUint64"}: struct{}{}, {"s390x", "sync/atomic", "SwapUintptr"}: struct{}{}, - {"s390x", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"s390x", "crypto/internal/constanttime", "boolToUint8"}: struct{}{}, + {"wasm", "internal/runtime/math", "Mul64"}: struct{}{}, {"wasm", "internal/runtime/sys", "GetCallerPC"}: struct{}{}, {"wasm", "internal/runtime/sys", "GetCallerSP"}: struct{}{}, {"wasm", "internal/runtime/sys", "GetClosurePtr"}: struct{}{}, @@ -1344,11 +1345,14 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"wasm", "math", "RoundToEven"}: struct{}{}, {"wasm", "math", "Trunc"}: struct{}{}, {"wasm", "math", "sqrt"}: struct{}{}, + {"wasm", "math/big", "mulWW"}: struct{}{}, {"wasm", "math/bits", "Len"}: struct{}{}, {"wasm", "math/bits", "Len16"}: struct{}{}, {"wasm", "math/bits", "Len32"}: struct{}{}, {"wasm", "math/bits", "Len64"}: struct{}{}, {"wasm", "math/bits", "Len8"}: struct{}{}, + {"wasm", "math/bits", "Mul"}: struct{}{}, + {"wasm", "math/bits", "Mul64"}: struct{}{}, {"wasm", "math/bits", "OnesCount"}: struct{}{}, {"wasm", "math/bits", "OnesCount16"}: struct{}{}, {"wasm", "math/bits", "OnesCount32"}: struct{}{}, @@ -1363,8 +1367,8 @@ var wantIntrinsics = map[testIntrinsicKey]struct{}{ {"wasm", "math/bits", "TrailingZeros8"}: struct{}{}, {"wasm", "runtime", "KeepAlive"}: struct{}{}, {"wasm", "runtime", "slicebytetostringtmp"}: struct{}{}, - {"wasm", "crypto/subtle", "ConstantTimeSelect"}: struct{}{}, - {"wasm", "crypto/subtle", "constantTimeBoolToUint8"}: struct{}{}, + {"wasm", "crypto/internal/constanttime", "Select"}: struct{}{}, + {"wasm", 
"crypto/internal/constanttime", "boolToUint8"}: struct{}{}, } func TestIntrinsics(t *testing.T) { diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index ae7d57566f7e0d..db2ffb5752f1ce 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -7797,7 +7797,7 @@ func callTargetLSym(callee *ir.Name) *obj.LSym { } // deferStructFnField is the field index of _defer.fn. -const deferStructFnField = 4 +const deferStructFnField = 3 var deferType *types.Type @@ -7817,7 +7817,6 @@ func deferstruct() *types.Type { makefield("heap", types.Types[types.TBOOL]), makefield("rangefunc", types.Types[types.TBOOL]), makefield("sp", types.Types[types.TUINTPTR]), - makefield("pc", types.Types[types.TUINTPTR]), // Note: the types here don't really matter. Defer structures // are always scanned explicitly during stack copying and GC, // so we make them uintptr type even though they are real pointers. diff --git a/src/cmd/compile/internal/test/move_test.go b/src/cmd/compile/internal/test/move_test.go new file mode 100644 index 00000000000000..f361a86539142d --- /dev/null +++ b/src/cmd/compile/internal/test/move_test.go @@ -0,0 +1,55 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package test + +import "testing" + +var ( + n = [16]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + m = [16]int{2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32} +) + +func TestEqual(t *testing.T) { + if r := move2(n, m, 0); r != n { + t.Fatalf("%v != %v", r, n) + } + if r := move2(n, m, 1); r != m { + t.Fatalf("%v != %v", r, m) + } + if r := move2p(n, m, 0); r != n { + t.Fatalf("%v != %v", r, n) + } + if r := move2p(n, m, 1); r != m { + t.Fatalf("%v != %v", r, m) + } +} + +//go:noinline +func move2(a, b [16]int, c int) [16]int { + e := a + f := b + var d [16]int + if c%2 == 0 { + d = e + } else { + d = f + } + r := d + return r +} + +//go:noinline +func move2p(a, b [16]int, c int) [16]int { + e := a + f := b + var p *[16]int + if c%2 == 0 { + p = &e + } else { + p = &f + } + r := *p + return r +} diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go index ae744e13bc79b5..51581c27e16148 100644 --- a/src/cmd/go/internal/clean/clean.go +++ b/src/cmd/go/internal/clean/clean.go @@ -121,7 +121,7 @@ func init() { func runClean(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() if len(args) > 0 { cacheFlag := "" switch { @@ -143,7 +143,7 @@ func runClean(ctx context.Context, cmd *base.Command, args []string) { // either the flags and arguments explicitly imply a package, // or no other target (such as a cache) was requested to be cleaned. 
cleanPkg := len(args) > 0 || cleanI || cleanR - if (!modload.Enabled(moduleLoaderState) || modload.HasModRoot(moduleLoaderState)) && + if (!moduleLoaderState.Enabled() || moduleLoaderState.HasModRoot()) && !cleanCache && !cleanModcache && !cleanTestcache && !cleanFuzzcache { cleanPkg = true } diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go index f600a354727ae1..d345a36863232e 100644 --- a/src/cmd/go/internal/envcmd/env.go +++ b/src/cmd/go/internal/envcmd/env.go @@ -192,12 +192,12 @@ func findEnv(env []cfg.EnvVar, name string) string { func ExtraEnvVars(loaderstate *modload.State) []cfg.EnvVar { gomod := "" modload.Init(loaderstate) - if modload.HasModRoot(loaderstate) { - gomod = modload.ModFilePath(loaderstate) - } else if modload.Enabled(loaderstate) { + if loaderstate.HasModRoot() { + gomod = loaderstate.ModFilePath() + } else if loaderstate.Enabled() { gomod = os.DevNull } - modload.InitWorkfile(loaderstate) + loaderstate.InitWorkfile() gowork := modload.WorkFilePath(loaderstate) // As a special case, if a user set off explicitly, report that in GOWORK. 
if cfg.Getenv("GOWORK") == "off" { diff --git a/src/cmd/go/internal/fmtcmd/fmt.go b/src/cmd/go/internal/fmtcmd/fmt.go index cd689b510deb19..fe356bdc081456 100644 --- a/src/cmd/go/internal/fmtcmd/fmt.go +++ b/src/cmd/go/internal/fmtcmd/fmt.go @@ -61,7 +61,7 @@ func runFmt(ctx context.Context, cmd *base.Command, args []string) { baseGofmtArgLen := gofmtArgLen for _, pkg := range load.PackagesAndErrors(moduleLoaderState, ctx, load.PackageOpts{}, args) { - if modload.Enabled(moduleLoaderState) && pkg.Module != nil && !pkg.Module.Main { + if moduleLoaderState.Enabled() && pkg.Module != nil && !pkg.Module.Main { if !printed { fmt.Fprintf(os.Stderr, "go: not formatting packages in dependency modules\n") printed = true diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go index 2a5a5a6764af95..59142859c1f445 100644 --- a/src/cmd/go/internal/generate/generate.go +++ b/src/cmd/go/internal/generate/generate.go @@ -183,7 +183,7 @@ func init() { func runGenerate(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() if generateRunFlag != "" { var err error @@ -206,7 +206,7 @@ func runGenerate(ctx context.Context, cmd *base.Command, args []string) { printed := false pkgOpts := load.PackageOpts{IgnoreImports: true} for _, pkg := range load.PackagesAndErrors(moduleLoaderState, ctx, pkgOpts, args) { - if modload.Enabled(moduleLoaderState) && pkg.Module != nil && !pkg.Module.Main { + if moduleLoaderState.Enabled() && pkg.Module != nil && !pkg.Module.Main { if !printed { fmt.Fprintf(os.Stderr, "go: not generating in packages in dependency modules\n") printed = true diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go index 086a8c2ca390cb..81ac4ebaf9cf68 100644 --- a/src/cmd/go/internal/list/list.go +++ b/src/cmd/go/internal/list/list.go @@ -420,7 +420,7 @@ var nl = []byte{'\n'} func runList(ctx 
context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() if *listFmt != "" && listJson { base.Fatalf("go list -f cannot be used with -json") @@ -428,7 +428,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { if *listReuse != "" && !*listM { base.Fatalf("go list -reuse cannot be used without -m") } - if *listReuse != "" && modload.HasModRoot(moduleLoaderState) { + if *listReuse != "" && moduleLoaderState.HasModRoot() { base.Fatalf("go list -reuse cannot be used inside a module") } @@ -502,7 +502,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { if cfg.BuildMod == "vendor" { base.Fatalf("go list -retracted cannot be used when vendoring is enabled") } - if !modload.Enabled(moduleLoaderState) { + if !moduleLoaderState.Enabled() { base.Fatalf("go list -retracted can only be used in module-aware mode") } } @@ -526,7 +526,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { base.Fatalf("go list -test cannot be used with -m") } - if modload.Init(moduleLoaderState); !modload.Enabled(moduleLoaderState) { + if modload.Init(moduleLoaderState); !moduleLoaderState.Enabled() { base.Fatalf("go: list -m cannot be used with GO111MODULE=off") } diff --git a/src/cmd/go/internal/load/search.go b/src/cmd/go/internal/load/search.go index 732dc2a5ae4d2f..749a00e8485f4d 100644 --- a/src/cmd/go/internal/load/search.go +++ b/src/cmd/go/internal/load/search.go @@ -57,9 +57,9 @@ func MatchPackage(pattern, cwd string) func(*modload.State, *Package) bool { default: return func(s *modload.State, p *Package) bool { switch { - case pattern == "tool" && modload.Enabled(s): + case pattern == "tool" && s.Enabled(): return s.MainModules.Tools()[p.ImportPath] - case pattern == "work" && modload.Enabled(s): + case pattern == "work" && s.Enabled(): return p.Module != nil && s.MainModules.Contains(p.Module.Path) default: 
matchPath := pkgpattern.MatchPattern(pattern) diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go index 7544e221d58f81..150d0c88607122 100644 --- a/src/cmd/go/internal/modcmd/download.go +++ b/src/cmd/go/internal/modcmd/download.go @@ -110,14 +110,14 @@ type ModuleJSON struct { func runDownload(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() // Check whether modules are enabled and whether we're in a module. moduleLoaderState.ForceUseModules = true modload.ExplicitWriteGoMod = true haveExplicitArgs := len(args) > 0 - if modload.HasModRoot(moduleLoaderState) || modload.WorkFilePath(moduleLoaderState) != "" { + if moduleLoaderState.HasModRoot() || modload.WorkFilePath(moduleLoaderState) != "" { modload.LoadModFile(moduleLoaderState, ctx) // to fill MainModules if haveExplicitArgs { @@ -170,7 +170,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { } if len(args) == 0 { - if modload.HasModRoot(moduleLoaderState) { + if moduleLoaderState.HasModRoot() { os.Stderr.WriteString("go: no module dependencies to download\n") } else { base.Errorf("go: no modules specified (see 'go help mod download')") @@ -178,7 +178,7 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) { base.Exit() } - if *downloadReuse != "" && modload.HasModRoot(moduleLoaderState) { + if *downloadReuse != "" && moduleLoaderState.HasModRoot() { base.Fatalf("go mod download -reuse cannot be used inside a module") } diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go index 2e0d1a6dd8cd02..4cd8d875012c3c 100644 --- a/src/cmd/go/internal/modcmd/edit.go +++ b/src/cmd/go/internal/modcmd/edit.go @@ -233,7 +233,7 @@ func runEdit(ctx context.Context, cmd *base.Command, args []string) { if len(args) == 1 { gomod = args[0] } else { - gomod = 
modload.ModFilePath(moduleLoaderState) + gomod = moduleLoaderState.ModFilePath() } if *editModule != "" { diff --git a/src/cmd/go/internal/modcmd/graph.go b/src/cmd/go/internal/modcmd/graph.go index 467da99b22961a..307c6ee4b56f15 100644 --- a/src/cmd/go/internal/modcmd/graph.go +++ b/src/cmd/go/internal/modcmd/graph.go @@ -53,7 +53,7 @@ func init() { func runGraph(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() if len(args) > 0 { base.Fatalf("go: 'go mod graph' accepts no arguments") diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index ef44ce41c04c7f..5782f4e79448c6 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -67,7 +67,7 @@ func init() { func runVendor(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() if modload.WorkFilePath(moduleLoaderState) != "" { base.Fatalf("go: 'go mod vendor' cannot be run in workspace mode. Run 'go work vendor' to vendor the workspace or set 'GOWORK=off' to exit workspace mode.") } @@ -118,7 +118,7 @@ func RunVendor(loaderstate *modload.State, ctx context.Context, vendorE bool, ve includeGoVersions := false isExplicit := map[module.Version]bool{} gv := loaderstate.MainModules.GoVersion(loaderstate) - if gover.Compare(gv, "1.14") >= 0 && (modload.FindGoWork(loaderstate, base.Cwd()) != "" || modload.ModFile(loaderstate).Go != nil) { + if gover.Compare(gv, "1.14") >= 0 && (loaderstate.FindGoWork(base.Cwd()) != "" || modload.ModFile(loaderstate).Go != nil) { // If the Go version is at least 1.14, annotate all explicit 'require' and // 'replace' targets found in the go.mod file so that we can perform a // stronger consistency check when -mod=vendor is set. 
diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go index e40a05ed531648..d654ba26a4b57c 100644 --- a/src/cmd/go/internal/modcmd/verify.go +++ b/src/cmd/go/internal/modcmd/verify.go @@ -45,7 +45,7 @@ func init() { func runVerify(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() if len(args) != 0 { // NOTE(rsc): Could take a module pattern. diff --git a/src/cmd/go/internal/modcmd/why.go b/src/cmd/go/internal/modcmd/why.go index 407a19d5c21040..b52b9354c29c72 100644 --- a/src/cmd/go/internal/modcmd/why.go +++ b/src/cmd/go/internal/modcmd/why.go @@ -64,7 +64,7 @@ func init() { func runWhy(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() moduleLoaderState.ForceUseModules = true moduleLoaderState.RootMode = modload.NeedRoot modload.ExplicitWriteGoMod = true // don't write go.mod in ListModules diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go index 329fbaec040fc9..c8dc6e29bf69c1 100644 --- a/src/cmd/go/internal/modget/get.go +++ b/src/cmd/go/internal/modget/get.go @@ -308,14 +308,14 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { // Allow looking up modules for import paths when outside of a module. // 'go get' is expected to do this, unlike other commands. - modload.AllowMissingModuleImports(moduleLoaderState) + moduleLoaderState.AllowMissingModuleImports() // 'go get' no longer builds or installs packages, so there's nothing to do // if there's no go.mod file. // TODO(#40775): make modload.Init return ErrNoModRoot instead of exiting. // We could handle that here by printing a different message. 
modload.Init(moduleLoaderState) - if !modload.HasModRoot(moduleLoaderState) { + if !moduleLoaderState.HasModRoot() { base.Fatalf("go: go.mod file not found in current directory or any parent directory.\n" + "\t'go get' is no longer supported outside a module.\n" + "\tTo build and install a command, use 'go install' with a version,\n" + @@ -425,7 +425,7 @@ func runGet(ctx context.Context, cmd *base.Command, args []string) { newReqs := reqsFromGoMod(modload.ModFile(moduleLoaderState)) r.reportChanges(oldReqs, newReqs) - if gowork := modload.FindGoWork(moduleLoaderState, base.Cwd()); gowork != "" { + if gowork := moduleLoaderState.FindGoWork(base.Cwd()); gowork != "" { wf, err := modload.ReadWorkFile(gowork) if err == nil && modload.UpdateWorkGoVersion(wf, moduleLoaderState.MainModules.GoVersion(moduleLoaderState)) { modload.WriteWorkFile(gowork, wf) @@ -575,7 +575,7 @@ func newResolver(loaderstate *modload.State, ctx context.Context, queries []*que buildListVersion: initialVersion, initialVersion: initialVersion, nonesByPath: map[string]*query{}, - workspace: loadWorkspace(modload.FindGoWork(loaderstate, base.Cwd())), + workspace: loadWorkspace(loaderstate.FindGoWork(base.Cwd())), } for _, q := range queries { @@ -722,7 +722,7 @@ func (r *resolver) queryNone(loaderstate *modload.State, ctx context.Context, q if !q.isWildcard() { q.pathOnce(q.pattern, func() pathSet { - hasModRoot := modload.HasModRoot(loaderstate) + hasModRoot := loaderstate.HasModRoot() if hasModRoot && loaderstate.MainModules.Contains(q.pattern) { v := module.Version{Path: q.pattern} // The user has explicitly requested to downgrade their own module to @@ -752,7 +752,7 @@ func (r *resolver) queryNone(loaderstate *modload.State, ctx context.Context, q continue } q.pathOnce(curM.Path, func() pathSet { - if modload.HasModRoot(loaderstate) && curM.Version == "" && loaderstate.MainModules.Contains(curM.Path) { + if loaderstate.HasModRoot() && curM.Version == "" && 
loaderstate.MainModules.Contains(curM.Path) { return errSet(&modload.QueryMatchesMainModulesError{ MainModules: []module.Version{curM}, Pattern: q.pattern, @@ -779,7 +779,7 @@ func (r *resolver) performLocalQueries(loaderstate *modload.State, ctx context.C // restricted to matching packages in the main module. pkgPattern, mainModule := loaderstate.MainModules.DirImportPath(loaderstate, ctx, q.pattern) if pkgPattern == "." { - modload.MustHaveModRoot(loaderstate) + loaderstate.MustHaveModRoot() versions := loaderstate.MainModules.Versions() modRoots := make([]string, 0, len(versions)) for _, m := range versions { @@ -802,7 +802,7 @@ func (r *resolver) performLocalQueries(loaderstate *modload.State, ctx context.C return errSet(fmt.Errorf("no package to get in current directory")) } if !q.isWildcard() { - modload.MustHaveModRoot(loaderstate) + loaderstate.MustHaveModRoot() return errSet(fmt.Errorf("%s%s is not a package in module rooted at %s", q.pattern, absDetail, loaderstate.MainModules.ModRoot(mainModule))) } search.WarnUnmatched([]*search.Match{match}) diff --git a/src/cmd/go/internal/modget/query.go b/src/cmd/go/internal/modget/query.go index 75d32dc633a420..3086dbc1ad61b0 100644 --- a/src/cmd/go/internal/modget/query.go +++ b/src/cmd/go/internal/modget/query.go @@ -184,7 +184,7 @@ func (q *query) validate(loaderstate *modload.State) error { if q.pattern == "all" { // If there is no main module, "all" is not meaningful. 
- if !modload.HasModRoot(loaderstate) { + if !loaderstate.HasModRoot() { return fmt.Errorf(`cannot match "all": %v`, modload.NewNoMainModulesError(loaderstate)) } if !versionOkForMainModule(q.version) { diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index 7299452670c13f..f6ba8d43b779f5 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -52,7 +52,7 @@ func findStandardImportPath(path string) string { // standard library or if the package was not successfully loaded with // LoadPackages or ImportFromFiles, nil is returned. func PackageModuleInfo(loaderstate *State, ctx context.Context, pkgpath string) *modinfo.ModulePublic { - if isStandardImportPath(pkgpath) || !Enabled(loaderstate) { + if isStandardImportPath(pkgpath) || !loaderstate.Enabled() { return nil } m, ok := findModule(loaded, pkgpath) @@ -69,7 +69,7 @@ func PackageModuleInfo(loaderstate *State, ctx context.Context, pkgpath string) // standard library or if the package was not successfully loaded with // LoadPackages or ImportFromFiles, the empty string is returned. 
func PackageModRoot(loaderstate *State, ctx context.Context, pkgpath string) string { - if isStandardImportPath(pkgpath) || !Enabled(loaderstate) || cfg.BuildMod == "vendor" { + if isStandardImportPath(pkgpath) || !loaderstate.Enabled() || cfg.BuildMod == "vendor" { return "" } m, ok := findModule(loaded, pkgpath) @@ -84,7 +84,7 @@ func PackageModRoot(loaderstate *State, ctx context.Context, pkgpath string) str } func ModuleInfo(loaderstate *State, ctx context.Context, path string) *modinfo.ModulePublic { - if !Enabled(loaderstate) { + if !loaderstate.Enabled() { return nil } diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go index cb64bec9c81d12..37c2a6c759f0db 100644 --- a/src/cmd/go/internal/modload/buildlist.go +++ b/src/cmd/go/internal/modload/buildlist.go @@ -165,7 +165,7 @@ func (rs *Requirements) String() string { func (rs *Requirements) initVendor(loaderstate *State, vendorList []module.Version) { rs.graphOnce.Do(func() { roots := loaderstate.MainModules.Versions() - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { // Use rs.rootModules to pull in the go and toolchain roots // from the go.work file and preserve the invariant that all // of rs.rootModules are in mg.g. @@ -208,7 +208,7 @@ func (rs *Requirements) initVendor(loaderstate *State, vendorList []module.Versi // graph, but still distinguishes between direct and indirect // dependencies. 
vendorMod := module.Version{Path: "vendor/modules.txt", Version: ""} - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { for _, m := range loaderstate.MainModules.Versions() { reqs, _ := rootsFromModFile(loaderstate, m, loaderstate.MainModules.ModFile(m), omitToolchainRoot) mg.g.Require(m, append(reqs, vendorMod)) @@ -333,7 +333,7 @@ func readModGraph(loaderstate *State, ctx context.Context, pruning modPruning, r } var graphRoots []module.Version - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { graphRoots = roots } else { graphRoots = loaderstate.MainModules.Versions() @@ -347,7 +347,7 @@ func readModGraph(loaderstate *State, ctx context.Context, pruning modPruning, r ) if pruning != workspace { - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { panic("pruning is not workspace in workspace mode") } mg.g.Require(loaderstate.MainModules.mustGetSingleMainModule(loaderstate), roots) @@ -529,7 +529,7 @@ func (mg *ModuleGraph) findError() error { func (mg *ModuleGraph) allRootsSelected(loaderstate *State) bool { var roots []module.Version - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { roots = loaderstate.MainModules.Versions() } else { roots, _ = mg.g.RequiredBy(loaderstate.MainModules.mustGetSingleMainModule(loaderstate)) diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go index c6b56c35d4b129..3998ce11726fe0 100644 --- a/src/cmd/go/internal/modload/import.go +++ b/src/cmd/go/internal/modload/import.go @@ -29,10 +29,11 @@ import ( ) type ImportMissingError struct { - Path string - Module module.Version - QueryErr error - modContainingCWD module.Version + Path string + Module module.Version + QueryErr error + modContainingCWD module.Version + allowMissingModuleImports bool // modRoot is dependent on the value of ImportingMainModule and should be // kept in sync. 
@@ -70,7 +71,7 @@ func (e *ImportMissingError) Error() string { if e.QueryErr != nil && !errors.Is(e.QueryErr, ErrNoModRoot) { return fmt.Sprintf("cannot find module providing package %s: %v", e.Path, e.QueryErr) } - if cfg.BuildMod == "mod" || (cfg.BuildMod == "readonly" && allowMissingModuleImports) { + if cfg.BuildMod == "mod" || (cfg.BuildMod == "readonly" && e.allowMissingModuleImports) { return "cannot find module providing package " + e.Path } @@ -340,7 +341,7 @@ func importFromModules(loaderstate *State, ctx context.Context, path string, rs } } - if HasModRoot(loaderstate) { + if loaderstate.HasModRoot() { vendorDir := VendorDir(loaderstate) dir, inVendorDir, _ := dirInModule(path, "", vendorDir, false) if inVendorDir { @@ -355,7 +356,7 @@ func importFromModules(loaderstate *State, ctx context.Context, path string, rs roots = append(roots, vendorDir) } else { subCommand := "mod" - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { subCommand = "work" } fmt.Fprintf(os.Stderr, "go: ignoring package %s which exists in the vendor directory but is missing from vendor/modules.txt. To sync the vendor directory run go %s vendor.\n", path, subCommand) @@ -373,8 +374,9 @@ func importFromModules(loaderstate *State, ctx context.Context, path string, rs if len(mods) == 0 { return module.Version{}, "", "", nil, &ImportMissingError{ - Path: path, - modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + Path: path, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, } } @@ -490,14 +492,15 @@ func importFromModules(loaderstate *State, ctx context.Context, path string, rs // We checked the full module graph and still didn't find the // requested package. 
var queryErr error - if !HasModRoot(loaderstate) { + if !loaderstate.HasModRoot() { queryErr = NewNoMainModulesError(loaderstate) } return module.Version{}, "", "", nil, &ImportMissingError{ - Path: path, - QueryErr: queryErr, - isStd: pathIsStd, - modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + Path: path, + QueryErr: queryErr, + isStd: pathIsStd, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, } } @@ -571,9 +574,10 @@ func queryImport(loaderstate *State, ctx context.Context, path string, rs *Requi } else if ok { if cfg.BuildMod == "readonly" { return module.Version{}, &ImportMissingError{ - Path: path, - replaced: m, - modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + Path: path, + replaced: m, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, } } return m, nil @@ -601,13 +605,14 @@ func queryImport(loaderstate *State, ctx context.Context, path string, rs *Requi // // Instead of trying QueryPattern, report an ImportMissingError immediately. return module.Version{}, &ImportMissingError{ - Path: path, - isStd: true, - modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + Path: path, + isStd: true, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, } } - if (cfg.BuildMod == "readonly" || cfg.BuildMod == "vendor") && !allowMissingModuleImports { + if (cfg.BuildMod == "readonly" || cfg.BuildMod == "vendor") && !loaderstate.allowMissingModuleImports { // In readonly mode, we can't write go.mod, so we shouldn't try to look up // the module. If readonly mode was enabled explicitly, include that in // the error message. 
@@ -620,9 +625,10 @@ func queryImport(loaderstate *State, ctx context.Context, path string, rs *Requi queryErr = fmt.Errorf("import lookup disabled by -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) } return module.Version{}, &ImportMissingError{ - Path: path, - QueryErr: queryErr, - modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + Path: path, + QueryErr: queryErr, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, } } @@ -642,9 +648,10 @@ func queryImport(loaderstate *State, ctx context.Context, path string, rs *Requi // Return "cannot find module providing package […]" instead of whatever // low-level error QueryPattern produced. return module.Version{}, &ImportMissingError{ - Path: path, - QueryErr: err, - modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + Path: path, + QueryErr: err, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, } } else { return module.Version{}, err @@ -670,10 +677,11 @@ func queryImport(loaderstate *State, ctx context.Context, path string, rs *Requi return c.Mod, nil } return module.Version{}, &ImportMissingError{ - Path: path, - Module: candidates[0].Mod, - newMissingVersion: candidate0MissingVersion, - modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + Path: path, + Module: candidates[0].Mod, + newMissingVersion: candidate0MissingVersion, + modContainingCWD: loaderstate.MainModules.ModContainingCWD(), + allowMissingModuleImports: loaderstate.allowMissingModuleImports, } } @@ -820,7 +828,7 @@ func fetch(loaderstate *State, ctx context.Context, mod module.Version) (dir str // mustHaveSums reports whether we require that all checksums // needed to load or build packages are already present in the go.sum file. 
func mustHaveSums(loaderstate *State) bool { - return HasModRoot(loaderstate) && cfg.BuildMod == "readonly" && !inWorkspaceMode(loaderstate) + return loaderstate.HasModRoot() && cfg.BuildMod == "readonly" && !loaderstate.inWorkspaceMode() } type sumMissingError struct { diff --git a/src/cmd/go/internal/modload/import_test.go b/src/cmd/go/internal/modload/import_test.go index 0716675a91d2bd..820fb87b5928f2 100644 --- a/src/cmd/go/internal/modload/import_test.go +++ b/src/cmd/go/internal/modload/import_test.go @@ -58,16 +58,11 @@ var importTests = []struct { func TestQueryImport(t *testing.T) { loaderstate := NewState() loaderstate.RootMode = NoRoot + loaderstate.AllowMissingModuleImports() testenv.MustHaveExternalNetwork(t) testenv.MustHaveExecPath(t, "git") - oldAllowMissingModuleImports := allowMissingModuleImports - defer func() { - allowMissingModuleImports = oldAllowMissingModuleImports - }() - allowMissingModuleImports = true - ctx := context.Background() rs := LoadModFile(loaderstate, ctx) diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index 9abfac971573b3..3d6f9a4a65abc3 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -38,8 +38,6 @@ import ( // // TODO(#40775): See if these can be plumbed as explicit parameters. var ( - allowMissingModuleImports bool - // ExplicitWriteGoMod prevents LoadPackages, ListModules, and other functions // from updating go.mod and go.sum or reporting errors when updates are // needed. A package should set this if it would cause go.mod to be written @@ -85,7 +83,7 @@ func EnterWorkspace(loaderstate *State, ctx context.Context) (exit func(), err e loaderstate.ForceUseModules = true // Load in workspace mode. - InitWorkfile(loaderstate) + loaderstate.InitWorkfile() LoadModFile(loaderstate, ctx) // Update the content of the previous main module, and recompute the requirements. 
@@ -195,7 +193,7 @@ func (mms *MainModuleSet) getSingleMainModule(loaderstate *State) (module.Versio return module.Version{}, errors.New("internal error: mustGetSingleMainModule called in context with no main modules") } if len(mms.versions) != 1 { - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { return module.Version{}, errors.New("internal error: mustGetSingleMainModule called in workspace mode") } else { return module.Version{}, errors.New("internal error: multiple main modules present outside of workspace mode") @@ -255,7 +253,7 @@ func (mms *MainModuleSet) HighestReplaced() map[string]string { // GoVersion returns the go version set on the single module, in module mode, // or the go.work file in workspace mode. func (mms *MainModuleSet) GoVersion(loaderstate *State) string { - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { return gover.FromGoWork(mms.workFile) } if mms != nil && len(mms.versions) == 1 { @@ -275,7 +273,7 @@ func (mms *MainModuleSet) GoVersion(loaderstate *State) string { // or on the go.work file in workspace mode. // The caller must not modify the result. func (mms *MainModuleSet) Godebugs(loaderstate *State) []*modfile.Godebug { - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { if mms.workFile != nil { return mms.workFile.Godebug } @@ -345,13 +343,13 @@ func BinDir(loaderstate *State) string { // InitWorkfile initializes the workFilePath variable for commands that // operate in workspace mode. It should not be called by other commands, // for example 'go mod tidy', that don't operate in workspace mode. -func InitWorkfile(loaderstate *State) { +func (loaderstate *State) InitWorkfile() { // Initialize fsys early because we need overlay to read go.work file. 
fips140.Init() if err := fsys.Init(); err != nil { base.Fatal(err) } - loaderstate.workFilePath = FindGoWork(loaderstate, base.Cwd()) + loaderstate.workFilePath = loaderstate.FindGoWork(base.Cwd()) } // FindGoWork returns the name of the go.work file for this command, @@ -359,7 +357,7 @@ func InitWorkfile(loaderstate *State) { // Most code should use Init and Enabled rather than use this directly. // It is exported mainly for Go toolchain switching, which must process // the go.work very early at startup. -func FindGoWork(loaderstate *State, wd string) string { +func (loaderstate *State) FindGoWork(wd string) string { if loaderstate.RootMode == NoRoot { return "" } @@ -415,7 +413,8 @@ func (s *State) setState(new State) State { } type State struct { - initialized bool + initialized bool + allowMissingModuleImports bool // ForceUseModules may be set to force modules to be enabled when // GO111MODULE=auto or to report an error when GO111MODULE=off. @@ -576,7 +575,7 @@ func Init(loaderstate *State) { // of 'go get', but Init reads the -modfile flag in 'go get', so it shouldn't // be called until the command is installed and flags are parsed. Instead of // calling Init and Enabled, the main package can call this function. -func WillBeEnabled(loaderstate *State) bool { +func (loaderstate *State) WillBeEnabled() bool { if loaderstate.modRoots != nil || cfg.ModulesEnabled { // Already enabled. return true @@ -628,13 +627,13 @@ func FindGoMod(wd string) string { // If modules are enabled but there is no main module, Enabled returns true // and then the first use of module information will call die // (usually through MustModRoot). 
-func Enabled(loaderstate *State) bool { +func (loaderstate *State) Enabled() bool { Init(loaderstate) return loaderstate.modRoots != nil || cfg.ModulesEnabled } func (s *State) vendorDir() (string, error) { - if inWorkspaceMode(s) { + if s.inWorkspaceMode() { return filepath.Join(filepath.Dir(WorkFilePath(s)), "vendor"), nil } mainModule, err := s.MainModules.getSingleMainModule(s) @@ -667,11 +666,11 @@ func VendorDir(loaderstate *State) string { return dir } -func inWorkspaceMode(loaderstate *State) bool { +func (loaderstate *State) inWorkspaceMode() bool { if !loaderstate.initialized { panic("inWorkspaceMode called before modload.Init called") } - if !Enabled(loaderstate) { + if !loaderstate.Enabled() { return false } return loaderstate.workFilePath != "" @@ -680,16 +679,16 @@ func inWorkspaceMode(loaderstate *State) bool { // HasModRoot reports whether a main module or main modules are present. // HasModRoot may return false even if Enabled returns true: for example, 'get' // does not require a main module. -func HasModRoot(loaderstate *State) bool { +func (loaderstate *State) HasModRoot() bool { Init(loaderstate) return loaderstate.modRoots != nil } // MustHaveModRoot checks that a main module or main modules are present, // and calls base.Fatalf if there are no main modules. -func MustHaveModRoot(loaderstate *State) { +func (loaderstate *State) MustHaveModRoot() { Init(loaderstate) - if !HasModRoot(loaderstate) { + if !loaderstate.HasModRoot() { die(loaderstate) } } @@ -697,8 +696,8 @@ func MustHaveModRoot(loaderstate *State) { // ModFilePath returns the path that would be used for the go.mod // file, if in module mode. ModFilePath calls base.Fatalf if there is no main // module, even if -modfile is set. 
-func ModFilePath(loaderstate *State) string { - MustHaveModRoot(loaderstate) +func (loaderstate *State) ModFilePath() string { + loaderstate.MustHaveModRoot() return modFilePath(findModuleRoot(base.Cwd())) } @@ -716,7 +715,7 @@ func die(loaderstate *State) { if cfg.Getenv("GO111MODULE") == "off" { base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") } - if !inWorkspaceMode(loaderstate) { + if !loaderstate.inWorkspaceMode() { if dir, name := findAltConfig(base.Cwd()); dir != "" { rel, err := filepath.Rel(base.Cwd(), dir) if err != nil { @@ -753,7 +752,7 @@ func (e noMainModulesError) Unwrap() error { func NewNoMainModulesError(s *State) noMainModulesError { return noMainModulesError{ - inWorkspaceMode: inWorkspaceMode(s), + inWorkspaceMode: s.inWorkspaceMode(), } } @@ -921,7 +920,7 @@ func loadModFile(loaderstate *State, ctx context.Context, opts *PackageOpts) (*R Init(loaderstate) var workFile *modfile.WorkFile - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { var err error workFile, loaderstate.modRoots, err = LoadWorkFile(loaderstate.workFilePath) if err != nil { @@ -965,7 +964,7 @@ func loadModFile(loaderstate *State, ctx context.Context, opts *PackageOpts) (*R roots []module.Version direct = map[string]bool{"go": true} ) - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { // Since we are in a workspace, the Go version for the synthetic // "command-line-arguments" module must not exceed the Go version // for the workspace. @@ -1004,7 +1003,7 @@ func loadModFile(loaderstate *State, ctx context.Context, opts *PackageOpts) (*R var fixed bool data, f, err := ReadModFile(gomod, fixVersion(loaderstate, ctx, &fixed)) if err != nil { - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { if tooNew, ok := err.(*gover.TooNewError); ok && !strings.HasPrefix(cfg.CmdName, "work ") { // Switching to a newer toolchain won't help - the go.work has the wrong version. 
// Report this more specific error, unless we are a command like 'go work use' @@ -1019,7 +1018,7 @@ func loadModFile(loaderstate *State, ctx context.Context, opts *PackageOpts) (*R errs = append(errs, err) continue } - if inWorkspaceMode(loaderstate) && !strings.HasPrefix(cfg.CmdName, "work ") { + if loaderstate.inWorkspaceMode() && !strings.HasPrefix(cfg.CmdName, "work ") { // Refuse to use workspace if its go version is too old. // Disable this check if we are a workspace command like work use or work sync, // which will fix the problem. @@ -1031,7 +1030,7 @@ func loadModFile(loaderstate *State, ctx context.Context, opts *PackageOpts) (*R } } - if !inWorkspaceMode(loaderstate) { + if !loaderstate.inWorkspaceMode() { ok := true for _, g := range f.Godebug { if err := CheckGodebug("godebug", g.Key, g.Value); err != nil { @@ -1079,7 +1078,7 @@ func loadModFile(loaderstate *State, ctx context.Context, opts *PackageOpts) (*R rs.initVendor(loaderstate, vendorList) } - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { // We don't need to update the mod file so return early. loaderstate.requirements = rs return rs, nil @@ -1292,11 +1291,11 @@ func fixVersion(loaderstate *State, ctx context.Context, fixed *bool) modfile.Ve // // This function affects the default cfg.BuildMod when outside of a module, // so it can only be called prior to Init. 
-func AllowMissingModuleImports(loaderstate *State) { - if loaderstate.initialized { +func (s *State) AllowMissingModuleImports() { + if s.initialized { panic("AllowMissingModuleImports after Init") } - allowMissingModuleImports = true + s.allowMissingModuleImports = true } // makeMainModules creates a MainModuleSet and associated variables according to @@ -1422,7 +1421,7 @@ func requirementsFromModFiles(loaderstate *State, ctx context.Context, workFile var roots []module.Version direct := map[string]bool{} var pruning modPruning - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { pruning = workspace roots = make([]module.Version, len(loaderstate.MainModules.Versions()), 2+len(loaderstate.MainModules.Versions())) copy(roots, loaderstate.MainModules.Versions()) @@ -1517,7 +1516,7 @@ func appendGoAndToolchainRoots(roots []module.Version, goVersion, toolchain stri // wasn't provided. setDefaultBuildMod may be called multiple times. func setDefaultBuildMod(loaderstate *State) { if cfg.BuildModExplicit { - if inWorkspaceMode(loaderstate) && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" { + if loaderstate.inWorkspaceMode() && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" { switch cfg.CmdName { case "work sync", "mod graph", "mod verify", "mod why": // These commands run with BuildMod set to mod, but they don't take the @@ -1553,7 +1552,7 @@ func setDefaultBuildMod(loaderstate *State) { return } if loaderstate.modRoots == nil { - if allowMissingModuleImports { + if loaderstate.allowMissingModuleImports { cfg.BuildMod = "mod" } else { cfg.BuildMod = "readonly" @@ -1564,7 +1563,7 @@ func setDefaultBuildMod(loaderstate *State) { if len(loaderstate.modRoots) >= 1 { var goVersion string var versionSource string - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { versionSource = "go.work" if wfg := loaderstate.MainModules.WorkFile().Go; wfg != nil { goVersion = wfg.Version @@ -1652,7 +1651,7 @@ func 
modulesTextIsForWorkspace(vendorDir string) (bool, error) { } func mustHaveCompleteRequirements(loaderstate *State) bool { - return cfg.BuildMod != "mod" && !inWorkspaceMode(loaderstate) + return cfg.BuildMod != "mod" && !loaderstate.inWorkspaceMode() } // addGoStmt adds a go directive to the go.mod file if it does not already @@ -1956,7 +1955,7 @@ func UpdateGoModFromReqs(loaderstate *State, ctx context.Context, opts WriteOpts // // In workspace mode, commitRequirements only writes changes to go.work.sum. func commitRequirements(loaderstate *State, ctx context.Context, opts WriteOpts) (err error) { - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { // go.mod files aren't updated in workspace mode, but we still want to // update the go.work.sum file. return modfetch.WriteGoSum(ctx, keepSums(loaderstate, ctx, loaded, loaderstate.requirements, addBuildListZipSums), mustHaveCompleteRequirements(loaderstate)) @@ -2243,9 +2242,12 @@ func CheckGodebug(verb, k, v string) error { } return nil } - for _, info := range godebugs.All { - if k == info.Name { - return nil + if godebugs.Lookup(k) != nil { + return nil + } + for _, info := range godebugs.Removed { + if info.Name == k { + return fmt.Errorf("use of removed %s %q, see https://go.dev/doc/godebug#go-1%v", verb, k, info.Removed) } } return fmt.Errorf("unknown %s %q", verb, k) diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index 6a4d788824caa9..316fda4003be03 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -145,7 +145,7 @@ func listModules(loaderstate *State, ctx context.Context, rs *Requirements, args } if arg == "all" || strings.Contains(arg, "...") { needFullGraph = true - if !HasModRoot(loaderstate) { + if !loaderstate.HasModRoot() { base.Fatalf("go: cannot match %q: %v", arg, NewNoMainModulesError(loaderstate)) } continue @@ -154,7 +154,7 @@ func listModules(loaderstate *State, ctx context.Context, rs 
*Requirements, args if vers == "upgrade" || vers == "patch" { if _, ok := rs.rootSelected(loaderstate, path); !ok || rs.pruning == unpruned { needFullGraph = true - if !HasModRoot(loaderstate) { + if !loaderstate.HasModRoot() { base.Fatalf("go: cannot match %q: %v", arg, NewNoMainModulesError(loaderstate)) } } @@ -163,7 +163,7 @@ func listModules(loaderstate *State, ctx context.Context, rs *Requirements, args } if _, ok := rs.rootSelected(loaderstate, arg); !ok || rs.pruning == unpruned { needFullGraph = true - if mode&ListVersions == 0 && !HasModRoot(loaderstate) { + if mode&ListVersions == 0 && !loaderstate.HasModRoot() { base.Fatalf("go: cannot match %q without -versions or an explicit version: %v", arg, NewNoMainModulesError(loaderstate)) } } diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go index 065d3a78163a21..b4d128fe9a15f6 100644 --- a/src/cmd/go/internal/modload/load.go +++ b/src/cmd/go/internal/modload/load.go @@ -294,7 +294,7 @@ func LoadPackages(loaderstate *State, ctx context.Context, opts PackageOpts, pat // If we're outside of a module, ensure that the failure mode // indicates that. - if !HasModRoot(loaderstate) { + if !loaderstate.HasModRoot() { die(loaderstate) } @@ -546,7 +546,7 @@ func matchLocalDirs(loaderstate *State, ctx context.Context, modRoots []string, if !slices.Contains(modRoots, modRoot) && search.InDir(absDir, cfg.GOROOTsrc) == "" && pathInModuleCache(loaderstate, ctx, absDir, rs) == "" { m.Dirs = []string{} scope := "main module or its selected dependencies" - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { scope = "modules listed in go.work or their selected dependencies" } m.AddError(fmt.Errorf("directory prefix %s does not contain %s", base.ShortPath(absDir), scope)) @@ -674,7 +674,7 @@ func resolveLocalPackage(loaderstate *State, ctx context.Context, dir string, rs if dirstr == "directory ." 
{ dirstr = "current directory" } - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { if mr := findModuleRoot(absDir); mr != "" { return "", fmt.Errorf("%s is contained in a module that is not one of the workspace modules listed in go.work. You can add the module to the workspace using:\n\tgo work use %s", dirstr, base.ShortPath(mr)) } @@ -800,7 +800,7 @@ func ImportFromFiles(loaderstate *State, ctx context.Context, gofiles []string) // DirImportPath returns the effective import path for dir, // provided it is within a main module, or else returns ".". func (mms *MainModuleSet) DirImportPath(loaderstate *State, ctx context.Context, dir string) (path string, m module.Version) { - if !HasModRoot(loaderstate) { + if !loaderstate.HasModRoot() { return ".", module.Version{} } LoadModFile(loaderstate, ctx) // Sets targetPrefix. @@ -1184,7 +1184,7 @@ func loadFromRoots(loaderstate *State, ctx context.Context, params loaderParams) continue } - if !ld.ResolveMissingImports || (!HasModRoot(loaderstate) && !allowMissingModuleImports) { + if !ld.ResolveMissingImports || (!loaderstate.HasModRoot() && !loaderstate.allowMissingModuleImports) { // We've loaded as much as we can without resolving missing imports. break } @@ -1399,7 +1399,7 @@ func (ld *loader) updateRequirements(loaderstate *State, ctx context.Context) (c continue } - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { // In workspace mode / workspace pruning mode, the roots are the main modules // rather than the main module's direct dependencies. The check below on the selected // roots does not apply. diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go index be0f2a5c1166fc..7191833a0dcce9 100644 --- a/src/cmd/go/internal/modload/modfile.go +++ b/src/cmd/go/internal/modload/modfile.go @@ -574,7 +574,7 @@ type retraction struct { // // The caller must not modify the returned summary. 
func goModSummary(loaderstate *State, m module.Version) (*modFileSummary, error) { - if m.Version == "" && !inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) { + if m.Version == "" && !loaderstate.inWorkspaceMode() && loaderstate.MainModules.Contains(m.Path) { panic("internal error: goModSummary called on a main module") } if gover.IsToolchain(m.Path) { @@ -686,7 +686,7 @@ func rawGoModSummary(loaderstate *State, m module.Version) (*modFileSummary, err } return &modFileSummary{module: m}, nil } - if m.Version == "" && !inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) { + if m.Version == "" && !loaderstate.inWorkspaceMode() && loaderstate.MainModules.Contains(m.Path) { // Calling rawGoModSummary implies that we are treating m as a module whose // requirements aren't the roots of the module graph and can't be modified. // @@ -694,12 +694,12 @@ func rawGoModSummary(loaderstate *State, m module.Version) (*modFileSummary, err // are the roots of the module graph and we expect them to be kept consistent. panic("internal error: rawGoModSummary called on a main module") } - if m.Version == "" && inWorkspaceMode(loaderstate) && m.Path == "command-line-arguments" { + if m.Version == "" && loaderstate.inWorkspaceMode() && m.Path == "command-line-arguments" { // "go work sync" calls LoadModGraph to make sure the module graph is valid. // If there are no modules in the workspace, we synthesize an empty // command-line-arguments module, which rawGoModData cannot read a go.mod for. return &modFileSummary{module: m}, nil - } else if m.Version == "" && inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) { + } else if m.Version == "" && loaderstate.inWorkspaceMode() && loaderstate.MainModules.Contains(m.Path) { // When go get uses EnterWorkspace to check that the workspace loads properly, // it will update the contents of the workspace module's modfile in memory. 
To use the updated // contents of the modfile when doing the load, don't read from disk and instead @@ -785,7 +785,7 @@ func rawGoModData(loaderstate *State, m module.Version) (name string, data []byt if m.Version == "" { dir := m.Path if !filepath.IsAbs(dir) { - if inWorkspaceMode(loaderstate) && loaderstate.MainModules.Contains(m.Path) { + if loaderstate.inWorkspaceMode() && loaderstate.MainModules.Contains(m.Path) { dir = loaderstate.MainModules.ModRoot(m) } else { // m is a replacement module with only a file path. diff --git a/src/cmd/go/internal/modload/search.go b/src/cmd/go/internal/modload/search.go index 1bb4e3f911e04f..c45808635dbe69 100644 --- a/src/cmd/go/internal/modload/search.go +++ b/src/cmd/go/internal/modload/search.go @@ -176,7 +176,7 @@ func matchPackages(loaderstate *State, ctx context.Context, m *search.Match, tag walkPkgs(modRoot, loaderstate.MainModules.PathPrefix(mod), pruneGoMod|pruneVendor) } } - if HasModRoot(loaderstate) { + if loaderstate.HasModRoot() { walkPkgs(VendorDir(loaderstate), "", pruneVendor) } return diff --git a/src/cmd/go/internal/modload/vendor.go b/src/cmd/go/internal/modload/vendor.go index 1fc20ad398b363..9956bcdb127290 100644 --- a/src/cmd/go/internal/modload/vendor.go +++ b/src/cmd/go/internal/modload/vendor.go @@ -154,7 +154,7 @@ func checkVendorConsistency(loaderstate *State, indexes []*modFileIndex, modFile } pre114 := false - if !inWorkspaceMode(loaderstate) { // workspace mode was added after Go 1.14 + if !loaderstate.inWorkspaceMode() { // workspace mode was added after Go 1.14 if len(indexes) != 1 { panic(fmt.Errorf("not in workspace mode but number of indexes is %v, not 1", len(indexes))) } @@ -252,7 +252,7 @@ func checkVendorConsistency(loaderstate *State, indexes []*modFileIndex, modFile } if !foundRequire { article := "" - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { article = "a " } vendErrorf(mod, "is marked as explicit in vendor/modules.txt, but not explicitly required in 
%vgo.mod", article) @@ -264,7 +264,7 @@ func checkVendorConsistency(loaderstate *State, indexes []*modFileIndex, modFile for _, mod := range vendorReplaced { r := Replacement(loaderstate, mod) replacementSource := "go.mod" - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { replacementSource = "the workspace" } if r == (module.Version{}) { @@ -276,7 +276,7 @@ func checkVendorConsistency(loaderstate *State, indexes []*modFileIndex, modFile if vendErrors.Len() > 0 { subcmd := "mod" - if inWorkspaceMode(loaderstate) { + if loaderstate.inWorkspaceMode() { subcmd = "work" } base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo %s vendor", filepath.Dir(VendorDir(loaderstate)), vendErrors, subcmd) diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go index f821b37f292bfd..ebd99ccfb21f19 100644 --- a/src/cmd/go/internal/run/run.go +++ b/src/cmd/go/internal/run/run.go @@ -79,10 +79,10 @@ func runRun(ctx context.Context, cmd *base.Command, args []string) { // for -race and -msan. 
moduleLoaderState.ForceUseModules = true moduleLoaderState.RootMode = modload.NoRoot - modload.AllowMissingModuleImports(moduleLoaderState) + moduleLoaderState.AllowMissingModuleImports() modload.Init(moduleLoaderState) } else { - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() } work.BuildInit(moduleLoaderState) diff --git a/src/cmd/go/internal/telemetrystats/telemetrystats.go b/src/cmd/go/internal/telemetrystats/telemetrystats.go index 84b4ae2e841567..81a6e1e1758461 100644 --- a/src/cmd/go/internal/telemetrystats/telemetrystats.go +++ b/src/cmd/go/internal/telemetrystats/telemetrystats.go @@ -25,13 +25,20 @@ func incrementConfig() { // TODO(jitsu): Telemetry for the go/mode counters should eventually be // moved to modload.Init() s := modload.NewState() - if !modload.WillBeEnabled(s) { + if !s.WillBeEnabled() { counter.Inc("go/mode:gopath") - } else if workfile := modload.FindGoWork(s, base.Cwd()); workfile != "" { + } else if workfile := s.FindGoWork(base.Cwd()); workfile != "" { counter.Inc("go/mode:workspace") } else { counter.Inc("go/mode:module") } + + if cfg.BuildContext.CgoEnabled { + counter.Inc("go/cgo:enabled") + } else { + counter.Inc("go/cgo:disabled") + } + counter.Inc("go/platform/target/goos:" + cfg.Goos) counter.Inc("go/platform/target/goarch:" + cfg.Goarch) switch cfg.Goarch { diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 77fb9488ac03fe..44ee98feaaf576 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -684,7 +684,7 @@ var defaultVetFlags = []string{ func runTest(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() pkgArgs, testArgs = testFlags(args) - modload.InitWorkfile(moduleLoaderState) // The test command does custom flag processing; initialize workspaces after that. + moduleLoaderState.InitWorkfile() // The test command does custom flag processing; initialize workspaces after that. 
if cfg.DebugTrace != "" { var close func() error @@ -742,7 +742,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) { if !mainMods.Contains(m.Path) { base.Fatalf("cannot use -fuzz flag on package outside the main module") } - } else if pkgs[0].Standard && modload.Enabled(moduleLoaderState) { + } else if pkgs[0].Standard && moduleLoaderState.Enabled() { // Because packages in 'std' and 'cmd' are part of the standard library, // they are only treated as part of a module in 'go mod' subcommands and // 'go get'. However, we still don't want to accidentally corrupt their diff --git a/src/cmd/go/internal/tool/tool.go b/src/cmd/go/internal/tool/tool.go index e283c7354f5a34..92e8a803105f8d 100644 --- a/src/cmd/go/internal/tool/tool.go +++ b/src/cmd/go/internal/tool/tool.go @@ -162,7 +162,7 @@ func listTools(loaderstate *modload.State, ctx context.Context) { fmt.Println(name) } - modload.InitWorkfile(loaderstate) + loaderstate.InitWorkfile() modload.LoadModFile(loaderstate, ctx) modTools := slices.Sorted(maps.Keys(loaderstate.MainModules.Tools())) for _, tool := range modTools { @@ -253,7 +253,7 @@ func loadBuiltinTool(toolName string) string { } func loadModTool(loaderstate *modload.State, ctx context.Context, name string) string { - modload.InitWorkfile(loaderstate) + loaderstate.InitWorkfile() modload.LoadModFile(loaderstate, ctx) matches := []string{} diff --git a/src/cmd/go/internal/toolchain/select.go b/src/cmd/go/internal/toolchain/select.go index e7201e2f5fbb27..4c7e7a5e576ba5 100644 --- a/src/cmd/go/internal/toolchain/select.go +++ b/src/cmd/go/internal/toolchain/select.go @@ -99,7 +99,7 @@ func Select() { log.SetPrefix("go: ") defer log.SetPrefix("") - if !modload.WillBeEnabled(moduleLoaderState) { + if !moduleLoaderState.WillBeEnabled() { return } @@ -525,7 +525,7 @@ func raceSafeCopy(old, new string) error { // The toolchain line overrides the version line func modGoToolchain(loaderstate *modload.State) (file, goVers, toolchain string) { wd 
:= base.UncachedCwd() - file = modload.FindGoWork(loaderstate, wd) + file = loaderstate.FindGoWork(wd) // $GOWORK can be set to a file that does not yet exist, if we are running 'go work init'. // Do not try to load the file in that case if _, err := os.Stat(file); err != nil { diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go index a429cbff65934e..9055446325af6e 100644 --- a/src/cmd/go/internal/vet/vet.go +++ b/src/cmd/go/internal/vet/vet.go @@ -126,7 +126,7 @@ func run(ctx context.Context, cmd *base.Command, args []string) { // The vet/fix commands do custom flag processing; // initialize workspaces after that. - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() if cfg.DebugTrace != "" { var close func() error diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index 7ca95cbe3f9286..c483c19c65b30c 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -460,7 +460,7 @@ var pkgsFilter = func(pkgs []*load.Package) []*load.Package { return pkgs } func runBuild(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() BuildInit(moduleLoaderState) b := NewBuilder("", moduleLoaderState.VendorDirOrEmpty) defer func() { @@ -696,10 +696,10 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) { } } - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() BuildInit(moduleLoaderState) pkgs := load.PackagesAndErrors(moduleLoaderState, ctx, load.PackageOpts{AutoVCS: true}, args) - if cfg.ModulesEnabled && !modload.HasModRoot(moduleLoaderState) { + if cfg.ModulesEnabled && !moduleLoaderState.HasModRoot() { haveErrors := false allMissingErrors := true for _, pkg := range pkgs { @@ -863,7 +863,7 @@ func InstallPackages(loaderstate *modload.State, ctx context.Context, patterns [ func 
installOutsideModule(loaderstate *modload.State, ctx context.Context, args []string) { loaderstate.ForceUseModules = true loaderstate.RootMode = modload.NoRoot - modload.AllowMissingModuleImports(loaderstate) + loaderstate.AllowMissingModuleImports() modload.Init(loaderstate) BuildInit(loaderstate) diff --git a/src/cmd/go/internal/workcmd/edit.go b/src/cmd/go/internal/workcmd/edit.go index 2b9f658f861c85..b18098ba5d7f71 100644 --- a/src/cmd/go/internal/workcmd/edit.go +++ b/src/cmd/go/internal/workcmd/edit.go @@ -144,7 +144,7 @@ func runEditwork(ctx context.Context, cmd *base.Command, args []string) { if len(args) == 1 { gowork = args[0] } else { - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() gowork = modload.WorkFilePath(moduleLoaderState) } if gowork == "" { diff --git a/src/cmd/go/internal/workcmd/init.go b/src/cmd/go/internal/workcmd/init.go index 9ba9e4dec02c5f..896740f0803502 100644 --- a/src/cmd/go/internal/workcmd/init.go +++ b/src/cmd/go/internal/workcmd/init.go @@ -45,7 +45,7 @@ func init() { func runInit(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() moduleLoaderState.ForceUseModules = true diff --git a/src/cmd/go/internal/workcmd/sync.go b/src/cmd/go/internal/workcmd/sync.go index ae4fd9c5f34ce1..13ce1e5f4249ee 100644 --- a/src/cmd/go/internal/workcmd/sync.go +++ b/src/cmd/go/internal/workcmd/sync.go @@ -50,7 +50,7 @@ func init() { func runSync(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() moduleLoaderState.ForceUseModules = true - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() if modload.WorkFilePath(moduleLoaderState) == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") } diff --git a/src/cmd/go/internal/workcmd/use.go 
b/src/cmd/go/internal/workcmd/use.go index eae9688b52413f..041aa069e2d6bd 100644 --- a/src/cmd/go/internal/workcmd/use.go +++ b/src/cmd/go/internal/workcmd/use.go @@ -63,7 +63,7 @@ func init() { func runUse(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() moduleLoaderState.ForceUseModules = true - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() gowork := modload.WorkFilePath(moduleLoaderState) if gowork == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") diff --git a/src/cmd/go/internal/workcmd/vendor.go b/src/cmd/go/internal/workcmd/vendor.go index 8852d965fa3b94..26715c8d3be3c6 100644 --- a/src/cmd/go/internal/workcmd/vendor.go +++ b/src/cmd/go/internal/workcmd/vendor.go @@ -47,7 +47,7 @@ func init() { func runVendor(ctx context.Context, cmd *base.Command, args []string) { moduleLoaderState := modload.NewState() - modload.InitWorkfile(moduleLoaderState) + moduleLoaderState.InitWorkfile() if modload.WorkFilePath(moduleLoaderState) == "" { base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)") } diff --git a/src/cmd/go/testdata/script/list_empty_importpath.txt b/src/cmd/go/testdata/script/list_empty_importpath.txt index 4ddf6b36d26881..fe4210322bb4f1 100644 --- a/src/cmd/go/testdata/script/list_empty_importpath.txt +++ b/src/cmd/go/testdata/script/list_empty_importpath.txt @@ -1,12 +1,15 @@ ! go list all ! stderr 'panic' -[!GOOS:windows] [!GOOS:solaris] stderr 'invalid import path' -# #73976: Allow 'no errors' on Windows and Solaris until issue +[!GOOS:windows] [!GOOS:solaris] [!GOOS:freebsd] [!GOOS:openbsd] [!GOOS:netbsd] stderr 'invalid import path' +# #73976: Allow 'no errors' on Windows, Solaris, and BSD until issue # is resolved to prevent flakes. 
'no errors' is printed by # empty scanner.ErrorList errors so that's probably where the # message is coming from, though we don't know how. [GOOS:windows] stderr 'invalid import path|no errors' [GOOS:solaris] stderr 'invalid import path|no errors' +[GOOS:freebsd] stderr 'invalid import path|no errors' +[GOOS:openbsd] stderr 'invalid import path|no errors' +[GOOS:netbsd] stderr 'invalid import path|no errors' # go list produces a package for 'p' but not for '' go list -e all diff --git a/src/cmd/go/testdata/script/mod_removed_godebug.txt b/src/cmd/go/testdata/script/mod_removed_godebug.txt new file mode 100644 index 00000000000000..bd1f61c9d26cc9 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_removed_godebug.txt @@ -0,0 +1,11 @@ +# Test case that makes sure we print a nice error message +# instead of the generic "unknown godebug" error message +# for removed GODEBUGs. + +! go list +stderr '^go.mod:3: use of removed godebug "x509sha1", see https://go.dev/doc/godebug#go-124$' + +-- go.mod -- +module example.com/bar + +godebug x509sha1=1 diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index befd1bee13d66f..7e7f028bfb3d2b 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -7276,6 +7276,8 @@ func (c *ctxt7) opldrr(p *obj.Prog, a obj.As, rt, rn, rm int16, extension bool) op = OptionS<<10 | 0x3<<21 | 0x17<<27 | 1<<26 case AFMOVD: op = OptionS<<10 | 0x3<<21 | 0x1f<<27 | 1<<26 + case AFMOVQ: + op = OptionS<<10 | 0x7<<21 | 0x07<<27 | 1<<26 default: c.ctxt.Diag("bad opldrr %v\n%v", a, p) return 0 @@ -7308,6 +7310,8 @@ func (c *ctxt7) opstrr(p *obj.Prog, a obj.As, rt, rn, rm int16, extension bool) op = OptionS<<10 | 0x1<<21 | 0x17<<27 | 1<<26 case AFMOVD: op = OptionS<<10 | 0x1<<21 | 0x1f<<27 | 1<<26 + case AFMOVQ: + op = OptionS<<10 | 0x5<<21 | 0x07<<27 | 1<<26 default: c.ctxt.Diag("bad opstrr %v\n%v", a, p) return 0 diff --git a/src/cmd/internal/obj/loong64/a.out.go 
b/src/cmd/internal/obj/loong64/a.out.go index 3a676db922ca71..762dc338e3e149 100644 --- a/src/cmd/internal/obj/loong64/a.out.go +++ b/src/cmd/internal/obj/loong64/a.out.go @@ -1115,6 +1115,11 @@ const ( AXVSHUF4IW AXVSHUF4IV + AVPERMIW + AXVPERMIW + AXVPERMIV + AXVPERMIQ + AVSETEQV AVSETNEV AVSETANYEQB diff --git a/src/cmd/internal/obj/loong64/anames.go b/src/cmd/internal/obj/loong64/anames.go index 422ccbd9b0bc0a..607e6063110a3c 100644 --- a/src/cmd/internal/obj/loong64/anames.go +++ b/src/cmd/internal/obj/loong64/anames.go @@ -586,6 +586,10 @@ var Anames = []string{ "XVSHUF4IH", "XVSHUF4IW", "XVSHUF4IV", + "VPERMIW", + "XVPERMIW", + "XVPERMIV", + "XVPERMIQ", "VSETEQV", "VSETNEV", "VSETANYEQB", diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go index 7eb5668d82e231..87691838861c3d 100644 --- a/src/cmd/internal/obj/loong64/asm.go +++ b/src/cmd/internal/obj/loong64/asm.go @@ -58,6 +58,8 @@ var optab = []Optab{ {AMOVW, C_REG, C_NONE, C_NONE, C_REG, C_NONE, 1, 4, 0, 0}, {AMOVV, C_REG, C_NONE, C_NONE, C_REG, C_NONE, 1, 4, 0, 0}, + {AVMOVQ, C_VREG, C_NONE, C_NONE, C_VREG, C_NONE, 1, 4, 0, 0}, + {AXVMOVQ, C_XREG, C_NONE, C_NONE, C_XREG, C_NONE, 1, 4, 0, 0}, {AMOVB, C_REG, C_NONE, C_NONE, C_REG, C_NONE, 12, 4, 0, 0}, {AMOVBU, C_REG, C_NONE, C_NONE, C_REG, C_NONE, 12, 4, 0, 0}, {AMOVWU, C_REG, C_NONE, C_NONE, C_REG, C_NONE, 12, 4, 0, 0}, @@ -1778,6 +1780,7 @@ func buildop(ctxt *obj.Link) { opset(AVSHUF4IH, r0) opset(AVSHUF4IW, r0) opset(AVSHUF4IV, r0) + opset(AVPERMIW, r0) case AXVANDB: opset(AXVORB, r0) @@ -1787,6 +1790,9 @@ func buildop(ctxt *obj.Link) { opset(AXVSHUF4IH, r0) opset(AXVSHUF4IW, r0) opset(AXVSHUF4IV, r0) + opset(AXVPERMIW, r0) + opset(AXVPERMIV, r0) + opset(AXVPERMIQ, r0) case AVANDV: opset(AVORV, r0) @@ -2097,12 +2103,19 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { case 0: // pseudo ops break - case 1: // mov r1,r2 ==> OR r1,r0,r2 - a := AOR - if p.As == AMOVW { - a = ASLL + case 1: // mov rj, rd + 
switch p.As { + case AMOVW: + o1 = OP_RRR(c.oprrr(ASLL), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg)) + case AMOVV: + o1 = OP_RRR(c.oprrr(AOR), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg)) + case AVMOVQ: + o1 = OP_6IRR(c.opirr(AVSLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg)) + case AXVMOVQ: + o1 = OP_6IRR(c.opirr(AXVSLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg)) + default: + c.ctxt.Diag("unexpected encoding\n%v", p) } - o1 = OP_RRR(c.oprrr(a), uint32(REGZERO), uint32(p.From.Reg), uint32(p.To.Reg)) case 2: // add/sub r1,[r2],r3 r := int(p.Reg) @@ -4362,6 +4375,14 @@ func (c *ctxt0) opirr(a obj.As) uint32 { return 0x1de6 << 18 // xvshuf4i.w case AXVSHUF4IV: return 0x1de7 << 18 // xvshuf4i.d + case AVPERMIW: + return 0x1cf9 << 18 // vpermi.w + case AXVPERMIW: + return 0x1df9 << 18 // xvpermi.w + case AXVPERMIV: + return 0x1dfa << 18 // xvpermi.d + case AXVPERMIQ: + return 0x1dfb << 18 // xvpermi.q case AVBITCLRB: return 0x1CC4<<18 | 0x1<<13 // vbitclri.b case AVBITCLRH: diff --git a/src/cmd/internal/obj/loong64/doc.go b/src/cmd/internal/obj/loong64/doc.go index f7e5a4fb4279ea..c96501ea81b990 100644 --- a/src/cmd/internal/obj/loong64/doc.go +++ b/src/cmd/internal/obj/loong64/doc.go @@ -203,6 +203,15 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate) VMOVQ Vj.W[index], Vd.W4 | vreplvei.w vd, vj, ui2 | for i in range(4) : VR[vd].w[i] = VR[vj].w[ui2] VMOVQ Vj.V[index], Vd.V2 | vreplvei.d vd, vj, ui1 | for i in range(2) : VR[vd].d[i] = VR[vj].d[ui1] +3.7 Move vector register to vector register. + Instruction format: + VMOVQ Vj, Vd + + Mapping between Go and platform assembly: + Go assembly | platform assembly | semantics + VMOVQ Vj, Vd | vslli.d vd, vj, 0x0 | for i in range(2) : VR[vd].D[i] = SLL(VR[vj].D[i], 0) + VXMOVQ Xj, Xd | xvslli.d xd, xj, 0x0 | for i in range(4) : XR[xd].D[i] = SLL(XR[xj].D[i], 0) + 3.7 Load data from memory and broadcast to each element of a vector register. 
Instruction format: @@ -229,6 +238,23 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate) VMOVQ 8(R4), V5.W4 | vldrepl.w v5, r4, $2 VMOVQ 8(R4), V5.V2 | vldrepl.d v5, r4, $1 +3.8 Vector permutation instruction + Instruction format: + VPERMIW ui8, Vj, Vd + + Mapping between Go and platform assembly: + Go assembly | platform assembly | semantics + VPERMIW ui8, Vj, Vd | vpermi.w vd, vj, ui8 | VR[vd].W[0] = VR[vj].W[ui8[1:0]], VR[vd].W[1] = VR[vj].W[ui8[3:2]], + | | VR[vd].W[2] = VR[vd].W[ui8[5:4]], VR[vd].W[3] = VR[vd].W[ui8[7:6]] + XVPERMIW ui8, Xj, Xd | xvpermi.w xd, xj, ui8 | XR[xd].W[0] = XR[xj].W[ui8[1:0]], XR[xd].W[1] = XR[xj].W[ui8[3:2]], + | | XR[xd].W[3] = XR[xd].W[ui8[7:6]], XR[xd].W[2] = XR[xd].W[ui8[5:4]], + | | XR[xd].W[4] = XR[xj].W[ui8[1:0]+4], XR[xd].W[5] = XR[xj].W[ui8[3:2]+4], + | | XR[xd].W[6] = XR[xd].W[ui8[5:4]+4], XR[xd].W[7] = XR[xd].W[ui8[7:6]+4] + XVPERMIV ui8, Xj, Xd | xvpermi.d xd, xj, ui8 | XR[xd].D[0] = XR[xj].D[ui8[1:0]], XR[xd].D[1] = XR[xj].D[ui8[3:2]], + | | XR[xd].D[2] = XR[xj].D[ui8[5:4]], XR[xd].D[3] = XR[xj].D[ui8[7:6]] + XVPERMIQ ui8, Xj, Xd | xvpermi.q xd, xj, ui8 | vec = {XR[xd], XR[xj]}, XR[xd].Q[0] = vec.Q[ui8[1:0]], XR[xd].Q[1] = vec.Q[ui8[5:4]] + + # Special instruction encoding definition and description on LoongArch 1. 
DBAR hint encoding for LA664(Loongson 3A6000) and later micro-architectures, paraphrased diff --git a/src/cmd/internal/objfile/elf.go b/src/cmd/internal/objfile/elf.go index 8923290cffefe8..6988cea9362d19 100644 --- a/src/cmd/internal/objfile/elf.go +++ b/src/cmd/internal/objfile/elf.go @@ -64,40 +64,26 @@ func (f *elfFile) symbols() ([]Sym, error) { return syms, nil } -func (f *elfFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { +func (f *elfFile) pcln() (textStart uint64, pclntab []byte, err error) { if sect := f.elf.Section(".text"); sect != nil { textStart = sect.Addr } - sect := f.elf.Section(".gosymtab") - if sect == nil { - // try .data.rel.ro.gosymtab, for PIE binaries - sect = f.elf.Section(".data.rel.ro.gosymtab") - } - if sect != nil { - if symtab, err = sect.Data(); err != nil { - return 0, nil, nil, err - } - } else { - // if both sections failed, try the symbol - symtab = f.symbolData("runtime.symtab", "runtime.esymtab") - } - - sect = f.elf.Section(".gopclntab") + sect := f.elf.Section(".gopclntab") if sect == nil { // try .data.rel.ro.gopclntab, for PIE binaries sect = f.elf.Section(".data.rel.ro.gopclntab") } if sect != nil { if pclntab, err = sect.Data(); err != nil { - return 0, nil, nil, err + return 0, nil, err } } else { // if both sections failed, try the symbol pclntab = f.symbolData("runtime.pclntab", "runtime.epclntab") } - return textStart, symtab, pclntab, nil + return textStart, pclntab, nil } func (f *elfFile) text() (textStart uint64, text []byte, err error) { diff --git a/src/cmd/internal/objfile/goobj.go b/src/cmd/internal/objfile/goobj.go index 7d564a2661d951..ec852d0669fac9 100644 --- a/src/cmd/internal/objfile/goobj.go +++ b/src/cmd/internal/objfile/goobj.go @@ -221,10 +221,10 @@ func (f *goobjFile) symbols() ([]Sym, error) { return syms, nil } -func (f *goobjFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { +func (f *goobjFile) pcln() (textStart uint64, pclntab []byte, err error) { // 
Should never be called. We implement Liner below, callers // should use that instead. - return 0, nil, nil, fmt.Errorf("pcln not available in go object file") + return 0, nil, fmt.Errorf("pcln not available in go object file") } // PCToLine returns the file name, line, and function data for the given pc. diff --git a/src/cmd/internal/objfile/macho.go b/src/cmd/internal/objfile/macho.go index 8258145f26f342..eaf665faee91e8 100644 --- a/src/cmd/internal/objfile/macho.go +++ b/src/cmd/internal/objfile/macho.go @@ -79,21 +79,16 @@ func (f *machoFile) symbols() ([]Sym, error) { return syms, nil } -func (f *machoFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { +func (f *machoFile) pcln() (textStart uint64, pclntab []byte, err error) { if sect := f.macho.Section("__text"); sect != nil { textStart = sect.Addr } - if sect := f.macho.Section("__gosymtab"); sect != nil { - if symtab, err = sect.Data(); err != nil { - return 0, nil, nil, err - } - } if sect := f.macho.Section("__gopclntab"); sect != nil { if pclntab, err = sect.Data(); err != nil { - return 0, nil, nil, err + return 0, nil, err } } - return textStart, symtab, pclntab, nil + return textStart, pclntab, nil } func (f *machoFile) text() (textStart uint64, text []byte, err error) { diff --git a/src/cmd/internal/objfile/objfile.go b/src/cmd/internal/objfile/objfile.go index ed9aae280e5579..32e06dfd991420 100644 --- a/src/cmd/internal/objfile/objfile.go +++ b/src/cmd/internal/objfile/objfile.go @@ -18,7 +18,7 @@ import ( type rawFile interface { symbols() (syms []Sym, err error) - pcln() (textStart uint64, symtab, pclntab []byte, err error) + pcln() (textStart uint64, pclntab []byte, err error) text() (textStart uint64, text []byte, err error) goarch() string loadAddress() (uint64, error) @@ -141,7 +141,7 @@ func (e *Entry) PCLineTable() (Liner, error) { return pcln, nil } // Otherwise, read the pcln tables and build a Liner out of that. 
- textStart, symtab, pclntab, err := e.raw.pcln() + textStart, pclntab, err := e.raw.pcln() if err != nil { return nil, err } @@ -154,7 +154,7 @@ func (e *Entry) PCLineTable() (Liner, error) { } } } - return gosym.NewTable(symtab, gosym.NewLineTable(pclntab, textStart)) + return gosym.NewTable(nil, gosym.NewLineTable(pclntab, textStart)) } func (e *Entry) Text() (uint64, []byte, error) { diff --git a/src/cmd/internal/objfile/pe.go b/src/cmd/internal/objfile/pe.go index c5c08264a9cdb0..e94821298f1ba7 100644 --- a/src/cmd/internal/objfile/pe.go +++ b/src/cmd/internal/objfile/pe.go @@ -90,10 +90,10 @@ func (f *peFile) symbols() ([]Sym, error) { return syms, nil } -func (f *peFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { +func (f *peFile) pcln() (textStart uint64, pclntab []byte, err error) { imageBase, err := f.imageBase() if err != nil { - return 0, nil, nil, err + return 0, nil, err } if sect := f.pe.Section(".text"); sect != nil { @@ -104,17 +104,10 @@ func (f *peFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { // TODO: Remove code looking for the old symbols when we no longer care about 1.3. var err2 error if pclntab, err2 = loadPETable(f.pe, "pclntab", "epclntab"); err2 != nil { - return 0, nil, nil, err - } - } - if symtab, err = loadPETable(f.pe, "runtime.symtab", "runtime.esymtab"); err != nil { - // Same as above. 
- var err2 error - if symtab, err2 = loadPETable(f.pe, "symtab", "esymtab"); err2 != nil { - return 0, nil, nil, err + return 0, nil, err } } - return textStart, symtab, pclntab, nil + return textStart, pclntab, nil } func (f *peFile) text() (textStart uint64, text []byte, err error) { diff --git a/src/cmd/internal/objfile/plan9obj.go b/src/cmd/internal/objfile/plan9obj.go index c91970762c79ee..edd40230cec0c2 100644 --- a/src/cmd/internal/objfile/plan9obj.go +++ b/src/cmd/internal/objfile/plan9obj.go @@ -71,24 +71,17 @@ func (f *plan9File) symbols() ([]Sym, error) { return syms, nil } -func (f *plan9File) pcln() (textStart uint64, symtab, pclntab []byte, err error) { +func (f *plan9File) pcln() (textStart uint64, pclntab []byte, err error) { textStart = f.plan9.LoadAddress + f.plan9.HdrSize if pclntab, err = loadPlan9Table(f.plan9, "runtime.pclntab", "runtime.epclntab"); err != nil { // We didn't find the symbols, so look for the names used in 1.3 and earlier. // TODO: Remove code looking for the old symbols when we no longer care about 1.3. var err2 error if pclntab, err2 = loadPlan9Table(f.plan9, "pclntab", "epclntab"); err2 != nil { - return 0, nil, nil, err + return 0, nil, err } } - if symtab, err = loadPlan9Table(f.plan9, "runtime.symtab", "runtime.esymtab"); err != nil { - // Same as above. 
- var err2 error - if symtab, err2 = loadPlan9Table(f.plan9, "symtab", "esymtab"); err2 != nil { - return 0, nil, nil, err - } - } - return textStart, symtab, pclntab, nil + return textStart, pclntab, nil } func (f *plan9File) text() (textStart uint64, text []byte, err error) { diff --git a/src/cmd/internal/objfile/xcoff.go b/src/cmd/internal/objfile/xcoff.go index 24f42760c9dd60..85928621f1837c 100644 --- a/src/cmd/internal/objfile/xcoff.go +++ b/src/cmd/internal/objfile/xcoff.go @@ -87,15 +87,14 @@ func (f *xcoffFile) symbols() ([]Sym, error) { return syms, nil } -func (f *xcoffFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { +func (f *xcoffFile) pcln() (textStart uint64, pclntab []byte, err error) { if sect := f.xcoff.Section(".text"); sect != nil { textStart = sect.VirtualAddress } if pclntab, err = loadXCOFFTable(f.xcoff, "runtime.pclntab", "runtime.epclntab"); err != nil { - return 0, nil, nil, err + return 0, nil, err } - symtab, _ = loadXCOFFTable(f.xcoff, "runtime.symtab", "runtime.esymtab") // ignore error, this symbol is not useful anyway - return textStart, symtab, pclntab, nil + return textStart, pclntab, nil } func (f *xcoffFile) text() (textStart uint64, text []byte, err error) { diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index b70d050c99bc0b..fcc32727607956 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -1628,9 +1628,9 @@ type dodataState struct { // Link context ctxt *Link // Data symbols bucketed by type. - data [sym.SXREF][]loader.Sym + data [sym.SFirstUnallocated][]loader.Sym // Max alignment for each flavor of data symbol. - dataMaxAlign [sym.SXREF]int32 + dataMaxAlign [sym.SFirstUnallocated]int32 // Overridden sym type symGroupType []sym.SymKind // Current data size so far. 
@@ -1687,7 +1687,7 @@ func (ctxt *Link) dodata(symGroupType []sym.SymKind) { st := state.symType(s) - if st <= sym.STEXTFIPSEND || st >= sym.SXREF { + if st <= sym.STEXTEND || st >= sym.SFirstUnallocated { continue } state.data[st] = append(state.data[st], s) @@ -2238,11 +2238,6 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { state.checkdatsize(sym.SITABLINK) sect.Length = uint64(state.datsize) - sect.Vaddr - /* gosymtab */ - sect = state.allocateNamedSectionAndAssignSyms(seg, genrelrosecname(".gosymtab"), sym.SSYMTAB, sym.SRODATA, relroSecPerm) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.symtab", 0), sect) - ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.esymtab", 0), sect) - /* gopclntab */ sect = state.allocateNamedSectionAndAssignSyms(seg, genrelrosecname(".gopclntab"), sym.SPCLNTAB, sym.SRODATA, relroSecPerm) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.pclntab", 0), sect) @@ -2264,11 +2259,11 @@ func (state *dodataState) allocateDataSections(ctxt *Link) { } siz := 0 - for symn := sym.SELFRXSECT; symn < sym.SXREF; symn++ { + for symn := sym.SELFRXSECT; symn < sym.SFirstUnallocated; symn++ { siz += len(state.data[symn]) } ctxt.datap = make([]loader.Sym, 0, siz) - for symn := sym.SELFRXSECT; symn < sym.SXREF; symn++ { + for symn := sym.SELFRXSECT; symn < sym.SFirstUnallocated; symn++ { ctxt.datap = append(ctxt.datap, state.data[symn]...) } } @@ -2852,9 +2847,6 @@ func (ctxt *Link) address() []*sym.Segment { // will be such that the last page of the text segment will be // mapped twice, once r-x and once starting out rw- and, after // relocation processing, changed to r--. - // - // Ideally the last page of the text segment would not be - // writable even for this short period. 
va = uint64(Rnd(int64(va), *FlagRound)) order = append(order, &Segrodata) @@ -2988,7 +2980,6 @@ func (ctxt *Link) address() []*sym.Segment { ldr := ctxt.loader var ( rodata = ldr.SymSect(ldr.LookupOrCreateSym("runtime.rodata", 0)) - symtab = ldr.SymSect(ldr.LookupOrCreateSym("runtime.symtab", 0)) pclntab = ldr.SymSect(ldr.LookupOrCreateSym("runtime.pclntab", 0)) types = ldr.SymSect(ldr.LookupOrCreateSym("runtime.types", 0)) ) @@ -3068,8 +3059,6 @@ func (ctxt *Link) address() []*sym.Segment { ctxt.xdefine("runtime.egcbss", sym.SRODATA, ldr.SymAddr(s)+ldr.SymSize(s)) ldr.SetSymSect(ldr.LookupOrCreateSym("runtime.egcbss", 0), ldr.SymSect(s)) - ctxt.xdefine("runtime.symtab", sym.SRODATA, int64(symtab.Vaddr)) - ctxt.xdefine("runtime.esymtab", sym.SRODATA, int64(symtab.Vaddr+symtab.Length)) ctxt.xdefine("runtime.pclntab", sym.SRODATA, int64(pclntab.Vaddr)) ctxt.defineInternal("runtime.pcheader", sym.SRODATA) ctxt.defineInternal("runtime.funcnametab", sym.SRODATA) diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go index ed215d7fe50128..8981f1c3f02272 100644 --- a/src/cmd/link/internal/ld/elf.go +++ b/src/cmd/link/internal/ld/elf.go @@ -1477,7 +1477,6 @@ func (ctxt *Link) doelf() { } shstrtabAddstring(relro_prefix + ".typelink") shstrtabAddstring(relro_prefix + ".itablink") - shstrtabAddstring(relro_prefix + ".gosymtab") shstrtabAddstring(relro_prefix + ".gopclntab") if ctxt.IsExternal() { @@ -1487,7 +1486,6 @@ func (ctxt *Link) doelf() { shstrtabAddstring(elfRelType + ".rodata") shstrtabAddstring(elfRelType + relro_prefix + ".typelink") shstrtabAddstring(elfRelType + relro_prefix + ".itablink") - shstrtabAddstring(elfRelType + relro_prefix + ".gosymtab") shstrtabAddstring(elfRelType + relro_prefix + ".gopclntab") shstrtabAddstring(elfRelType + ".noptrdata") shstrtabAddstring(elfRelType + ".data") diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index 8e059f299ffa73..6d3347ff2d4569 100644 --- 
a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -919,7 +919,7 @@ func collectmachosyms(ctxt *Link) { continue } t := ldr.SymType(s) - if t >= sym.SELFRXSECT && t < sym.SXREF { // data sections handled in dodata + if t >= sym.SELFRXSECT && t < sym.SFirstUnallocated { // data sections handled in dodata if t == sym.STLSBSS { // TLSBSS is not used on darwin. See data.go:allocateDataSections continue diff --git a/src/cmd/link/internal/ld/macho_test.go b/src/cmd/link/internal/ld/macho_test.go index 29adc0b78b1165..adf159ab6d2ce8 100644 --- a/src/cmd/link/internal/ld/macho_test.go +++ b/src/cmd/link/internal/ld/macho_test.go @@ -37,7 +37,7 @@ func TestMachoSectionsReadOnly(t *testing.T) { args: []string{"-ldflags", "-linkmode=internal"}, prog: prog, mustInternalLink: true, - wantSecsRO: []string{"__got", "__rodata", "__itablink", "__typelink", "__gosymtab", "__gopclntab"}, + wantSecsRO: []string{"__got", "__rodata", "__itablink", "__typelink", "__gopclntab"}, }, { name: "linkmode-external", diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index 2b3687c37e4a6a..68af94a405a942 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -243,7 +243,6 @@ func makeInlSyms(ctxt *Link, funcs []loader.Sym, nameOffsets map[loader.Sym]uint // generator to fill in its data later. 
func (state *pclntab) generatePCHeader(ctxt *Link) { ldr := ctxt.loader - textStartOff := int64(8 + 2*ctxt.Arch.PtrSize) size := int64(8 + 8*ctxt.Arch.PtrSize) writeHeader := func(ctxt *Link, s loader.Sym) { header := ctxt.loader.MakeSymbolUpdater(s) @@ -264,10 +263,7 @@ func (state *pclntab) generatePCHeader(ctxt *Link) { header.SetUint8(ctxt.Arch, 7, uint8(ctxt.Arch.PtrSize)) off := header.SetUint(ctxt.Arch, 8, uint64(state.nfunc)) off = header.SetUint(ctxt.Arch, off, uint64(state.nfiles)) - if off != textStartOff { - panic(fmt.Sprintf("pcHeader textStartOff: %d != %d", off, textStartOff)) - } - off += int64(ctxt.Arch.PtrSize) // skip runtimeText relocation + off = header.SetUintptr(ctxt.Arch, off, 0) // unused off = writeSymOffset(off, state.funcnametab) off = writeSymOffset(off, state.cutab) off = writeSymOffset(off, state.filetab) @@ -279,9 +275,6 @@ func (state *pclntab) generatePCHeader(ctxt *Link) { } state.pcheader = state.addGeneratedSym(ctxt, "runtime.pcheader", size, writeHeader) - // Create the runtimeText relocation. 
- sb := ldr.MakeSymbolUpdater(state.pcheader) - sb.SetAddr(ctxt.Arch, textStartOff, ldr.Lookup("runtime.text", 0)) } // walkFuncs iterates over the funcs, calling a function for each unique diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index f5b7580fda3307..b49da42c4cf0c7 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -854,7 +854,7 @@ func (f *peFile) writeSymbols(ctxt *Link) { continue } t := ldr.SymType(s) - if t >= sym.SELFRXSECT && t < sym.SXREF { // data sections handled in dodata + if t >= sym.SELFRXSECT && t < sym.SFirstUnallocated { // data sections handled in dodata if t == sym.STLSBSS { continue } diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index 2c999ccc4e3a19..a0345ca1c7b7b7 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -244,7 +244,7 @@ func genelfsym(ctxt *Link, elfbind elf.SymBind) { continue } st := ldr.SymType(s) - if st >= sym.SELFRXSECT && st < sym.SXREF { + if st >= sym.SELFRXSECT && st < sym.SFirstUnallocated { typ := elf.STT_OBJECT if st == sym.STLSBSS { if ctxt.IsInternal() { @@ -345,7 +345,7 @@ func asmbPlan9Sym(ctxt *Link) { continue } t := ldr.SymType(s) - if t >= sym.SELFRXSECT && t < sym.SXREF { // data sections handled in dodata + if t >= sym.SELFRXSECT && t < sym.SFirstUnallocated { // data sections handled in dodata if t == sym.STLSBSS { continue } @@ -446,7 +446,6 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { ctxt.xdefine("runtime.ecovctrs", sym.SNOPTRBSS, 0) ctxt.xdefine("runtime.end", sym.SBSS, 0) ctxt.xdefine("runtime.epclntab", sym.SRODATA, 0) - ctxt.xdefine("runtime.esymtab", sym.SRODATA, 0) // garbage collection symbols s := ldr.CreateSymForUpdate("runtime.gcdata", 0) @@ -506,11 +505,6 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { symgofuncrel = groupSym("go:funcrel.*", sym.SGOFUNCRELRO) } - symt := ldr.CreateSymForUpdate("runtime.symtab", 0) - 
symt.SetType(sym.SSYMTAB) - symt.SetSize(0) - symt.SetLocal(true) - // assign specific types so that they sort together. // within a type they sort by size, so the .* symbols // just defined above will be first. @@ -843,7 +837,7 @@ func (ctxt *Link) symtab(pcln *pclntab) []sym.SymKind { } // CarrierSymByType tracks carrier symbols and their sizes. -var CarrierSymByType [sym.SXREF]struct { +var CarrierSymByType [sym.SFirstUnallocated]struct { Sym loader.Sym Size int64 } diff --git a/src/cmd/link/internal/ld/xcoff.go b/src/cmd/link/internal/ld/xcoff.go index fdc16ac48f9d2b..fc98fcba2bcceb 100644 --- a/src/cmd/link/internal/ld/xcoff.go +++ b/src/cmd/link/internal/ld/xcoff.go @@ -1121,7 +1121,7 @@ func (f *xcoffFile) asmaixsym(ctxt *Link) { putaixsym(ctxt, s, BSSSym) } - case st >= sym.SELFRXSECT && st < sym.SXREF: // data sections handled in dodata + case st >= sym.SELFRXSECT && st < sym.SFirstUnallocated: // data sections handled in dodata if ldr.AttrReachable(s) { putaixsym(ctxt, s, DataSym) } diff --git a/src/cmd/link/internal/sym/symkind.go b/src/cmd/link/internal/sym/symkind.go index 0671d9d724d25d..9f62e809e14ca3 100644 --- a/src/cmd/link/internal/sym/symkind.go +++ b/src/cmd/link/internal/sym/symkind.go @@ -41,31 +41,32 @@ type SymKind uint8 // //go:generate stringer -type=SymKind const ( + // An otherwise invalid zero value for the type. Sxxx SymKind = iota - STEXT - STEXTFIPSSTART - STEXTFIPS - STEXTFIPSEND - STEXTEND - SELFRXSECT - SMACHOPLT - - // Read-only sections. - STYPE - SSTRING - SGOSTRING - SGOFUNC - SGCBITS - SRODATA - SRODATAFIPSSTART - SRODATAFIPS - SRODATAFIPSEND - SRODATAEND - SFUNCTAB - - SELFROSECT - - // Read-only sections with relocations. + // The text segment, containing executable instructions. + STEXT // General executable code. + STEXTFIPSSTART // Start of FIPS text section. + STEXTFIPS // Instructions hashed for FIPS checks. + STEXTFIPSEND // End of FIPS text section. + STEXTEND // End of text section. 
+ SELFRXSECT // Executable PLT; PPC64 .glink. + SMACHOPLT // Mach-O PLT. + + // Read-only, non-executable, segment. + STYPE // Type descriptors. + SSTRING // Used only for XCOFF runtime.rodata symbol? + SGOSTRING // Go string constants. + SGOFUNC // Function descriptors and funcdata symbols. + SGCBITS // GC bit masks and programs. + SRODATA // General read-only data. + SRODATAFIPSSTART // Start of FIPS read-only data. + SRODATAFIPS // FIPS read-only data. + SRODATAFIPSEND // End of FIPS read-only data. + SRODATAEND // End of read-only data. + SFUNCTAB // Appears to be unused, except for runtime.etypes. + SELFROSECT // ELF read-only data: relocs, dynamic linking info. + + // Read-only, non-executable, dynamically relocatable segment. // // Types STYPE-SFUNCTAB above are written to the .rodata section by default. // When linking a shared object, some conceptually "read only" types need to @@ -84,55 +85,58 @@ const ( SGCBITSRELRO SRODATARELRO SFUNCTABRELRO - SELFRELROSECT - SMACHORELROSECT - // Part of .data.rel.ro if it exists, otherwise part of .rodata. - STYPELINK - SITABLINK - SSYMTAB - SPCLNTAB + SELFRELROSECT // ELF-specific read-only relocatable: PLT, etc. + SMACHORELROSECT // Mach-O specific read-only relocatable. - // Writable sections. + STYPELINK // Type links. + SITABLINK // Itab links. + SPCLNTAB // Pclntab data. + + // Allocated writable segment. 
SFirstWritable - SBUILDINFO - SFIPSINFO - SELFSECT - SMACHO - SMACHOGOT - SWINDOWS - SELFGOT - SNOPTRDATA - SNOPTRDATAFIPSSTART - SNOPTRDATAFIPS - SNOPTRDATAFIPSEND - SNOPTRDATAEND - SINITARR - SDATA - SDATAFIPSSTART - SDATAFIPS - SDATAFIPSEND - SDATAEND - SXCOFFTOC - SBSS - SNOPTRBSS - SLIBFUZZER_8BIT_COUNTER - SCOVERAGE_COUNTER - SCOVERAGE_AUXVAR - STLSBSS - SXREF - SMACHOSYMSTR - SMACHOSYMTAB - SMACHOINDIRECTPLT - SMACHOINDIRECTGOT - SFILEPATH - SDYNIMPORT - SHOSTOBJ - SUNDEFEXT // Undefined symbol for resolution by external linker - - // Sections for debugging information + SBUILDINFO // debug/buildinfo data (why is this writable?). + SFIPSINFO // go:fipsinfo aka crypto/internal/fips140/check.Linkinfo (why is this writable)? + SELFSECT // .got.plt, .plt, .dynamic where appropriate. + SMACHO // Used only for .llvmasm? + SMACHOGOT // Mach-O GOT. + SWINDOWS // Windows dynamic symbols. + SELFGOT // Writable ELF GOT section. + SNOPTRDATA // Data with no heap pointers. + SNOPTRDATAFIPSSTART // Start of FIPS non-pointer writable data. + SNOPTRDATAFIPS // FIPS non-pointer writable data. + SNOPTRDATAFIPSEND // End of FIPS non-pointer writable data. + SNOPTRDATAEND // End of data with no heap pointers. + SINITARR // ELF .init_array section. + SDATA // Data that may have heap pointers. + SDATAFIPSSTART // Start of FIPS writable data. + SDATAFIPS // FIPS writable data. + SDATAFIPSEND // End of FIPS writable data. + SDATAEND // End of data that may have heap pointers. + SXCOFFTOC // AIX TOC entries. + + // Allocated zero-initialized segment. + SBSS // Zeroed data that may have heap pointers. + SNOPTRBSS // Zeroed data with no heap pointers. + SLIBFUZZER_8BIT_COUNTER // Fuzzer counters. + SCOVERAGE_COUNTER // Coverage counters. + SCOVERAGE_AUXVAR // Compiler generated coverage symbols. + STLSBSS // Thread-local zeroed data. + + // Unallocated segment. + SFirstUnallocated + SXREF // Reference from non-Go object file. + SMACHOSYMSTR // Mach-O string table. 
+ SMACHOSYMTAB // Mach-O symbol table. + SMACHOINDIRECTPLT // Mach-O indirect PLT. + SMACHOINDIRECTGOT // Mach-O indirect GOT. + SDYNIMPORT // Reference to symbol defined in shared library. + SHOSTOBJ // Symbol defined in non-Go object file. + SUNDEFEXT // Undefined symbol for resolution by external linker. + + // Unallocated DWARF debugging segment. SDWARFSECT - // DWARF symbol types + // DWARF symbol types created by compiler or linker. SDWARFCUINFO SDWARFCONST SDWARFFCN @@ -144,9 +148,9 @@ const ( SDWARFLINES SDWARFADDR - // SEH symbol types - SSEHUNWINDINFO - SSEHSECT + // SEH symbol types. These are probably allocated at run time. + SSEHUNWINDINFO // Compiler generated Windows SEH info. + SSEHSECT // Windows SEH data. ) // AbiSymKindToSymKind maps values read from object files (which are diff --git a/src/cmd/link/internal/sym/symkind_string.go b/src/cmd/link/internal/sym/symkind_string.go index 5395c9571ba8b8..4e3a0a3431ae0b 100644 --- a/src/cmd/link/internal/sym/symkind_string.go +++ b/src/cmd/link/internal/sym/symkind_string.go @@ -39,61 +39,60 @@ func _() { _ = x[SMACHORELROSECT-28] _ = x[STYPELINK-29] _ = x[SITABLINK-30] - _ = x[SSYMTAB-31] - _ = x[SPCLNTAB-32] - _ = x[SFirstWritable-33] - _ = x[SBUILDINFO-34] - _ = x[SFIPSINFO-35] - _ = x[SELFSECT-36] - _ = x[SMACHO-37] - _ = x[SMACHOGOT-38] - _ = x[SWINDOWS-39] - _ = x[SELFGOT-40] - _ = x[SNOPTRDATA-41] - _ = x[SNOPTRDATAFIPSSTART-42] - _ = x[SNOPTRDATAFIPS-43] - _ = x[SNOPTRDATAFIPSEND-44] - _ = x[SNOPTRDATAEND-45] - _ = x[SINITARR-46] - _ = x[SDATA-47] - _ = x[SDATAFIPSSTART-48] - _ = x[SDATAFIPS-49] - _ = x[SDATAFIPSEND-50] - _ = x[SDATAEND-51] - _ = x[SXCOFFTOC-52] - _ = x[SBSS-53] - _ = x[SNOPTRBSS-54] - _ = x[SLIBFUZZER_8BIT_COUNTER-55] - _ = x[SCOVERAGE_COUNTER-56] - _ = x[SCOVERAGE_AUXVAR-57] - _ = x[STLSBSS-58] + _ = x[SPCLNTAB-31] + _ = x[SFirstWritable-32] + _ = x[SBUILDINFO-33] + _ = x[SFIPSINFO-34] + _ = x[SELFSECT-35] + _ = x[SMACHO-36] + _ = x[SMACHOGOT-37] + _ = x[SWINDOWS-38] + _ = 
x[SELFGOT-39] + _ = x[SNOPTRDATA-40] + _ = x[SNOPTRDATAFIPSSTART-41] + _ = x[SNOPTRDATAFIPS-42] + _ = x[SNOPTRDATAFIPSEND-43] + _ = x[SNOPTRDATAEND-44] + _ = x[SINITARR-45] + _ = x[SDATA-46] + _ = x[SDATAFIPSSTART-47] + _ = x[SDATAFIPS-48] + _ = x[SDATAFIPSEND-49] + _ = x[SDATAEND-50] + _ = x[SXCOFFTOC-51] + _ = x[SBSS-52] + _ = x[SNOPTRBSS-53] + _ = x[SLIBFUZZER_8BIT_COUNTER-54] + _ = x[SCOVERAGE_COUNTER-55] + _ = x[SCOVERAGE_AUXVAR-56] + _ = x[STLSBSS-57] + _ = x[SFirstUnallocated-58] _ = x[SXREF-59] _ = x[SMACHOSYMSTR-60] _ = x[SMACHOSYMTAB-61] _ = x[SMACHOINDIRECTPLT-62] _ = x[SMACHOINDIRECTGOT-63] - _ = x[SFILEPATH-64] - _ = x[SDYNIMPORT-65] - _ = x[SHOSTOBJ-66] - _ = x[SUNDEFEXT-67] - _ = x[SDWARFSECT-68] - _ = x[SDWARFCUINFO-69] - _ = x[SDWARFCONST-70] - _ = x[SDWARFFCN-71] - _ = x[SDWARFABSFCN-72] - _ = x[SDWARFTYPE-73] - _ = x[SDWARFVAR-74] - _ = x[SDWARFRANGE-75] - _ = x[SDWARFLOC-76] - _ = x[SDWARFLINES-77] - _ = x[SDWARFADDR-78] - _ = x[SSEHUNWINDINFO-79] - _ = x[SSEHSECT-80] + _ = x[SDYNIMPORT-64] + _ = x[SHOSTOBJ-65] + _ = x[SUNDEFEXT-66] + _ = x[SDWARFSECT-67] + _ = x[SDWARFCUINFO-68] + _ = x[SDWARFCONST-69] + _ = x[SDWARFFCN-70] + _ = x[SDWARFABSFCN-71] + _ = x[SDWARFTYPE-72] + _ = x[SDWARFVAR-73] + _ = x[SDWARFRANGE-74] + _ = x[SDWARFLOC-75] + _ = x[SDWARFLINES-76] + _ = x[SDWARFADDR-77] + _ = x[SSEHUNWINDINFO-78] + _ = x[SSEHSECT-79] } -const _SymKind_name = 
"SxxxSTEXTSTEXTFIPSSTARTSTEXTFIPSSTEXTFIPSENDSTEXTENDSELFRXSECTSMACHOPLTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASRODATAFIPSSTARTSRODATAFIPSSRODATAFIPSENDSRODATAENDSFUNCTABSELFROSECTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSELFRELROSECTSMACHORELROSECTSTYPELINKSITABLINKSSYMTABSPCLNTABSFirstWritableSBUILDINFOSFIPSINFOSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASNOPTRDATAFIPSSTARTSNOPTRDATAFIPSSNOPTRDATAFIPSENDSNOPTRDATAENDSINITARRSDATASDATAFIPSSTARTSDATAFIPSSDATAFIPSENDSDATAENDSXCOFFTOCSBSSSNOPTRBSSSLIBFUZZER_8BIT_COUNTERSCOVERAGE_COUNTERSCOVERAGE_AUXVARSTLSBSSSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSFILEPATHSDYNIMPORTSHOSTOBJSUNDEFEXTSDWARFSECTSDWARFCUINFOSDWARFCONSTSDWARFFCNSDWARFABSFCNSDWARFTYPESDWARFVARSDWARFRANGESDWARFLOCSDWARFLINESSDWARFADDRSSEHUNWINDINFOSSEHSECT" +const _SymKind_name = "SxxxSTEXTSTEXTFIPSSTARTSTEXTFIPSSTEXTFIPSENDSTEXTENDSELFRXSECTSMACHOPLTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASRODATAFIPSSTARTSRODATAFIPSSRODATAFIPSENDSRODATAENDSFUNCTABSELFROSECTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSELFRELROSECTSMACHORELROSECTSTYPELINKSITABLINKSPCLNTABSFirstWritableSBUILDINFOSFIPSINFOSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASNOPTRDATAFIPSSTARTSNOPTRDATAFIPSSNOPTRDATAFIPSENDSNOPTRDATAENDSINITARRSDATASDATAFIPSSTARTSDATAFIPSSDATAFIPSENDSDATAENDSXCOFFTOCSBSSSNOPTRBSSSLIBFUZZER_8BIT_COUNTERSCOVERAGE_COUNTERSCOVERAGE_AUXVARSTLSBSSSFirstUnallocatedSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSDYNIMPORTSHOSTOBJSUNDEFEXTSDWARFSECTSDWARFCUINFOSDWARFCONSTSDWARFFCNSDWARFABSFCNSDWARFTYPESDWARFVARSDWARFRANGESDWARFLOCSDWARFLINESSDWARFADDRSSEHUNWINDINFOSSEHSECT" -var _SymKind_index = [...]uint16{0, 4, 9, 23, 32, 44, 52, 62, 71, 76, 83, 92, 99, 106, 113, 129, 140, 154, 164, 172, 182, 192, 204, 218, 230, 242, 254, 267, 280, 295, 304, 313, 320, 328, 342, 352, 361, 369, 375, 384, 392, 399, 409, 428, 442, 459, 472, 480, 485, 499, 508, 
520, 528, 537, 541, 550, 573, 590, 606, 613, 618, 630, 642, 659, 676, 685, 695, 703, 712, 722, 734, 745, 754, 766, 776, 785, 796, 805, 816, 826, 840, 848} +var _SymKind_index = [...]uint16{0, 4, 9, 23, 32, 44, 52, 62, 71, 76, 83, 92, 99, 106, 113, 129, 140, 154, 164, 172, 182, 192, 204, 218, 230, 242, 254, 267, 280, 295, 304, 313, 321, 335, 345, 354, 362, 368, 377, 385, 392, 402, 421, 435, 452, 465, 473, 478, 492, 501, 513, 521, 530, 534, 543, 566, 583, 599, 606, 623, 628, 640, 652, 669, 686, 696, 704, 713, 723, 735, 746, 755, 767, 777, 786, 797, 806, 817, 827, 841, 849} func (i SymKind) String() string { if i >= SymKind(len(_SymKind_index)-1) { diff --git a/src/cmd/link/internal/wasm/asm.go b/src/cmd/link/internal/wasm/asm.go index 2ddf5b33ba273a..65f79c80120e06 100644 --- a/src/cmd/link/internal/wasm/asm.go +++ b/src/cmd/link/internal/wasm/asm.go @@ -127,7 +127,6 @@ func asmb(ctxt *ld.Link, ldr *loader.Loader) { ldr.SymSect(ldr.Lookup("runtime.rodata", 0)), ldr.SymSect(ldr.Lookup("runtime.typelink", 0)), ldr.SymSect(ldr.Lookup("runtime.itablink", 0)), - ldr.SymSect(ldr.Lookup("runtime.symtab", 0)), ldr.SymSect(ldr.Lookup("runtime.pclntab", 0)), ldr.SymSect(ldr.Lookup("runtime.noptrdata", 0)), ldr.SymSect(ldr.Lookup("runtime.data", 0)), diff --git a/src/crypto/internal/constanttime/constant_time.go b/src/crypto/internal/constanttime/constant_time.go new file mode 100644 index 00000000000000..5525307195661b --- /dev/null +++ b/src/crypto/internal/constanttime/constant_time.go @@ -0,0 +1,42 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package constanttime + +// The functions in this package are compiler intrinsics for constant-time +// operations. They are exposed by crypto/subtle and used directly by the +// FIPS 140-3 module. + +// Select returns x if v == 1 and y if v == 0. +// Its behavior is undefined if v takes any other value. 
+func Select(v, x, y int) int { + // This is intrinsicified on arches with CMOV. + // It implements the following superset behavior: + // ConstantTimeSelect returns x if v != 0 and y if v == 0. + // Do the same here to avoid non portable UB. + v = int(boolToUint8(v != 0)) + return ^(v-1)&x | (v-1)&y +} + +// ByteEq returns 1 if x == y and 0 otherwise. +func ByteEq(x, y uint8) int { + return int(boolToUint8(x == y)) +} + +// Eq returns 1 if x == y and 0 otherwise. +func Eq(x, y int32) int { + return int(boolToUint8(x == y)) +} + +// LessOrEq returns 1 if x <= y and 0 otherwise. +// Its behavior is undefined if x or y are negative or > 2**31 - 1. +func LessOrEq(x, y int) int { + return int(boolToUint8(x <= y)) +} + +// boolToUint8 is a compiler intrinsic. +// It returns 1 for true and 0 for false. +func boolToUint8(b bool) uint8 { + panic("unreachable; must be intrinsicified") +} diff --git a/src/crypto/internal/fips140/edwards25519/tables.go b/src/crypto/internal/fips140/edwards25519/tables.go index 801b76771d1ea3..7da3f7b15bca63 100644 --- a/src/crypto/internal/fips140/edwards25519/tables.go +++ b/src/crypto/internal/fips140/edwards25519/tables.go @@ -4,9 +4,7 @@ package edwards25519 -import ( - "crypto/internal/fips140/subtle" -) +import "crypto/internal/constanttime" // A dynamic lookup table for variable-base, constant-time scalar muls. 
type projLookupTable struct { @@ -95,7 +93,7 @@ func (v *projLookupTable) SelectInto(dest *projCached, x int8) { dest.Zero() for j := 1; j <= 8; j++ { // Set dest = j*Q if |x| = j - cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) + cond := constanttime.ByteEq(xabs, uint8(j)) dest.Select(&v.points[j-1], dest, cond) } // Now dest = |x|*Q, conditionally negate to get x*Q @@ -111,7 +109,7 @@ func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) { dest.Zero() for j := 1; j <= 8; j++ { // Set dest = j*Q if |x| = j - cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) + cond := constanttime.ByteEq(xabs, uint8(j)) dest.Select(&v.points[j-1], dest, cond) } // Now dest = |x|*Q, conditionally negate to get x*Q diff --git a/src/crypto/internal/fips140/nistec/generate.go b/src/crypto/internal/fips140/nistec/generate.go index 7786dc556f5260..75b1ac60f0b6ec 100644 --- a/src/crypto/internal/fips140/nistec/generate.go +++ b/src/crypto/internal/fips140/nistec/generate.go @@ -140,8 +140,8 @@ const tmplNISTEC = `// Copyright 2022 The Go Authors. All rights reserved. 
package nistec import ( + "crypto/internal/constanttime" "crypto/internal/fips140/nistec/fiat" - "crypto/internal/fips140/subtle" "errors" "sync" ) @@ -467,7 +467,7 @@ func (table *{{.p}}Table) Select(p *{{.P}}Point, n uint8) { } p.Set(New{{.P}}Point()) for i := uint8(1); i < 16; i++ { - cond := subtle.ConstantTimeByteEq(i, n) + cond := constanttime.ByteEq(i, n) p.Select(table[i-1], p, cond) } } diff --git a/src/crypto/internal/fips140/nistec/p224.go b/src/crypto/internal/fips140/nistec/p224.go index 82bced251fe0ac..7965b186891b0b 100644 --- a/src/crypto/internal/fips140/nistec/p224.go +++ b/src/crypto/internal/fips140/nistec/p224.go @@ -7,8 +7,8 @@ package nistec import ( + "crypto/internal/constanttime" "crypto/internal/fips140/nistec/fiat" - "crypto/internal/fips140/subtle" "errors" "sync" ) @@ -333,7 +333,7 @@ func (table *p224Table) Select(p *P224Point, n uint8) { } p.Set(NewP224Point()) for i := uint8(1); i < 16; i++ { - cond := subtle.ConstantTimeByteEq(i, n) + cond := constanttime.ByteEq(i, n) p.Select(table[i-1], p, cond) } } diff --git a/src/crypto/internal/fips140/nistec/p256.go b/src/crypto/internal/fips140/nistec/p256.go index c957c5424737b0..650bde4e73e0a7 100644 --- a/src/crypto/internal/fips140/nistec/p256.go +++ b/src/crypto/internal/fips140/nistec/p256.go @@ -7,8 +7,8 @@ package nistec import ( + "crypto/internal/constanttime" "crypto/internal/fips140/nistec/fiat" - "crypto/internal/fips140/subtle" "crypto/internal/fips140deps/byteorder" "crypto/internal/fips140deps/cpu" "errors" @@ -458,7 +458,7 @@ func (table *p256Table) Select(p *P256Point, n uint8) { } p.Set(NewP256Point()) for i := uint8(1); i <= 16; i++ { - cond := subtle.ConstantTimeByteEq(i, n) + cond := constanttime.ByteEq(i, n) p.Select(&table[i-1], p, cond) } } @@ -553,7 +553,7 @@ func (table *p256AffineTable) Select(p *p256AffinePoint, n uint8) { panic("nistec: internal error: p256AffineTable.Select called with out-of-bounds value") } for i := uint8(1); i <= 32; i++ { - cond := 
subtle.ConstantTimeByteEq(i, n) + cond := constanttime.ByteEq(i, n) p.x.Select(&table[i-1].x, &p.x, cond) p.y.Select(&table[i-1].y, &p.y, cond) } @@ -618,7 +618,7 @@ func (p *P256Point) ScalarBaseMult(scalar []byte) (*P256Point, error) { // the point at infinity (because infinity can't be represented in affine // coordinates). Here we conditionally set p to the infinity if sel is zero. // In the loop, that's handled by AddAffine. - selIsZero := subtle.ConstantTimeByteEq(sel, 0) + selIsZero := constanttime.ByteEq(sel, 0) p.Select(NewP256Point(), t.Projective(), selIsZero) for index >= 5 { @@ -636,7 +636,7 @@ func (p *P256Point) ScalarBaseMult(scalar []byte) (*P256Point, error) { table := &p256GeneratorTables[(index+1)/6] table.Select(t, sel) t.Negate(sign) - selIsZero := subtle.ConstantTimeByteEq(sel, 0) + selIsZero := constanttime.ByteEq(sel, 0) p.AddAffine(p, t, selIsZero) } diff --git a/src/crypto/internal/fips140/nistec/p384.go b/src/crypto/internal/fips140/nistec/p384.go index 318c08a97972f7..352f1a806e8ee4 100644 --- a/src/crypto/internal/fips140/nistec/p384.go +++ b/src/crypto/internal/fips140/nistec/p384.go @@ -7,8 +7,8 @@ package nistec import ( + "crypto/internal/constanttime" "crypto/internal/fips140/nistec/fiat" - "crypto/internal/fips140/subtle" "errors" "sync" ) @@ -333,7 +333,7 @@ func (table *p384Table) Select(p *P384Point, n uint8) { } p.Set(NewP384Point()) for i := uint8(1); i < 16; i++ { - cond := subtle.ConstantTimeByteEq(i, n) + cond := constanttime.ByteEq(i, n) p.Select(table[i-1], p, cond) } } diff --git a/src/crypto/internal/fips140/nistec/p521.go b/src/crypto/internal/fips140/nistec/p521.go index 8ade8a33040b7a..429f6379934904 100644 --- a/src/crypto/internal/fips140/nistec/p521.go +++ b/src/crypto/internal/fips140/nistec/p521.go @@ -7,8 +7,8 @@ package nistec import ( + "crypto/internal/constanttime" "crypto/internal/fips140/nistec/fiat" - "crypto/internal/fips140/subtle" "errors" "sync" ) @@ -333,7 +333,7 @@ func (table *p521Table) 
Select(p *P521Point, n uint8) { } p.Set(NewP521Point()) for i := uint8(1); i < 16; i++ { - cond := subtle.ConstantTimeByteEq(i, n) + cond := constanttime.ByteEq(i, n) p.Select(table[i-1], p, cond) } } diff --git a/src/crypto/internal/fips140/rsa/pkcs1v22.go b/src/crypto/internal/fips140/rsa/pkcs1v22.go index 94e7345996a46f..29c47069a3e0ee 100644 --- a/src/crypto/internal/fips140/rsa/pkcs1v22.go +++ b/src/crypto/internal/fips140/rsa/pkcs1v22.go @@ -9,6 +9,7 @@ package rsa import ( "bytes" + "crypto/internal/constanttime" "crypto/internal/fips140" "crypto/internal/fips140/drbg" "crypto/internal/fips140/sha256" @@ -432,7 +433,7 @@ func DecryptOAEP(hash, mgfHash hash.Hash, priv *PrivateKey, ciphertext []byte, l hash.Write(label) lHash := hash.Sum(nil) - firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0) + firstByteIsZero := constanttime.ByteEq(em[0], 0) seed := em[1 : hash.Size()+1] db := em[hash.Size()+1:] @@ -458,11 +459,11 @@ func DecryptOAEP(hash, mgfHash hash.Hash, priv *PrivateKey, ciphertext []byte, l rest := db[hash.Size():] for i := 0; i < len(rest); i++ { - equals0 := subtle.ConstantTimeByteEq(rest[i], 0) - equals1 := subtle.ConstantTimeByteEq(rest[i], 1) - index = subtle.ConstantTimeSelect(lookingForIndex&equals1, i, index) - lookingForIndex = subtle.ConstantTimeSelect(equals1, 0, lookingForIndex) - invalid = subtle.ConstantTimeSelect(lookingForIndex&^equals0, 1, invalid) + equals0 := constanttime.ByteEq(rest[i], 0) + equals1 := constanttime.ByteEq(rest[i], 1) + index = constanttime.Select(lookingForIndex&equals1, i, index) + lookingForIndex = constanttime.Select(equals1, 0, lookingForIndex) + invalid = constanttime.Select(lookingForIndex&^equals0, 1, invalid) } if firstByteIsZero&lHash2Good&^invalid&^lookingForIndex != 1 { diff --git a/src/crypto/internal/fips140/subtle/constant_time.go b/src/crypto/internal/fips140/subtle/constant_time.go index fa7a002d3fa456..fc1e3079855e94 100644 --- a/src/crypto/internal/fips140/subtle/constant_time.go +++ 
b/src/crypto/internal/fips140/subtle/constant_time.go @@ -5,6 +5,7 @@ package subtle import ( + "crypto/internal/constanttime" "crypto/internal/fips140deps/byteorder" "math/bits" ) @@ -24,7 +25,7 @@ func ConstantTimeCompare(x, y []byte) int { v |= x[i] ^ y[i] } - return ConstantTimeByteEq(v, 0) + return constanttime.ByteEq(v, 0) } // ConstantTimeLessOrEqBytes returns 1 if x <= y and 0 otherwise. The comparison @@ -58,20 +59,6 @@ func ConstantTimeLessOrEqBytes(x, y []byte) int { return int(b ^ 1) } -// ConstantTimeSelect returns x if v == 1 and y if v == 0. -// Its behavior is undefined if v takes any other value. -func ConstantTimeSelect(v, x, y int) int { return ^(v-1)&x | (v-1)&y } - -// ConstantTimeByteEq returns 1 if x == y and 0 otherwise. -func ConstantTimeByteEq(x, y uint8) int { - return int((uint32(x^y) - 1) >> 31) -} - -// ConstantTimeEq returns 1 if x == y and 0 otherwise. -func ConstantTimeEq(x, y int32) int { - return int((uint64(uint32(x^y)) - 1) >> 63) -} - // ConstantTimeCopy copies the contents of y into x (a slice of equal length) // if v == 1. If v == 0, x is left unchanged. Its behavior is undefined if v // takes any other value. @@ -86,11 +73,3 @@ func ConstantTimeCopy(v int, x, y []byte) { x[i] = x[i]&xmask | y[i]&ymask } } - -// ConstantTimeLessOrEq returns 1 if x <= y and 0 otherwise. -// Its behavior is undefined if x or y are negative or > 2**31 - 1. -func ConstantTimeLessOrEq(x, y int) int { - x32 := int32(x) - y32 := int32(y) - return int(((x32 - y32 - 1) >> 31) & 1) -} diff --git a/src/crypto/internal/fips140deps/fipsdeps_test.go b/src/crypto/internal/fips140deps/fipsdeps_test.go index 3eaae1830d0e18..29a56047c3c5fd 100644 --- a/src/crypto/internal/fips140deps/fipsdeps_test.go +++ b/src/crypto/internal/fips140deps/fipsdeps_test.go @@ -28,6 +28,9 @@ var AllowedInternalPackages = map[string]bool{ // randutil.MaybeReadByte is used in non-FIPS mode by GenerateKey functions. 
"crypto/internal/randutil": true, + + // constanttime are the constant-time intrinsics. + "crypto/internal/constanttime": true, } func TestImports(t *testing.T) { diff --git a/src/crypto/subtle/constant_time.go b/src/crypto/subtle/constant_time.go index 8eeff3b629befb..14c911101b0fb8 100644 --- a/src/crypto/subtle/constant_time.go +++ b/src/crypto/subtle/constant_time.go @@ -6,63 +6,47 @@ // code but require careful thought to use correctly. package subtle -import "crypto/internal/fips140/subtle" +import ( + "crypto/internal/constanttime" + "crypto/internal/fips140/subtle" +) + +// These functions are forwarded to crypto/internal/constanttime for intrinsified +// operations, and to crypto/internal/fips140/subtle for byte slice operations. // ConstantTimeCompare returns 1 if the two slices, x and y, have equal contents // and 0 otherwise. The time taken is a function of the length of the slices and // is independent of the contents. If the lengths of x and y do not match it // returns 0 immediately. func ConstantTimeCompare(x, y []byte) int { - if len(x) != len(y) { - return 0 - } - - var v byte - - for i := 0; i < len(x); i++ { - v |= x[i] ^ y[i] - } - - return ConstantTimeByteEq(v, 0) + return subtle.ConstantTimeCompare(x, y) } // ConstantTimeSelect returns x if v == 1 and y if v == 0. // Its behavior is undefined if v takes any other value. func ConstantTimeSelect(v, x, y int) int { - // This is intrinsicified on arches with CMOV. - // It implements the following superset behavior: - // ConstantTimeSelect returns x if v != 0 and y if v == 0. - // Do the same here to avoid non portable UB. - v = int(constantTimeBoolToUint8(v != 0)) - return ^(v-1)&x | (v-1)&y + return constanttime.Select(v, x, y) } // ConstantTimeByteEq returns 1 if x == y and 0 otherwise. func ConstantTimeByteEq(x, y uint8) int { - return int(constantTimeBoolToUint8(x == y)) + return constanttime.ByteEq(x, y) } // ConstantTimeEq returns 1 if x == y and 0 otherwise. 
func ConstantTimeEq(x, y int32) int { - return int(constantTimeBoolToUint8(x == y)) + return constanttime.Eq(x, y) } // ConstantTimeCopy copies the contents of y into x (a slice of equal length) // if v == 1. If v == 0, x is left unchanged. Its behavior is undefined if v // takes any other value. func ConstantTimeCopy(v int, x, y []byte) { - // Forward this one since it gains nothing from compiler intrinsics. subtle.ConstantTimeCopy(v, x, y) } // ConstantTimeLessOrEq returns 1 if x <= y and 0 otherwise. // Its behavior is undefined if x or y are negative or > 2**31 - 1. func ConstantTimeLessOrEq(x, y int) int { - return int(constantTimeBoolToUint8(x <= y)) -} - -// constantTimeBoolToUint8 is a compiler intrinsic. -// It returns 1 for true and 0 for false. -func constantTimeBoolToUint8(b bool) uint8 { - panic("unreachable; must be intrinsicified") + return constanttime.LessOrEq(x, y) } diff --git a/src/crypto/tls/bettertls_test.go b/src/crypto/tls/bettertls_test.go new file mode 100644 index 00000000000000..d1b06109288e5c --- /dev/null +++ b/src/crypto/tls/bettertls_test.go @@ -0,0 +1,230 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test uses Netflix's BetterTLS test suite to test the crypto/x509 +// path building and name constraint validation. +// +// The test data in JSON form is around 31MB, so we fetch the BetterTLS +// go module and use it to generate the JSON data on-the-fly in a tmp dir. +// +// For more information, see: +// https://github.com/netflix/bettertls +// https://netflixtechblog.com/bettertls-c9915cd255c0 + +package tls_test + +import ( + "crypto/internal/cryptotest" + "crypto/x509" + "encoding/base64" + "encoding/json" + "internal/testenv" + "os" + "path/filepath" + "testing" +) + +// TestBetterTLS runs the "pathbuilding" and "nameconstraints" suites of +// BetterTLS. 
+// +// The test cases in the pathbuilding suite are designed to test edge-cases +// for path building and validation. In particular, the ["chain of pain"][0] +// scenario where a validator treats path building as an operation with +// a single possible outcome, instead of many. +// +// The test cases in the nameconstraints suite are designed to test edge-cases +// for name constraint parsing and validation. +// +// [0]: https://medium.com/@sleevi_/path-building-vs-path-verifying-the-chain-of-pain-9fbab861d7d6 +func TestBetterTLS(t *testing.T) { + testenv.SkipIfShortAndSlow(t) + + data, roots := testData(t) + + for _, suite := range []string{"pathbuilding", "nameconstraints"} { + t.Run(suite, func(t *testing.T) { + runTestSuite(t, suite, &data, roots) + }) + } +} + +func runTestSuite(t *testing.T, suiteName string, data *betterTLS, roots *x509.CertPool) { + suite, exists := data.Suites[suiteName] + if !exists { + t.Fatalf("missing %s suite", suiteName) + } + + t.Logf( + "running %s test suite with %d test cases", + suiteName, len(suite.TestCases)) + + for _, tc := range suite.TestCases { + t.Logf("testing %s test case %d", suiteName, tc.ID) + + certsDER, err := tc.Certs() + if err != nil { + t.Fatalf( + "failed to decode certificates for test case %d: %v", + tc.ID, err) + } + + if len(certsDER) == 0 { + t.Fatalf("test case %d has no certificates", tc.ID) + } + + eeCert, err := x509.ParseCertificate(certsDER[0]) + if err != nil { + // Several constraint test cases contain invalid end-entity + // certificate extensions that we reject ahead of verification + // time. We consider this a pass and skip further processing. + // + // For example, a SAN with a uniformResourceIdentifier general name + // containing the value `"http://foo.bar, DNS:test.localhost"`, or + // an iPAddress general name of the wrong length. 
+ if suiteName == "nameconstraints" && tc.Expected == expectedReject { + t.Logf( + "skipping expected reject test case %d "+ + "- end entity certificate parse error: %v", + tc.ID, err) + continue + } + t.Fatalf( + "failed to parse end entity certificate for test case %d: %v", + tc.ID, err) + } + + intermediates := x509.NewCertPool() + for i, certDER := range certsDER[1:] { + cert, err := x509.ParseCertificate(certDER) + if err != nil { + t.Fatalf( + "failed to parse intermediate certificate %d for test case %d: %v", + i+1, tc.ID, err) + } + intermediates.AddCert(cert) + } + + _, err = eeCert.Verify(x509.VerifyOptions{ + Roots: roots, + Intermediates: intermediates, + DNSName: tc.Hostname, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }) + + switch tc.Expected { + case expectedAccept: + if err != nil { + t.Errorf( + "test case %d failed: expected success, got error: %v", + tc.ID, err) + } + case expectedReject: + if err == nil { + t.Errorf( + "test case %d failed: expected failure, but verification succeeded", + tc.ID) + } + default: + t.Fatalf( + "test case %d failed: unknown expected result: %s", + tc.ID, tc.Expected) + } + } +} + +func testData(t *testing.T) (betterTLS, *x509.CertPool) { + const ( + bettertlsModule = "github.com/Netflix/bettertls" + bettertlsVersion = "v0.0.0-20250909192348-e1e99e353074" + ) + + bettertlsDir := cryptotest.FetchModule(t, bettertlsModule, bettertlsVersion) + + tempDir := t.TempDir() + testsJSONPath := filepath.Join(tempDir, "tests.json") + + cmd := testenv.Command(t, testenv.GoToolPath(t), + "run", "./test-suites/cmd/bettertls", + "export-tests", + "--out", testsJSONPath) + cmd.Dir = bettertlsDir + + t.Log("running bettertls export-tests command") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf( + "failed to run bettertls export-tests: %v\nOutput: %s", + err, output) + } + + jsonData, err := os.ReadFile(testsJSONPath) + if err != nil { + t.Fatalf("failed to read exported tests.json: %v", err) + } 
+ + t.Logf("successfully loaded tests.json at %s", testsJSONPath) + + var data betterTLS + if err := json.Unmarshal(jsonData, &data); err != nil { + t.Fatalf("failed to unmarshal JSON data: %v", err) + } + + t.Logf("testing betterTLS revision: %s", data.Revision) + t.Logf("number of test suites: %d", len(data.Suites)) + + rootDER, err := data.RootCert() + if err != nil { + t.Fatalf("failed to decode trust root: %v", err) + } + + rootCert, err := x509.ParseCertificate(rootDER) + if err != nil { + t.Fatalf("failed to parse trust root certificate: %v", err) + } + + roots := x509.NewCertPool() + roots.AddCert(rootCert) + + return data, roots +} + +type betterTLS struct { + Revision string `json:"betterTlsRevision"` + Root string `json:"trustRoot"` + Suites map[string]betterTLSSuite `json:"suites"` +} + +func (b *betterTLS) RootCert() ([]byte, error) { + return base64.StdEncoding.DecodeString(b.Root) +} + +type betterTLSSuite struct { + TestCases []betterTLSTest `json:"testCases"` +} + +type betterTLSTest struct { + ID uint32 `json:"id"` + Certificates []string `json:"certificates"` + Hostname string `json:"hostname"` + Expected expectedResult `json:"expected"` +} + +func (test *betterTLSTest) Certs() ([][]byte, error) { + certs := make([][]byte, len(test.Certificates)) + for i, cert := range test.Certificates { + decoded, err := base64.StdEncoding.DecodeString(cert) + if err != nil { + return nil, err + } + certs[i] = decoded + } + return certs, nil +} + +type expectedResult string + +const ( + expectedAccept expectedResult = "ACCEPT" + expectedReject expectedResult = "REJECT" +) diff --git a/src/debug/gosym/symtab.go b/src/debug/gosym/symtab.go index bf38927254f1d0..08d46684bf32c4 100644 --- a/src/debug/gosym/symtab.go +++ b/src/debug/gosym/symtab.go @@ -332,7 +332,8 @@ func walksymtab(data []byte, fn func(sym) error) error { // NewTable decodes the Go symbol table (the ".gosymtab" section in ELF), // returning an in-memory representation. 
-// Starting with Go 1.3, the Go symbol table no longer includes symbol data. +// Starting with Go 1.3, the Go symbol table no longer includes symbol data; +// callers should pass nil for the symtab parameter. func NewTable(symtab []byte, pcln *LineTable) (*Table, error) { var n int err := walksymtab(symtab, func(s sym) error { diff --git a/src/encoding/pem/pem.go b/src/encoding/pem/pem.go index 1da60d3227dc11..6bf2b41ad0eb7f 100644 --- a/src/encoding/pem/pem.go +++ b/src/encoding/pem/pem.go @@ -95,6 +95,9 @@ func Decode(data []byte) (p *Block, rest []byte) { for { // If we've already tried parsing a block, skip past the END we already // saw. + if endTrailerIndex < 0 || endTrailerIndex > len(rest) { + return nil, data + } rest = rest[endTrailerIndex:] // Find the first END line, and then find the last BEGIN line before diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index bc7eae69def15f..48a9f3e75bb7b5 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -479,6 +479,8 @@ var depsRules = ` io, math/rand/v2 < crypto/internal/randutil; + NONE < crypto/internal/constanttime; + STR < crypto/internal/impl; OS < crypto/internal/sysrand @@ -496,6 +498,7 @@ var depsRules = ` crypto/internal/impl, crypto/internal/entropy, crypto/internal/randutil, + crypto/internal/constanttime, crypto/internal/entropy/v1.0.0, crypto/internal/fips140deps/byteorder, crypto/internal/fips140deps/cpu, diff --git a/src/internal/godebugs/godebugs_test.go b/src/internal/godebugs/godebugs_test.go index 168acc134aa753..e242f58c5536c8 100644 --- a/src/internal/godebugs/godebugs_test.go +++ b/src/internal/godebugs/godebugs_test.go @@ -93,3 +93,11 @@ func incNonDefaults(t *testing.T) map[string]bool { } return seen } + +func TestRemoved(t *testing.T) { + for _, info := range godebugs.Removed { + if godebugs.Lookup(info.Name) != nil { + t.Fatalf("GODEBUG: %v exists in both Removed and All", info.Name) + } + } +} diff --git a/src/internal/godebugs/table.go 
b/src/internal/godebugs/table.go index 852305e8553aab..271c58648dc2cd 100644 --- a/src/internal/godebugs/table.go +++ b/src/internal/godebugs/table.go @@ -78,6 +78,16 @@ var All = []Info{ {Name: "zipinsecurepath", Package: "archive/zip"}, } +type RemovedInfo struct { + Name string // name of the removed GODEBUG setting. + Removed int // minor version of Go, when the removal happened +} + +// Removed contains all GODEBUGs that we have removed. +var Removed = []RemovedInfo{ + {Name: "x509sha1", Removed: 24}, +} + // Lookup returns the Info with the given name. func Lookup(name string) *Info { // binary search, avoiding import of sort. diff --git a/src/internal/profile/proto.go b/src/internal/profile/proto.go index 58ff0ad2e07789..ad6f621f883c7d 100644 --- a/src/internal/profile/proto.go +++ b/src/internal/profile/proto.go @@ -24,6 +24,7 @@ package profile import ( "errors" "fmt" + "slices" ) type buffer struct { @@ -175,6 +176,16 @@ func le32(p []byte) uint32 { return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 } +func peekNumVarints(data []byte) (numVarints int) { + for ; len(data) > 0; numVarints++ { + var err error + if _, data, err = decodeVarint(data); err != nil { + break + } + } + return numVarints +} + func decodeVarint(data []byte) (uint64, []byte, error) { var i int var u uint64 @@ -275,6 +286,9 @@ func decodeInt64(b *buffer, x *int64) error { func decodeInt64s(b *buffer, x *[]int64) error { if b.typ == 2 { // Packed encoding + dataLen := peekNumVarints(b.data) + *x = slices.Grow(*x, dataLen) + data := b.data for len(data) > 0 { var u uint64 @@ -305,8 +319,11 @@ func decodeUint64(b *buffer, x *uint64) error { func decodeUint64s(b *buffer, x *[]uint64) error { if b.typ == 2 { - data := b.data // Packed encoding + dataLen := peekNumVarints(b.data) + *x = slices.Grow(*x, dataLen) + + data := b.data for len(data) > 0 { var u uint64 var err error diff --git a/src/internal/runtime/cgobench/bench_test.go 
b/src/internal/runtime/cgobench/bench_test.go index b4d8efec5efcf6..3b8f9a8ca3aee2 100644 --- a/src/internal/runtime/cgobench/bench_test.go +++ b/src/internal/runtime/cgobench/bench_test.go @@ -24,3 +24,17 @@ func BenchmarkCgoCallParallel(b *testing.B) { } }) } + +func BenchmarkCgoCallWithCallback(b *testing.B) { + for b.Loop() { + cgobench.Callback() + } +} + +func BenchmarkCgoCallParallelWithCallback(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + cgobench.Callback() + } + }) +} diff --git a/src/internal/runtime/cgobench/funcs.go b/src/internal/runtime/cgobench/funcs.go index db685180a1b594..91efa5127893e5 100644 --- a/src/internal/runtime/cgobench/funcs.go +++ b/src/internal/runtime/cgobench/funcs.go @@ -9,9 +9,24 @@ package cgobench /* static void empty() { } + +void go_empty_callback(); + +static void callback() { + go_empty_callback(); +} + */ import "C" func Empty() { C.empty() } + +func Callback() { + C.callback() +} + +//export go_empty_callback +func go_empty_callback() { +} diff --git a/src/internal/strconv/atofeisel.go b/src/internal/strconv/atofeisel.go index 10b8c96bba94e2..5fa92908b49ced 100644 --- a/src/internal/strconv/atofeisel.go +++ b/src/internal/strconv/atofeisel.go @@ -40,7 +40,7 @@ func eiselLemire64(man uint64, exp10 int, neg bool) (f float64, ok bool) { // Normalization. clz := bits.LeadingZeros64(man) man <<= uint(clz) - retExp2 := uint64(exp2+64-float64Bias) - uint64(clz) + retExp2 := uint64(exp2+63-float64Bias) - uint64(clz) // Multiplication. xHi, xLo := bits.Mul64(man, pow.Hi) @@ -115,7 +115,7 @@ func eiselLemire32(man uint64, exp10 int, neg bool) (f float32, ok bool) { // Normalization. clz := bits.LeadingZeros64(man) man <<= uint(clz) - retExp2 := uint64(exp2+64-float32Bias) - uint64(clz) + retExp2 := uint64(exp2+63-float32Bias) - uint64(clz) // Multiplication. 
xHi, xLo := bits.Mul64(man, pow.Hi) diff --git a/src/internal/strconv/atoi.go b/src/internal/strconv/atoi.go index 5bc259e7e55e83..4bbcb4f5da7aeb 100644 --- a/src/internal/strconv/atoi.go +++ b/src/internal/strconv/atoi.go @@ -41,8 +41,6 @@ const intSize = 32 << (^uint(0) >> 63) // IntSize is the size in bits of an int or uint value. const IntSize = intSize -const maxUint64 = 1<<64 - 1 - // ParseUint is like [ParseInt] but for unsigned numbers. // // A sign prefix is not permitted. diff --git a/src/internal/strconv/export_test.go b/src/internal/strconv/export_test.go index bea741e6fbe1bf..c879f24480a450 100644 --- a/src/internal/strconv/export_test.go +++ b/src/internal/strconv/export_test.go @@ -6,6 +6,11 @@ package strconv type Uint128 = uint128 +const ( + Pow10Min = pow10Min + Pow10Max = pow10Max +) + var ( MulLog10_2 = mulLog10_2 MulLog2_10 = mulLog2_10 @@ -13,6 +18,9 @@ var ( Pow10 = pow10 Umul128 = umul128 Umul192 = umul192 + Div5Tab = div5Tab + DivisiblePow5 = divisiblePow5 + TrimZeros = trimZeros ) func NewDecimal(i uint64) *decimal { diff --git a/src/internal/strconv/fp_test.go b/src/internal/strconv/fp_test.go index 042328c7d4e4c8..ba739941cc8a92 100644 --- a/src/internal/strconv/fp_test.go +++ b/src/internal/strconv/fp_test.go @@ -99,12 +99,14 @@ func TestFp(t *testing.T) { s := bufio.NewScanner(strings.NewReader(testfp)) for lineno := 1; s.Scan(); lineno++ { line := s.Text() - if len(line) == 0 || line[0] == '#' { + line, _, _ = strings.Cut(line, "#") + line = strings.TrimSpace(line) + if line == "" { continue } a := strings.Split(line, " ") if len(a) != 4 { - t.Error("testdata/testfp.txt:", lineno, ": wrong field count") + t.Errorf("testdata/testfp.txt:%d: wrong field count", lineno) continue } var s string @@ -114,22 +116,21 @@ func TestFp(t *testing.T) { var ok bool v, ok = myatof64(a[2]) if !ok { - t.Error("testdata/testfp.txt:", lineno, ": cannot atof64 ", a[2]) + t.Errorf("testdata/testfp.txt:%d: cannot atof64 %s", lineno, a[2]) continue } s = 
fmt.Sprintf(a[1], v) case "float32": v1, ok := myatof32(a[2]) if !ok { - t.Error("testdata/testfp.txt:", lineno, ": cannot atof32 ", a[2]) + t.Errorf("testdata/testfp.txt:%d: cannot atof32 %s", lineno, a[2]) continue } s = fmt.Sprintf(a[1], v1) v = float64(v1) } if s != a[3] { - t.Error("testdata/testfp.txt:", lineno, ": ", a[0], " ", a[1], " ", a[2], " (", v, ") ", - "want ", a[3], " got ", s) + t.Errorf("testdata/testfp.txt:%d: %s %s %s %s: have %s want %s", lineno, a[0], a[1], a[2], a[3], s, a[3]) } } if s.Err() != nil { diff --git a/src/internal/strconv/ftoa.go b/src/internal/strconv/ftoa.go index 1aec5447ece8b8..64be29e23efc7b 100644 --- a/src/internal/strconv/ftoa.go +++ b/src/internal/strconv/ftoa.go @@ -123,16 +123,17 @@ func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte { return bigFtoa(dst, prec, fmt, neg, mant, exp, flt) } - var digs decimalSlice - ok := false // Negative precision means "only as much as needed to be exact." shortest := prec < 0 + var digs decimalSlice + if mant == 0 { + return formatDigits(dst, shortest, neg, digs, prec, fmt) + } if shortest { // Use Ryu algorithm. var buf [32]byte digs.d = buf[:] ryuFtoaShortest(&digs, mant, exp-int(flt.mantbits), flt) - ok = true // Precision for shortest representation mode. switch fmt { case 'e', 'E': @@ -142,36 +143,44 @@ func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte { case 'g', 'G': prec = digs.nd } - } else if fmt != 'f' { - // Fixed number of digits. - digits := prec - switch fmt { - case 'e', 'E': - digits++ - case 'g', 'G': - if prec == 0 { - prec = 1 - } - digits = prec - default: - // Invalid mode. - digits = 1 + return formatDigits(dst, shortest, neg, digs, prec, fmt) + } + + // Fixed number of digits. + digits := prec + switch fmt { + case 'f': + // %f precision specifies digits after the decimal point. + // Estimate an upper bound on the total number of digits needed. + // ftoaFixed will shorten as needed according to prec. 
+ if exp >= 0 { + digits = 1 + mulLog10_2(1+exp) + prec + } else { + digits = 1 + prec - mulLog10_2(-exp) } - var buf [24]byte - if bitSize == 32 && digits <= 9 { - digs.d = buf[:] - ryuFtoaFixed32(&digs, uint32(mant), exp-int(flt.mantbits), digits) - ok = true - } else if digits <= 18 { + case 'e', 'E': + digits++ + case 'g', 'G': + if prec == 0 { + prec = 1 + } + digits = prec + default: + // Invalid mode. + digits = 1 + } + if digits <= 18 { + // digits <= 0 happens for %f on very small numbers + // and means that we're guaranteed to print all zeros. + if digits > 0 { + var buf [24]byte digs.d = buf[:] - ryuFtoaFixed64(&digs, mant, exp-int(flt.mantbits), digits) - ok = true + fixedFtoa(&digs, mant, exp-int(flt.mantbits), digits, prec, fmt) } + return formatDigits(dst, false, neg, digs, prec, fmt) } - if !ok { - return bigFtoa(dst, prec, fmt, neg, mant, exp, flt) - } - return formatDigits(dst, shortest, neg, digs, prec, fmt) + + return bigFtoa(dst, prec, fmt, neg, mant, exp, flt) } // bigFtoa uses multiprecision computations to format a float. diff --git a/src/internal/strconv/ftoa_test.go b/src/internal/strconv/ftoa_test.go index 14d1200ff26b48..0393c3e17c3b59 100644 --- a/src/internal/strconv/ftoa_test.go +++ b/src/internal/strconv/ftoa_test.go @@ -5,9 +5,9 @@ package strconv_test import ( + . "internal/strconv" "math" "math/rand" - . 
"internal/strconv" "testing" ) @@ -42,6 +42,29 @@ var ftoatests = []ftoaTest{ {2000000, 'g', -1, "2e+06"}, {1e10, 'g', -1, "1e+10"}, + // f conversion basic cases + {12345, 'f', 2, "12345.00"}, + {1234.5, 'f', 2, "1234.50"}, + {123.45, 'f', 2, "123.45"}, + {12.345, 'f', 2, "12.35"}, + {1.2345, 'f', 2, "1.23"}, + {0.12345, 'f', 2, "0.12"}, + {0.12945, 'f', 2, "0.13"}, + {0.012345, 'f', 2, "0.01"}, + {0.015, 'f', 2, "0.01"}, + {0.016, 'f', 2, "0.02"}, + {0.0052345, 'f', 2, "0.01"}, + {0.0012345, 'f', 2, "0.00"}, + {0.00012345, 'f', 2, "0.00"}, + {0.000012345, 'f', 2, "0.00"}, + + {0.996644984, 'f', 6, "0.996645"}, + {0.996644984, 'f', 5, "0.99664"}, + {0.996644984, 'f', 4, "0.9966"}, + {0.996644984, 'f', 3, "0.997"}, + {0.996644984, 'f', 2, "1.00"}, + {0.996644984, 'f', 1, "1.0"}, + // g conversion and zero suppression {400, 'g', 2, "4e+02"}, {40, 'g', 2, "40"}, @@ -177,6 +200,16 @@ var ftoatests = []ftoaTest{ {1.801439850948199e+16, 'g', -1, "1.801439850948199e+16"}, {5.960464477539063e-08, 'g', -1, "5.960464477539063e-08"}, {1.012e-320, 'g', -1, "1.012e-320"}, + + // Cases from TestFtoaRandom that caught bugs in fixedFtoa. + {8177880169308380. * (1 << 1), 'e', 14, "1.63557603386168e+16"}, + {8393378656576888. * (1 << 1), 'e', 15, "1.678675731315378e+16"}, + {8738676561280626. * (1 << 4), 'e', 16, "1.3981882498049002e+17"}, + {8291032395191335. / (1 << 30), 'e', 5, "7.72163e+06"}, + + // Exercise divisiblePow5 case in fixedFtoa + {2384185791015625. * (1 << 12), 'e', 5, "9.76562e+18"}, + {2384185791015625. 
* (1 << 13), 'e', 5, "1.95312e+19"}, } func TestFtoa(t *testing.T) { @@ -253,7 +286,7 @@ func TestFtoaRandom(t *testing.T) { shortSlow = FormatFloat(x, 'e', prec, 64) SetOptimize(true) if shortSlow != shortFast { - t.Errorf("%b printed as %s, want %s", x, shortFast, shortSlow) + t.Errorf("%b printed with %%.%de as %s, want %s", x, prec, shortFast, shortSlow) } } } @@ -294,8 +327,10 @@ var ftoaBenches = []struct { {"64Fixed1", 123456, 'e', 3, 64}, {"64Fixed2", 123.456, 'e', 3, 64}, + {"64Fixed2.5", 1.2345e+06, 'e', 3, 64}, {"64Fixed3", 1.23456e+78, 'e', 3, 64}, {"64Fixed4", 1.23456e-78, 'e', 3, 64}, + {"64Fixed5Hard", 4.096e+25, 'e', 5, 64}, // needs divisiblePow5(..., 20) {"64Fixed12", 1.23456e-78, 'e', 12, 64}, {"64Fixed16", 1.23456e-78, 'e', 16, 64}, // From testdata/testfp.txt @@ -303,6 +338,10 @@ var ftoaBenches = []struct { {"64Fixed17Hard", math.Ldexp(8887055249355788, 665), 'e', 17, 64}, {"64Fixed18Hard", math.Ldexp(6994187472632449, 690), 'e', 18, 64}, + {"64FixedF1", 123.456, 'f', 6, 64}, + {"64FixedF2", 0.0123, 'f', 6, 64}, + {"64FixedF3", 12.3456, 'f', 2, 64}, + // Trigger slow path (see issue #15672). // The shortest is: 8.034137530808823e+43 {"Slowpath64", 8.03413753080882349e+43, 'e', -1, 64}, diff --git a/src/internal/strconv/ftoafixed.go b/src/internal/strconv/ftoafixed.go new file mode 100644 index 00000000000000..7f297e924e1606 --- /dev/null +++ b/src/internal/strconv/ftoafixed.go @@ -0,0 +1,184 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package strconv + +import "math/bits" + +var uint64pow10 = [...]uint64{ + 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, + 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, +} + +// fixedFtoa formats a number of decimal digits of mant*(2^exp) into d, +// where mant > 0 and 1 ≤ digits ≤ 18. 
+// If fmt == 'f', digits is a conservative overestimate, and the final +// number of digits is prec past the decimal point. +func fixedFtoa(d *decimalSlice, mant uint64, exp, digits, prec int, fmt byte) { + // The strategy here is to multiply (mant * 2^exp) by a power of 10 + // to make the resulting integer be the number of digits we want. + // + // Adams proved in the Ryu paper that 128-bit precision in the + // power-of-10 constant is sufficient to produce correctly + // rounded output for all float64s, up to 18 digits. + // https://dl.acm.org/doi/10.1145/3192366.3192369 + // + // TODO(rsc): The paper is not focused on, nor terribly clear about, + // this fact in this context, and the proof seems too complicated. + // Post a shorter, more direct proof and link to it here. + + if digits > 18 { + panic("fixedFtoa called with digits > 18") + } + + // Shift mantissa to have 64 bits, + // so that the 192-bit product below will + // have at least 63 bits in its top word. + b := 64 - bits.Len64(mant) + mant <<= b + exp -= b + + // We have f = mant * 2^exp ≥ 2^(63+exp) + // and we want to multiply it by some 10^p + // to make it have the number of digits plus one rounding bit: + // + // 2 * 10^(digits-1) ≤ f * 10^p < ~2 * 10^digits + // + // The lower bound is required, but the upper bound is approximate: + // we must not have too few digits, but we can round away extra ones. + // + // f * 10^p ≥ 2 * 10^(digits-1) + // 10^p ≥ 2 * 10^(digits-1) / f [dividing by f] + // p ≥ (log₁₀ 2) + (digits-1) - log₁₀ f [taking log₁₀] + // p ≥ (log₁₀ 2) + (digits-1) - log₁₀ (mant * 2^exp) [expanding f] + // p ≥ (log₁₀ 2) + (digits-1) - (log₁₀ 2) * (64 + exp) [mant < 2⁶⁴] + // p ≥ (digits - 1) - (log₁₀ 2) * (63 + exp) [refactoring] + // + // Once we have p, we can compute the scaled value: + // + // dm * 2^de = mant * 2^exp * 10^p + // = mant * 2^exp * pow/2^128 * 2^exp2. + // = (mant * pow/2^128) * 2^(exp+exp2). 
+ p := (digits - 1) - mulLog10_2(63+exp) + pow, exp2, ok := pow10(p) + if !ok { + // This never happens due to the range of float32/float64 exponent + panic("fixedFtoa: pow10 out of range") + } + if -22 <= p && p < 0 { + // Special case: Let q=-p. q is in [1,22]. We are dividing by 10^q + // and the mantissa may be a multiple of 5^q (5^22 < 2^53), + // in which case the division must be computed exactly and + // recorded as exact for correct rounding. Our normal computation is: + // + // dm = floor(mant * floor(10^p * 2^s)) + // + // for some scaling shift s. To make this an exact division, + // it suffices to change the inner floor to a ceil: + // + // dm = floor(mant * ceil(10^p * 2^s)) + // + // In the range of values we are using, the floor and ceil + // cancel each other out and the high 64 bits of the product + // come out exactly right. + // (This is the same trick compilers use for division by constants. + // See Hacker's Delight, 2nd ed., Chapter 10.) + pow.Lo++ + } + dm, lo1, lo0 := umul192(mant, pow) + de := exp + exp2 + + // Check whether any bits have been truncated from dm. + // If so, set dt != 0. If not, leave dt == 0 (meaning dm is exact). + var dt uint + switch { + default: + // Most powers of 10 use a truncated constant, + // meaning the result is also truncated. + dt = 1 + case 0 <= p && p <= 55: + // Small positive powers of 10 (up to 10⁵⁵) can be represented + // precisely in a 128-bit mantissa (5⁵⁵ ≤ 2¹²⁸), so the only truncation + // comes from discarding the low bits of the 192-bit product. + // + // TODO(rsc): The new proof mentioned above should also + // prove that we can't have lo1 == 0 and lo0 != 0. + // After proving that, drop computation and use of lo0 here. + dt = bool2uint(lo1|lo0 != 0) + case -22 <= p && p < 0 && divisiblePow5(mant, -p): + // If the original mantissa was a multiple of 5^p, + // the result is exact. (See comment above for pow.Lo++.) + dt = 0 + } + + // The value we want to format is dm * 2^de, where de < 0. 
+	// Multiply by 2^de by shifting, but leave one extra bit for rounding. + // After the shift, the "integer part" of dm is dm>>1, + // the "rounding bit" (the first fractional bit) is dm&1, + // and the "truncated bit" (have any bits been discarded?) is dt. + shift := -de - 1 + dt |= bool2uint(dm&(1<<shift-1) != 0) + dm >>= shift + + // Set decimal point in eventual formatted digits, + // so we can update it as we adjust the digits. + d.dp = digits - p + + // Trim excess digit if any, updating truncation and decimal point. + // The << 1 is leaving room for the rounding bit. + max := uint64pow10[digits] << 1 + if dm >= max { + var r uint + dm, r = dm/10, uint(dm%10) + dt |= bool2uint(r != 0) + d.dp++ + } + + // If this is %.*f we may have overestimated the digits needed. + // Now that we know where the decimal point is, + // trim to the actual number of digits, which is d.dp+prec. + if fmt == 'f' && digits != d.dp+prec { + for digits > d.dp+prec { + var r uint + dm, r = dm/10, uint(dm%10) + dt |= bool2uint(r != 0) + digits-- + } + + // Dropping those digits can create a new leftmost + // non-zero digit, like if we are formatting %.1f and + // convert 0.09 -> 0.1. Detect and adjust for that. + if digits <= 0 { + digits = 1 + d.dp++ + } + + max = uint64pow10[digits] << 1 + } + + // Round and shift away rounding bit. + // We want to round up when + // (a) the fractional part is > 0.5 (dm&1 != 0 and dt == 1) + // (b) or the fractional part is ≥ 0.5 and the integer part is odd + // (dm&1 != 0 and dm&2 != 0). + // The bitwise expression encodes that logic. + dm += uint64(uint(dm) & (dt | uint(dm)>>1) & 1) + dm >>= 1 + if dm == max>>1 { + // 999... rolled over to 1000... + dm = uint64pow10[digits-1] + d.dp++ + } + + // Format digits into d. 
+ if dm != 0 { + if formatBase10(d.d[:digits], dm) != 0 { + panic("formatBase10") + } + d.nd = digits + for d.d[d.nd-1] == '0' { + d.nd-- + } + } +} diff --git a/src/internal/strconv/ftoaryu.go b/src/internal/strconv/ftoaryu.go index 473e5b65be8d4c..9407bfec445680 100644 --- a/src/internal/strconv/ftoaryu.go +++ b/src/internal/strconv/ftoaryu.go @@ -4,203 +4,11 @@ package strconv -import ( - "math/bits" -) +import "math/bits" // binary to decimal conversion using the Ryū algorithm. // // See Ulf Adams, "Ryū: Fast Float-to-String Conversion" (doi:10.1145/3192366.3192369) -// -// Fixed precision formatting is a variant of the original paper's -// algorithm, where a single multiplication by 10^k is required, -// sharing the same rounding guarantees. - -// ryuFtoaFixed32 formats mant*(2^exp) with prec decimal digits. -func ryuFtoaFixed32(d *decimalSlice, mant uint32, exp int, prec int) { - if prec < 0 { - panic("ryuFtoaFixed32 called with negative prec") - } - if prec > 9 { - panic("ryuFtoaFixed32 called with prec > 9") - } - // Zero input. - if mant == 0 { - d.nd, d.dp = 0, 0 - return - } - // Renormalize to a 25-bit mantissa. - e2 := exp - if b := bits.Len32(mant); b < 25 { - mant <<= uint(25 - b) - e2 += b - 25 - } - // Choose an exponent such that rounded mant*(2^e2)*(10^q) has - // at least prec decimal digits, i.e - // mant*(2^e2)*(10^q) >= 10^(prec-1) - // Because mant >= 2^24, it is enough to choose: - // 2^(e2+24) >= 10^(-q+prec-1) - // or q = -mulLog10_2(e2+24) + prec - 1 - q := -mulLog10_2(e2+24) + prec - 1 - - // Now compute mant*(2^e2)*(10^q). - // Is it an exact computation? - // Only small positive powers of 10 are exact (5^28 has 66 bits). - exact := q <= 27 && q >= 0 - - di, dexp2, d0 := mult64bitPow10(mant, e2, q) - if dexp2 >= 0 { - panic("not enough significant bits after mult64bitPow10") - } - // As a special case, computation might still be exact, if exponent - // was negative and if it amounts to computing an exact division. 
- // In that case, we ignore all lower bits. - // Note that division by 10^11 cannot be exact as 5^11 has 26 bits. - if q < 0 && q >= -10 && divisibleByPower5(uint64(mant), -q) { - exact = true - d0 = true - } - // Remove extra lower bits and keep rounding info. - extra := uint(-dexp2) - extraMask := uint32(1<<extra - 1) - - di, dfrac := di>>extra, di&extraMask - roundUp := false - if exact { - // If we computed an exact product, d + 1/2 - // should round to d+1 if 'd' is odd. - roundUp = dfrac > 1<<(extra-1) || - (dfrac == 1<<(extra-1) && !d0) || - (dfrac == 1<<(extra-1) && d0 && di&1 == 1) - } else { - // otherwise, d+1/2 always rounds up because - // we truncated below. - roundUp = dfrac>>(extra-1) == 1 - } - if dfrac != 0 { - d0 = false - } - // Proceed to the requested number of digits - formatDecimal(d, uint64(di), !d0, roundUp, prec) - // Adjust exponent - d.dp -= q -} - -// ryuFtoaFixed64 formats mant*(2^exp) with prec decimal digits. -func ryuFtoaFixed64(d *decimalSlice, mant uint64, exp int, prec int) { - if prec > 18 { - panic("ryuFtoaFixed64 called with prec > 18") - } - // Zero input. - if mant == 0 { - d.nd, d.dp = 0, 0 - return - } - // Renormalize to a 55-bit mantissa. - e2 := exp - if b := bits.Len64(mant); b < 55 { - mant = mant << uint(55-b) - e2 += b - 55 - } - // Choose an exponent such that rounded mant*(2^e2)*(10^q) has - // at least prec decimal digits, i.e - // mant*(2^e2)*(10^q) >= 10^(prec-1) - // Because mant >= 2^54, it is enough to choose: - // 2^(e2+54) >= 10^(-q+prec-1) - // or q = -mulLog10_2(e2+54) + prec - 1 - // - // The minimal required exponent is -mulLog10_2(1025)+18 = -291 - // The maximal required exponent is mulLog10_2(1074)+18 = 342 - q := -mulLog10_2(e2+54) + prec - 1 - - // Now compute mant*(2^e2)*(10^q). - // Is it an exact computation? - // Only small positive powers of 10 are exact (5^55 has 128 bits). 
- exact := q <= 55 && q >= 0 - - di, dexp2, d0 := mult128bitPow10(mant, e2, q) - if dexp2 >= 0 { - panic("not enough significant bits after mult128bitPow10") - } - // As a special case, computation might still be exact, if exponent - // was negative and if it amounts to computing an exact division. - // In that case, we ignore all lower bits. - // Note that division by 10^23 cannot be exact as 5^23 has 54 bits. - if q < 0 && q >= -22 && divisibleByPower5(mant, -q) { - exact = true - d0 = true - } - // Remove extra lower bits and keep rounding info. - extra := uint(-dexp2) - extraMask := uint64(1<<extra - 1) - - di, dfrac := di>>extra, di&extraMask - roundUp := false - if exact { - // If we computed an exact product, d + 1/2 - // should round to d+1 if 'd' is odd. - roundUp = dfrac > 1<<(extra-1) || - (dfrac == 1<<(extra-1) && !d0) || - (dfrac == 1<<(extra-1) && d0 && di&1 == 1) - } else { - // otherwise, d+1/2 always rounds up because - // we truncated below. - roundUp = dfrac>>(extra-1) == 1 - } - if dfrac != 0 { - d0 = false - } - // Proceed to the requested number of digits - formatDecimal(d, di, !d0, roundUp, prec) - // Adjust exponent - d.dp -= q -} - -var uint64pow10 = [...]uint64{ - 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, -} - -// formatDecimal fills d with at most prec decimal digits -// of mantissa m. The boolean trunc indicates whether m -// is truncated compared to the original number being formatted. 
-func formatDecimal(d *decimalSlice, m uint64, trunc bool, roundUp bool, prec int) { - max := uint64pow10[prec] - trimmed := 0 - for m >= max { - a, b := m/10, m%10 - m = a - trimmed++ - if b > 5 { - roundUp = true - } else if b < 5 { - roundUp = false - } else { // b == 5 - // round up if there are trailing digits, - // or if the new value of m is odd (round-to-even convention) - roundUp = trunc || m&1 == 1 - } - if b != 0 { - trunc = true - } - } - if roundUp { - m++ - } - if m >= max { - // Happens if di was originally 99999....xx - m /= 10 - trimmed++ - } - // render digits - formatBase10(d.d[:prec], m) - d.nd = prec - for d.d[d.nd-1] == '0' { - d.nd-- - trimmed++ - } - d.dp = d.nd + trimmed -} // ryuFtoaShortest formats mant*2^exp with prec decimal digits. func ryuFtoaShortest(d *decimalSlice, mant uint64, exp int, flt *floatInfo) { @@ -249,13 +57,13 @@ func ryuFtoaShortest(d *decimalSlice, mant uint64, exp int, flt *floatInfo) { if q < 0 && q >= -24 { // Division by a power of ten may be exact. // (note that 5^25 is a 59-bit number so division by 5^25 is never exact). - if divisibleByPower5(ml, -q) { + if divisiblePow5(ml, -q) { dl0 = true } - if divisibleByPower5(mc, -q) { + if divisiblePow5(mc, -q) { dc0 = true } - if divisibleByPower5(mu, -q) { + if divisiblePow5(mu, -q) { du0 = true } } @@ -464,7 +272,7 @@ func mult64bitPow10(m uint32, e2, q int) (resM uint32, resE int, exact bool) { pow.Hi++ } hi, lo := bits.Mul64(uint64(m), pow.Hi) - e2 += exp2 - 63 + 57 + e2 += exp2 - 64 + 57 return uint32(hi<<7 | lo>>57), e2, lo<<7 == 0 } @@ -492,21 +300,8 @@ func mult128bitPow10(m uint64, e2, q int) (resM uint64, resE int, exact bool) { // Inverse powers of ten must be rounded up. 
pow.Lo++ } - e2 += exp2 - 127 + 119 + e2 += exp2 - 128 + 119 hi, mid, lo := umul192(m, pow) return hi<<9 | mid>>55, e2, mid<<9 == 0 && lo == 0 } - -func divisibleByPower5(m uint64, k int) bool { - if m == 0 { - return true - } - for i := 0; i < k; i++ { - if m%5 != 0 { - return false - } - m /= 5 - } - return true -} diff --git a/src/internal/strconv/import_test.go b/src/internal/strconv/import_test.go index 0cbc451651a015..3dab2bf9e56e2b 100644 --- a/src/internal/strconv/import_test.go +++ b/src/internal/strconv/import_test.go @@ -8,6 +8,11 @@ import . "internal/strconv" type uint128 = Uint128 +const ( + pow10Min = Pow10Min + pow10Max = Pow10Max +) + var ( mulLog10_2 = MulLog10_2 mulLog2_10 = MulLog2_10 @@ -15,4 +20,7 @@ var ( pow10 = Pow10 umul128 = Umul128 umul192 = Umul192 + div5Tab = Div5Tab + divisiblePow5 = DivisiblePow5 + trimZeros = TrimZeros ) diff --git a/src/internal/strconv/itoa.go b/src/internal/strconv/itoa.go index d06de4770f1052..2375e034f59786 100644 --- a/src/internal/strconv/itoa.go +++ b/src/internal/strconv/itoa.go @@ -174,6 +174,14 @@ func small(i int) string { return smalls[i*2 : i*2+2] } +// RuntimeFormatBase10 formats u into the tail of a +// and returns the offset to the first byte written to a. +// It is only for use by package runtime. +// Other packages should use AppendUint. +func RuntimeFormatBase10(a []byte, u uint64) int { + return formatBase10(a, u) +} + // formatBase10 formats the decimal representation of u into the tail of a // and returns the offset of the first byte written to a. That is, after // diff --git a/src/internal/strconv/math.go b/src/internal/strconv/math.go index f0f3d5fe540b23..3b884e846a6222 100644 --- a/src/internal/strconv/math.go +++ b/src/internal/strconv/math.go @@ -27,13 +27,14 @@ func umul192(x uint64, y uint128) (hi, mid, lo uint64) { return hi + carry, mid, lo } -// pow10 returns the 128-bit mantissa and binary exponent of 10**e +// pow10 returns the 128-bit mantissa and binary exponent of 10**e. 
+// That is, 10^e = mant/2^128 * 2^exp. // If e is out of range, pow10 returns ok=false. func pow10(e int) (mant uint128, exp int, ok bool) { if e < pow10Min || e > pow10Max { return } - return pow10Tab[e-pow10Min], mulLog2_10(e), true + return pow10Tab[e-pow10Min], 1 + mulLog2_10(e), true } // mulLog10_2 returns math.Floor(x * log(2)/log(10)) for an integer x in @@ -55,3 +56,124 @@ func mulLog2_10(x int) int { // log(10)/log(2) ≈ 3.32192809489 ≈ 108853 / 2^15 return (x * 108853) >> 15 } + +func bool2uint(b bool) uint { + if b { + return 1 + } + return 0 +} + +// Exact Division and Remainder Checking +// +// An exact division x/c (exact means x%c == 0) +// can be implemented by x*m where m is the multiplicative inverse of c (m*c == 1). +// +// Since c is also the multiplicative inverse of m, x*m is lossless, +// and all the exact multiples of c map to all of [0, maxUint64/c]. +// The non-multiples are forced to map to larger values. +// This also gives a quick test for whether x is an exact multiple of c: +// compute the exact division and check whether it's at most maxUint64/c: +// x%c == 0 => x*m <= maxUint64/c. +// +// Only odd c have multiplicative inverses mod powers of two. +// To do an exact divide x / (c<<s), we can compute (x*m)>>s instead. +// And to check for remainder, we need to check that those low s +// bits are all zero before we shift them away. We can merge that +// with the <= for the exact odd remainder check by rotating the +// shifted bits into the high part instead: +// x%(c<<s) == 0 => bits.RotateLeft64(x*m, -s) <= maxUint64/c. +// +// The compiler does this transformation automatically in general, +// but we apply it here by hand in a few ways that the compiler can't help with. +// +// For a more detailed explanation, see +// Henry S. Warren, Jr., Hacker's Delight, 2nd ed., sections 10-16 and 10-17. + +// divisiblePow5 reports whether x is divisible by 5^p. +// It returns false for p not in [1, 22], +// because we only care about float64 mantissas, and 5^23 > 2^53. 
+func divisiblePow5(x uint64, p int) bool { + return 1 <= p && p <= 22 && x*div5Tab[p-1][0] <= div5Tab[p-1][1] +} + +const maxUint64 = 1<<64 - 1 + +// div5Tab[p-1] is the multiplicative inverse of 5^p and maxUint64/5^p. +var div5Tab = [22][2]uint64{ + {0xcccccccccccccccd, maxUint64 / 5}, + {0x8f5c28f5c28f5c29, maxUint64 / 5 / 5}, + {0x1cac083126e978d5, maxUint64 / 5 / 5 / 5}, + {0xd288ce703afb7e91, maxUint64 / 5 / 5 / 5 / 5}, + {0x5d4e8fb00bcbe61d, maxUint64 / 5 / 5 / 5 / 5 / 5}, + {0x790fb65668c26139, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xe5032477ae8d46a5, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xc767074b22e90e21, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x8e47ce423a2e9c6d, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x4fa7f60d3ed61f49, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x0fee64690c913975, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x3662e0e1cf503eb1, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xa47a2cf9f6433fbd, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x54186f653140a659, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x7738164770402145, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xe4a4d1417cd9a041, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xc75429d9e5c5200d, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xc1773b91fac10669, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x26b172506559ce15, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0xd489e3a9addec2d1, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x90e860bb892c8d5d, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, + {0x502e79bf1b6f4f79, maxUint64 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 5 / 
5 / 5 / 5 / 5 / 5 / 5 / 5 / 5}, +} + +// trimZeros trims trailing zeros from x. +// It finds the largest p such that x % 10^p == 0 +// and then returns x / 10^p, p. +// +// This is here for reference and tested, because it is an optimization +// used by other ftoa algorithms, but in our implementations it has +// never been benchmarked to be faster than trimming zeros after +// formatting into decimal bytes. +func trimZeros(x uint64) (uint64, int) { + const ( + div1e8m = 0xc767074b22e90e21 + div1e8le = maxUint64 / 100000000 + + div1e4m = 0xd288ce703afb7e91 + div1e4le = maxUint64 / 10000 + + div1e2m = 0x8f5c28f5c28f5c29 + div1e2le = maxUint64 / 100 + + div1e1m = 0xcccccccccccccccd + div1e1le = maxUint64 / 10 + ) + + // _ = assert[x - y] asserts at compile time that x == y. + // Assert that the multiplicative inverses are correct + // by checking that (div1eNm * 5^N) % 1<<64 == 1. + var assert [1]struct{} + _ = assert[(div1e8m*5*5*5*5*5*5*5*5)%(1<<64)-1] + _ = assert[(div1e4m*5*5*5*5)%(1<<64)-1] + _ = assert[(div1e2m*5*5)%(1<<64)-1] + _ = assert[(div1e1m*5)%(1<<64)-1] + + // Cut 8 zeros, then 4, then 2, then 1. + p := 0 + for d := bits.RotateLeft64(x*div1e8m, -8); d <= div1e8le; d = bits.RotateLeft64(x*div1e8m, -8) { + x = d + p += 8 + } + if d := bits.RotateLeft64(x*div1e4m, -4); d <= div1e4le { + x = d + p += 4 + } + if d := bits.RotateLeft64(x*div1e2m, -2); d <= div1e2le { + x = d + p += 2 + } + if d := bits.RotateLeft64(x*div1e1m, -1); d <= div1e1le { + x = d + p += 1 + } + return x, p +} diff --git a/src/internal/strconv/math_test.go b/src/internal/strconv/math_test.go index d4f881b5e74d20..55e25f98cfee28 100644 --- a/src/internal/strconv/math_test.go +++ b/src/internal/strconv/math_test.go @@ -5,8 +5,8 @@ package strconv_test import ( - "math" . 
"internal/strconv" + "math" "testing" ) @@ -17,21 +17,36 @@ var pow10Tests = []struct { ok bool }{ {-349, uint128{0, 0}, 0, false}, - {-348, uint128{0xFA8FD5A0081C0288, 0x1732C869CD60E453}, -1157, true}, - {0, uint128{0x8000000000000000, 0x0000000000000000}, 0, true}, - {347, uint128{0xD13EB46469447567, 0x4B7195F2D2D1A9FB}, 1152, true}, + {-348, uint128{0xFA8FD5A0081C0288, 0x1732C869CD60E453}, -1156, true}, + {0, uint128{0x8000000000000000, 0x0000000000000000}, 1, true}, + {347, uint128{0xD13EB46469447567, 0x4B7195F2D2D1A9FB}, 1153, true}, {348, uint128{0, 0}, 0, false}, } func TestPow10(t *testing.T) { for _, tt := range pow10Tests { - mant, exp2, ok := Pow10(tt.exp10) + mant, exp2, ok := pow10(tt.exp10) if mant != tt.mant || exp2 != tt.exp2 { t.Errorf("pow10(%d) = %#016x, %#016x, %d, %v want %#016x,%#016x, %d, %v", tt.exp10, mant.Hi, mant.Lo, exp2, ok, tt.mant.Hi, tt.mant.Lo, tt.exp2, tt.ok) } } + + for p := pow10Min; p <= pow10Max; p++ { + mant, exp2, ok := pow10(p) + if !ok { + t.Errorf("pow10(%d) not ok", p) + continue + } + // Note: -64 instead of -128 because we only used mant.Hi, not all of mant. 
+ have := math.Ldexp(float64(mant.Hi), exp2-64) + want := math.Pow(10, float64(p)) + if math.Abs(have-want)/want > 0.00001 { + t.Errorf("pow10(%d) = %#016x%016x/2^128 * 2^%d = %g want ~%g", p, mant.Hi, mant.Lo, exp2, have, want) + } + } + } func u128(hi, lo uint64) uint128 { @@ -78,3 +93,73 @@ func TestMulLog2_10(t *testing.T) { } } } + +func pow5(p int) uint64 { + x := uint64(1) + for range p { + x *= 5 + } + return x +} + +func TestDivisiblePow5(t *testing.T) { + for p := 1; p <= 22; p++ { + x := pow5(p) + if divisiblePow5(1, p) { + t.Errorf("divisiblePow5(1, %d) = true, want false", p) + } + if divisiblePow5(x-1, p) { + t.Errorf("divisiblePow5(%d, %d) = true, want false", x-1, p) + } + if divisiblePow5(x+1, p) { + t.Errorf("divisiblePow5(%d, %d) = true, want false", x+1, p) + } + if divisiblePow5(x/5, p) { + t.Errorf("divisiblePow5(%d, %d) = true, want false", x/5, p) + } + if !divisiblePow5(0, p) { + t.Errorf("divisiblePow5(0, %d) = false, want true", p) + } + if !divisiblePow5(x, p) { + t.Errorf("divisiblePow5(%d, %d) = false, want true", x, p) + } + if 2*x > x && !divisiblePow5(2*x, p) { + t.Errorf("divisiblePow5(%d, %d) = false, want true", 2*x, p) + } + } +} + +func TestDiv5Tab(t *testing.T) { + for p := 1; p <= 22; p++ { + m := div5Tab[p-1][0] + le := div5Tab[p-1][1] + + // See comment in math.go on div5Tab. + // m needs to be multiplicative inverse of pow5(p). + if m*pow5(p) != 1 { + t.Errorf("pow5Tab[%d-1][0] = %#x, but %#x * (5**%d) = %d, want 1", p, m, m, p, m*pow5(p)) + } + + // le needs to be ⌊(1<<64 - 1) / 5^p⌋. 
+ want := (1<<64 - 1) / pow5(p) + if le != want { + t.Errorf("pow5Tab[%d-1][1] = %#x, want %#x", p, le, want) + } + } +} + +func TestTrimZeros(t *testing.T) { + for _, x := range []uint64{1, 2, 3, 4, 101, 123} { + want := x + for p := range 20 { + haveX, haveP := trimZeros(x) + if haveX != want || haveP != p { + t.Errorf("trimZeros(%d) = %d, %d, want %d, %d", x, haveX, haveP, want, p) + } + if x >= (1<<64-1)/10 { + break + } + x *= 10 + } + } +} diff --git a/src/internal/syscall/windows/at_windows.go b/src/internal/syscall/windows/at_windows.go index 2890e1fdcfc511..b7ca8433c2a87a 100644 --- a/src/internal/syscall/windows/at_windows.go +++ b/src/internal/syscall/windows/at_windows.go @@ -131,6 +131,14 @@ func Openat(dirfd syscall.Handle, name string, flag uint64, perm uint32) (_ sysc if flag&syscall.O_TRUNC != 0 { err = syscall.Ftruncate(h, 0) + if err == ERROR_INVALID_PARAMETER { + // ERROR_INVALID_PARAMETER means truncation is not supported on this file handle. + // Unix's O_TRUNC specification says to ignore O_TRUNC on named pipes and terminal devices. + // We do the same here. 
+ if t, err1 := syscall.GetFileType(h); err1 == nil && (t == syscall.FILE_TYPE_PIPE || t == syscall.FILE_TYPE_CHAR) { + err = nil + } + } if err != nil { syscall.CloseHandle(h) return syscall.InvalidHandle, err diff --git a/src/internal/syscall/windows/syscall_windows.go b/src/internal/syscall/windows/syscall_windows.go index deea3f35ca8d25..b908a2c2519219 100644 --- a/src/internal/syscall/windows/syscall_windows.go +++ b/src/internal/syscall/windows/syscall_windows.go @@ -531,7 +531,7 @@ const ( //sys GetOverlappedResult(handle syscall.Handle, overlapped *syscall.Overlapped, done *uint32, wait bool) (err error) //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys ReOpenFile(filehandle syscall.Handle, desiredAccess uint32, shareMode uint32, flagAndAttributes uint32) (handle syscall.Handle, err error) +//sys ReOpenFile(filehandle syscall.Handle, desiredAccess uint32, shareMode uint32, flagAndAttributes uint32) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] // NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and // other native functions. 
diff --git a/src/internal/syscall/windows/zsyscall_windows.go b/src/internal/syscall/windows/zsyscall_windows.go index 64bb5041073946..d087fd46f6b27b 100644 --- a/src/internal/syscall/windows/zsyscall_windows.go +++ b/src/internal/syscall/windows/zsyscall_windows.go @@ -444,7 +444,7 @@ func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, func ReOpenFile(filehandle syscall.Handle, desiredAccess uint32, shareMode uint32, flagAndAttributes uint32) (handle syscall.Handle, err error) { r0, _, e1 := syscall.SyscallN(procReOpenFile.Addr(), uintptr(filehandle), uintptr(desiredAccess), uintptr(shareMode), uintptr(flagAndAttributes)) handle = syscall.Handle(r0) - if handle == 0 { + if handle == syscall.InvalidHandle { err = errnoErr(e1) } return diff --git a/src/os/os_windows_test.go b/src/os/os_windows_test.go index cd2413d26d4a53..3e7bddc791154c 100644 --- a/src/os/os_windows_test.go +++ b/src/os/os_windows_test.go @@ -2275,3 +2275,16 @@ func TestOpenFileFlagInvalid(t *testing.T) { } f.Close() } + +func TestOpenFileTruncateNamedPipe(t *testing.T) { + t.Parallel() + name := pipeName() + pipe := newBytePipe(t, name, false) + defer pipe.Close() + + f, err := os.OpenFile(name, os.O_TRUNC|os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + t.Fatal(err) + } + f.Close() +} diff --git a/src/os/root_windows_test.go b/src/os/root_windows_test.go index 8ae6f0c9d34d74..47643f98d103bd 100644 --- a/src/os/root_windows_test.go +++ b/src/os/root_windows_test.go @@ -228,3 +228,22 @@ func TestRootSymlinkToDirectory(t *testing.T) { }) } } + +func TestRootOpenFileTruncateNamedPipe(t *testing.T) { + t.Parallel() + name := pipeName() + pipe := newBytePipe(t, name, false) + defer pipe.Close() + + root, err := os.OpenRoot(filepath.Dir(name)) + if err != nil { + t.Fatal(err) + } + defer root.Close() + + f, err := root.OpenFile(filepath.Base(name), os.O_TRUNC|os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + t.Fatal(err) + } + f.Close() +} diff --git 
a/src/runtime/heapdump.go b/src/runtime/heapdump.go index 3671c65ab73151..3a8c374fc0f497 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -382,7 +382,6 @@ func dumpgoroutine(gp *g) { dumpint(uint64(uintptr(unsafe.Pointer(d)))) dumpint(uint64(uintptr(unsafe.Pointer(gp)))) dumpint(uint64(d.sp)) - dumpint(uint64(d.pc)) fn := *(**funcval)(unsafe.Pointer(&d.fn)) dumpint(uint64(uintptr(unsafe.Pointer(fn)))) if d.fn == nil { diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index d745d5f5b95885..12d2d6e3317567 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -1261,7 +1261,7 @@ func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.P //go:linkname pprof_goroutineLeakProfileWithLabels func pprof_goroutineLeakProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) { - return goroutineLeakProfileWithLabelsConcurrent(p, labels) + return goroutineLeakProfileWithLabels(p, labels) } // labels may be nil. If labels is non-nil, it must have the same length as p. @@ -1323,30 +1323,30 @@ func goroutineLeakProfileWithLabelsConcurrent(p []profilerecord.StackRecord, lab return work.goroutineLeak.count, false } - // Use the same semaphore as goroutineProfileWithLabelsConcurrent, - // because ultimately we still use goroutine profiles. - semacquire(&goroutineProfile.sema) - // Unlike in goroutineProfileWithLabelsConcurrent, we don't need to // save the current goroutine stack, because it is obviously not leaked. - + // We also do not need acquire any semaphores on goroutineProfile, because + // we don't use it for storage. pcbuf := makeProfStack() // see saveg() for explanation // Prepare a profile large enough to store all leaked goroutines. n = work.goroutineLeak.count if n > len(p) { - // There's not enough space in p to store the whole profile, so (per the - // contract of runtime.GoroutineProfile) we're not allowed to write to p - // at all and must return n, false. 
- semrelease(&goroutineProfile.sema) + // There's not enough space in p to store the whole profile, so + // we're not allowed to write to p at all and must return n, false. return n, false } // Visit each leaked goroutine and try to record its stack. + var offset int forEachGRace(func(gp1 *g) { - if readgstatus(gp1) == _Gleaked { - doRecordGoroutineProfile(gp1, pcbuf) + if readgstatus(gp1)&^_Gscan == _Gleaked { + systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &p[offset], pcbuf) }) + if labels != nil { + labels[offset] = gp1.labels + } + offset++ } }) @@ -1354,7 +1354,6 @@ func goroutineLeakProfileWithLabelsConcurrent(p []profilerecord.StackRecord, lab raceacquire(unsafe.Pointer(&labelSync)) } - semrelease(&goroutineProfile.sema) return n, true } diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 175452fec9c08b..ded85feffa12cb 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -354,7 +354,6 @@ func deferproc(fn func()) { d.link = gp._defer gp._defer = d d.fn = fn - d.pc = sys.GetCallerPC() // We must not be preempted between calling GetCallerSP and // storing it to d.sp because GetCallerSP's result is a // uintptr stack pointer. @@ -458,7 +457,6 @@ func deferrangefunc() any { d := newdefer() d.link = gp._defer gp._defer = d - d.pc = sys.GetCallerPC() // We must not be preempted between calling GetCallerSP and // storing it to d.sp because GetCallerSP's result is a // uintptr stack pointer. @@ -518,7 +516,6 @@ func deferconvert(d0 *_defer) { } for d1 := d; ; d1 = d1.link { d1.sp = d0.sp - d1.pc = d0.pc if d1.link == nil { d1.link = tail break @@ -547,17 +544,14 @@ func deferprocStack(d *_defer) { d.heap = false d.rangefunc = false d.sp = sys.GetCallerSP() - d.pc = sys.GetCallerPC() // The lines below implement: - // d.panic = nil - // d.fd = nil // d.link = gp._defer // d.head = nil // gp._defer = d - // But without write barriers. The first three are writes to + // But without write barriers. 
The first two are writes to // the stack so they don't need a write barrier, and furthermore // are to uninitialized memory, so they must not use a write barrier. - // The fourth write does not require a write barrier because we + // The third write does not require a write barrier because we // explicitly mark all the defer structures, so we don't need to // keep track of pointers to them with a write barrier. *(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer)) @@ -977,8 +971,6 @@ func (p *_panic) nextDefer() (func(), bool) { fn := d.fn - p.retpc = d.pc - // Unlink and free. popDefer(gp) @@ -1018,6 +1010,12 @@ func (p *_panic) nextFrame() (ok bool) { // it's non-zero. if u.frame.sp == limit { + f := u.frame.fn + if f.deferreturn == 0 { + throw("no deferreturn") + } + p.retpc = f.entry() + uintptr(f.deferreturn) + break // found a frame with linked defers } @@ -1273,15 +1271,6 @@ func recovery(gp *g) { pc, sp, fp := p.retpc, uintptr(p.sp), uintptr(p.fp) p0, saveOpenDeferState := p, p.deferBitsPtr != nil && *p.deferBitsPtr != 0 - // The linker records the f-relative address of a call to deferreturn in f's funcInfo. - // Assuming a "normal" call to recover() inside one of f's deferred functions - // invoked for a panic, that is the desired PC for exiting f. - f := findfunc(pc) - if f.deferreturn == 0 { - throw("no deferreturn") - } - gotoPc := f.entry() + uintptr(f.deferreturn) - // Unwind the panic stack. for ; p != nil && uintptr(p.startSP) < sp; p = p.link { // Don't allow jumping past a pending Goexit. @@ -1304,7 +1293,7 @@ func recovery(gp *g) { // With how subtle defer handling is, this might not actually be // worthwhile though. 
if p.goexit { - gotoPc, sp = p.startPC, uintptr(p.startSP) + pc, sp = p.startPC, uintptr(p.startSP) saveOpenDeferState = false // goexit is unwinding the stack anyway break } @@ -1367,7 +1356,7 @@ func recovery(gp *g) { // branch directly to the deferreturn gp.sched.sp = sp - gp.sched.pc = gotoPc + gp.sched.pc = pc gp.sched.lr = 0 // Restore the bp on platforms that support frame pointers. // N.B. It's fine to not set anything for platforms that don't diff --git a/src/runtime/pinner.go b/src/runtime/pinner.go index 424dd065efd577..dad14a4d09c5e3 100644 --- a/src/runtime/pinner.go +++ b/src/runtime/pinner.go @@ -143,8 +143,8 @@ func isPinned(ptr unsafe.Pointer) bool { } // setPinned marks or unmarks a Go pointer as pinned, when the ptr is a Go pointer. -// It will be ignored while try to pin a non-Go pointer, -// and it will be panic while try to unpin a non-Go pointer, +// It will be ignored while trying to pin a non-Go pointer. +// It will panic while trying to unpin a non-Go pointer, // which should not happen in normal usage. func setPinned(ptr unsafe.Pointer, pin bool) bool { span := spanOfHeap(uintptr(ptr)) diff --git a/src/runtime/pprof/pprof.go b/src/runtime/pprof/pprof.go index b524e992b8b209..23d3da7adc171f 100644 --- a/src/runtime/pprof/pprof.go +++ b/src/runtime/pprof/pprof.go @@ -228,6 +228,15 @@ var mutexProfile = &Profile{ write: writeMutex, } +// goroutineLeakProfileLock ensures that the goroutine leak profile writer observes the +// leaked goroutines discovered during the goroutine leak detection GC cycle +// that was triggered when the profile was requested. +// +// This is needed to prevent a race condition between the garbage collector +// and the goroutine leak profile writer when multiple profile requests are +// issued concurrently. 
+var goroutineLeakProfileLock sync.Mutex + func lockProfiles() { profiles.mu.Lock() if profiles.m == nil { @@ -763,6 +772,15 @@ func writeGoroutine(w io.Writer, debug int) error { // writeGoroutineLeak first invokes a GC cycle that performs goroutine leak detection. // It then writes the goroutine profile, filtering for leaked goroutines. func writeGoroutineLeak(w io.Writer, debug int) error { + // Acquire the goroutine leak detection lock and release + // it after the goroutine leak profile is written. + // + // While the critical section is long, this is needed to prevent + // a race condition between the garbage collector and the goroutine + // leak profile writer when multiple profile requests are issued concurrently. + goroutineLeakProfileLock.Lock() + defer goroutineLeakProfileLock.Unlock() + // Run the GC with leak detection first so that leaked goroutines // may transition to the leaked state. runtime_goroutineLeakGC() diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index b816833e52285f..496716a78e766f 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -774,7 +774,11 @@ func TestMorestack(t *testing.T) { for { go func() { growstack1() - c <- true + // NOTE(vsaioc): This goroutine may leak without this select. + select { + case c <- true: + case <-time.After(duration): + } }() select { case <-t: @@ -1565,6 +1569,210 @@ func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[strin return true } +func goroutineLeakExample() { + <-make(chan struct{}) + panic("unreachable") +} + +func TestGoroutineLeakProfileConcurrency(t *testing.T) { + const leakCount = 3 + + testenv.MustHaveParallelism(t) + regexLeakCount := regexp.MustCompile("goroutineleak profile: total ") + whiteSpace := regexp.MustCompile("\\s+") + + // Regular goroutine profile. Used to check that there is no interference between + // the two profile types. 
+ goroutineProf := Lookup("goroutine") + goroutineLeakProf := goroutineLeakProfile + + // Check that a profile with debug information contains + includesLeak := func(t *testing.T, name, s string) { + if !strings.Contains(s, "runtime/pprof.goroutineLeakExample") { + t.Errorf("%s profile does not contain expected leaked goroutine (runtime/pprof.goroutineLeakExample): %s", name, s) + } + } + + checkFrame := func(i int, j int, locations []*profile.Location, expectedFunctionName string) { + if len(locations) <= i { + t.Errorf("leaked goroutine stack locations out of range at %d of %d", i+1, len(locations)) + return + } + location := locations[i] + if len(location.Line) <= j { + t.Errorf("leaked goroutine stack location lines out of range at %d of %d", j+1, len(location.Line)) + return + } + if location.Line[j].Function.Name != expectedFunctionName { + t.Errorf("leaked goroutine stack expected %s as the location[%d].Line[%d] but found %s (%s:%d)", expectedFunctionName, i, j, location.Line[j].Function.Name, location.Line[j].Function.Filename, location.Line[j].Line) + } + } + + // We use this helper to count the total number of leaked goroutines in the profile. + // + // NOTE(vsaioc): This value should match for the number of leaks produced in this test, + // but other tests could also leak goroutines, in which case we would have a mismatch + // when bulk-running tests. + // + // The two mismatching outcomes are therefore: + // - More leaks than expected, which is a correctness issue with other tests. + // In this case, this test effectively checks other tests wrt + // goroutine leaks during bulk executions (e.g., running all.bash). + // + // - Fewer leaks than expected; this is an unfortunate symptom of scheduling + // non-determinism, which may occur once in a blue moon. We make + // a best-effort attempt to allow the expected leaks to occur, by yielding + // the main thread, but it is never a guarantee. 
+ countLeaks := func(t *testing.T, number int, s string) { + // Strip the profile header + parts := regexLeakCount.Split(s, -1) + if len(parts) < 2 { + t.Fatalf("goroutineleak profile does not contain 'goroutineleak profile: total ': %s\nparts: %v", s, parts) + return + } + + parts = whiteSpace.Split(parts[1], -1) + + count, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + t.Fatalf("goroutineleak profile count is not a number: %s\nerror: %v", s, err) + } + + // Check that the total number of leaked goroutines is exactly the expected number. + if count != int64(number) { + t.Errorf("goroutineleak profile does not contain exactly %d leaked goroutines: %d", number, count) + } + } + + checkLeakStack := func(t *testing.T) func(pc uintptr, locations []*profile.Location, _ map[string][]string) { + return func(pc uintptr, locations []*profile.Location, _ map[string][]string) { + if pc != leakCount { + t.Errorf("expected %d leaked goroutines with specific stack configurations, but found %d", leakCount, pc) + return + } + switch len(locations) { + case 4: + // We expect a receive operation. This is the typical stack. + checkFrame(0, 0, locations, "runtime.gopark") + checkFrame(1, 0, locations, "runtime.chanrecv") + checkFrame(2, 0, locations, "runtime.chanrecv1") + switch len(locations[3].Line) { + case 2: + // Running `go func() { goroutineLeakExample() }()` will produce a stack with 2 lines. + // The anonymous function will have the call to goroutineLeakExample inlined. + checkFrame(3, 1, locations, "runtime/pprof.TestGoroutineLeakProfileConcurrency.func5") + fallthrough + case 1: + // Running `go goroutineLeakExample()` will produce a stack with 1 line. 
+ checkFrame(3, 0, locations, "runtime/pprof.goroutineLeakExample") + default: + t.Errorf("leaked goroutine stack location expected 1 or 2 lines in the 4th location but found %d", len(locations[3].Line)) + return + } + default: + message := fmt.Sprintf("leaked goroutine stack expected 4 or 5 locations but found %d", len(locations)) + for _, location := range locations { + for _, line := range location.Line { + message += fmt.Sprintf("\n%s:%d", line.Function.Name, line.Line) + } + } + t.Errorf("%s", message) + } + } + } + // Leak some goroutines that will feature in the goroutine leak profile + for i := 0; i < leakCount; i++ { + go goroutineLeakExample() + go func() { + // Leak another goroutine that will feature a slightly different stack. + // This includes the frame runtime/pprof.TestGoroutineLeakProfileConcurrency.func1. + goroutineLeakExample() + panic("unreachable") + }() + // Yield several times to allow the goroutines to leak. + runtime.Gosched() + runtime.Gosched() + } + + // Give all goroutines a chance to leak. + time.Sleep(time.Second) + + t.Run("profile contains leak", func(t *testing.T) { + var w strings.Builder + goroutineLeakProf.WriteTo(&w, 0) + parseProfile(t, []byte(w.String()), checkLeakStack(t)) + }) + + t.Run("leak persists between sequential profiling runs", func(t *testing.T) { + for i := 0; i < 2; i++ { + var w strings.Builder + goroutineLeakProf.WriteTo(&w, 0) + parseProfile(t, []byte(w.String()), checkLeakStack(t)) + } + }) + + // Concurrent calls to the goroutine leak profiler should not trigger data races + // or corruption. 
+ t.Run("overlapping profile requests", func(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + Do(ctx, Labels("i", fmt.Sprint(i)), func(context.Context) { + go func() { + defer wg.Done() + for ctx.Err() == nil { + var w strings.Builder + goroutineLeakProf.WriteTo(&w, 1) + countLeaks(t, 2*leakCount, w.String()) + includesLeak(t, "goroutineleak", w.String()) + } + }() + }) + } + wg.Wait() + }) + + // Concurrent calls to the goroutine leak profiler should not trigger data races + // or corruption, or interfere with regular goroutine profiles. + t.Run("overlapping goroutine and goroutine leak profile requests", func(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(2) + Do(ctx, Labels("i", fmt.Sprint(i)), func(context.Context) { + go func() { + defer wg.Done() + for ctx.Err() == nil { + var w strings.Builder + goroutineLeakProf.WriteTo(&w, 1) + countLeaks(t, 2*leakCount, w.String()) + includesLeak(t, "goroutineleak", w.String()) + } + }() + go func() { + defer wg.Done() + for ctx.Err() == nil { + var w strings.Builder + goroutineProf.WriteTo(&w, 1) + // The regular goroutine profile should see the leaked + // goroutines. We simply check that the goroutine leak + // profile does not corrupt the goroutine profile state. 
+ includesLeak(t, "goroutine", w.String()) + } + }() + }) + } + wg.Wait() + }) +} + func TestGoroutineProfileConcurrency(t *testing.T) { testenv.MustHaveParallelism(t) diff --git a/src/runtime/print.go b/src/runtime/print.go index e32ecb94503e63..c01db9d7f98689 100644 --- a/src/runtime/print.go +++ b/src/runtime/print.go @@ -140,13 +140,32 @@ func printcomplex64(c complex64) { } func printuint(v uint64) { + // Note: Avoiding strconv.AppendUint so that it's clearer + // that there are no allocations in this routine. + // cmd/link/internal/ld.TestAbstractOriginSanity + // sees the append and doesn't realize it doesn't allocate. var buf [20]byte - gwrite(strconv.AppendUint(buf[:0], v, 10)) + i := strconv.RuntimeFormatBase10(buf[:], v) + gwrite(buf[i:]) } func printint(v int64) { + // Note: Avoiding strconv.AppendUint so that it's clearer + // that there are no allocations in this routine. + // cmd/link/internal/ld.TestAbstractOriginSanity + // sees the append and doesn't realize it doesn't allocate. 
+ neg := v < 0 + u := uint64(v) + if neg { + u = -u + } var buf [20]byte - gwrite(strconv.AppendInt(buf[:0], v, 10)) + i := strconv.RuntimeFormatBase10(buf[:], u) + if neg { + i-- + buf[i] = '-' + } + gwrite(buf[i:]) } var minhexdigits = 0 // protected by printlock diff --git a/src/runtime/proc.go b/src/runtime/proc.go index ef3a0b7a0e4c60..91740d1fa6d58f 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -1254,8 +1254,8 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool { } } print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n") - throw("castogscanstatus") - panic("not reached") + throw("bad oldval passed to castogscanstatus") + return false } // casgstatusAlwaysTrack is a debug flag that causes casgstatus to always track diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index b346337d60354c..1deeb1244caead 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -1009,7 +1009,7 @@ const ( type _func struct { sys.NotInHeap // Only in static data - entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart + entryOff uint32 // start pc, as offset from moduledata.text nameOff int32 // function name, as index into moduledata.funcnametab. args int32 // in/out args size @@ -1090,7 +1090,6 @@ type _defer struct { heap bool rangefunc bool // true for rangefunc list sp uintptr // sp at time of defer - pc uintptr // pc at time of defer fn func() // can be nil for open-coded defers link *_defer // next defer on G; can point to either heap or stack! diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index 62ad8d13611150..3a814cd2032ea1 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -374,13 +374,19 @@ func (f *_func) funcInfo() funcInfo { // pcHeader holds data used by the pclntab lookups. 
type pcHeader struct { - magic uint32 // 0xFFFFFFF1 - pad1, pad2 uint8 // 0,0 - minLC uint8 // min instruction size - ptrSize uint8 // size of a ptr in bytes - nfunc int // number of functions in the module - nfiles uint // number of entries in the file tab - textStart uintptr // base for function entry PC offsets in this module, equal to moduledata.text + magic uint32 // 0xFFFFFFF1 + pad1, pad2 uint8 // 0,0 + minLC uint8 // min instruction size + ptrSize uint8 // size of a ptr in bytes + nfunc int // number of functions in the module + nfiles uint // number of entries in the file tab + + // The next field used to be textStart. This is no longer stored + // as it requires a relocation. Code should use the moduledata text + // field instead. This unused field can be removed in coordination + // with Delve. + _ uintptr + funcnameOffset uintptr // offset to the funcnametab variable from pcHeader cuOffset uintptr // offset to the cutab variable from pcHeader filetabOffset uintptr // offset to the filetab variable from pcHeader @@ -618,10 +624,9 @@ func moduledataverify1(datap *moduledata) { // Check that the pclntab's format is valid. 
hdr := datap.pcHeader if hdr.magic != 0xfffffff1 || hdr.pad1 != 0 || hdr.pad2 != 0 || - hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize || hdr.textStart != datap.text { + hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize { println("runtime: pcHeader: magic=", hex(hdr.magic), "pad1=", hdr.pad1, "pad2=", hdr.pad2, - "minLC=", hdr.minLC, "ptrSize=", hdr.ptrSize, "pcHeader.textStart=", hex(hdr.textStart), - "text=", hex(datap.text), "pluginpath=", datap.pluginpath) + "minLC=", hdr.minLC, "ptrSize=", hdr.ptrSize, "pluginpath=", datap.pluginpath) throw("invalid function symbol table") } diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index de62762ba76906..6649f72471629a 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -1314,7 +1314,16 @@ func tracebacksomeothers(me *g, showf func(*g) bool) { // from a signal handler initiated during a systemstack call. // The original G is still in the running state, and we want to // print its stack. - if gp.m != getg().m && readgstatus(gp)&^_Gscan == _Grunning { + // + // There's a small window of time in exitsyscall where a goroutine could be + // in _Grunning as it's exiting a syscall. This could be the case even if the + // world is stopped or frozen. + // + // This is OK because the goroutine will not exit the syscall while the world + // is stopped or frozen. This is also why it's safe to check syscallsp here, + // and safe to take the goroutine's stack trace. The syscall path mutates + // syscallsp only just before exiting the syscall. 
+ if gp.m != getg().m && readgstatus(gp)&^_Gscan == _Grunning && gp.syscallsp == 0 { print("\tgoroutine running on other thread; stack unavailable\n") printcreatedby(gp) } else { diff --git a/src/strings/strings_test.go b/src/strings/strings_test.go index b10b5f05ccae53..edfeb0e8138b2f 100644 --- a/src/strings/strings_test.go +++ b/src/strings/strings_test.go @@ -694,7 +694,7 @@ func rot13(r rune) rune { func TestMap(t *testing.T) { // Run a couple of awful growth/shrinkage tests a := tenRunes('a') - // 1. Grow. This triggers two reallocations in Map. + // 1. Grow. This triggers two reallocations in Map. maxRune := func(rune) rune { return unicode.MaxRune } m := Map(maxRune, a) expect := tenRunes(unicode.MaxRune) diff --git a/src/syscall/syscall_windows.go b/src/syscall/syscall_windows.go index 817eeb68113495..3e63897b6bc3a3 100644 --- a/src/syscall/syscall_windows.go +++ b/src/syscall/syscall_windows.go @@ -468,6 +468,14 @@ func Open(name string, flag int, perm uint32) (fd Handle, err error) { if flag&O_TRUNC == O_TRUNC && (createmode == OPEN_EXISTING || (createmode == OPEN_ALWAYS && err == ERROR_ALREADY_EXISTS)) { err = Ftruncate(h, 0) + if err == _ERROR_INVALID_PARAMETER { + // ERROR_INVALID_PARAMETER means truncation is not supported on this file handle. + // Unix's O_TRUNC specification says to ignore O_TRUNC on named pipes and terminal devices. + // We do the same here. + if t, err1 := GetFileType(h); err1 == nil && (t == FILE_TYPE_PIPE || t == FILE_TYPE_CHAR) { + err = nil + } + } if err != nil { CloseHandle(h) return InvalidHandle, err diff --git a/src/syscall/types_windows.go b/src/syscall/types_windows.go index b40b455e7de9b2..3c6d18a8509e97 100644 --- a/src/syscall/types_windows.go +++ b/src/syscall/types_windows.go @@ -34,6 +34,10 @@ const ( WSAECONNRESET Errno = 10054 ) +const ( + _ERROR_INVALID_PARAMETER Errno = 87 +) + const ( // Invented values to support what package os expects. 
O_RDONLY = 0x00000 diff --git a/test/codegen/divmod.go b/test/codegen/divmod.go index 98d0852398c437..9de091af7a022c 100644 --- a/test/codegen/divmod.go +++ b/test/codegen/divmod.go @@ -124,6 +124,7 @@ func div7_int8(i int8) int8 { // arm64: "MULW" // arm64: "SBFX [$]10, R[0-9]+, [$]22," // arm64: "SUB R[0-9]+->31," + // wasm: "I64Const [$]147" return i / 7 } @@ -136,6 +137,7 @@ func div7_int16(i int16) int16 { // arm64: "MULW" // arm64: "SBFX [$]18, R[0-9]+, [$]14," // arm64: "SUB R[0-9]+->31," + // wasm: "I64Const [$]37450" return i / 7 } @@ -145,6 +147,7 @@ func div7_int32(i int32) int32 { // arm64: "MUL " // arm64: "ASR [$]34," // arm64: "SUB R[0-9]+->63," + // wasm: "I64Const [$]2454267027" return i / 7 } @@ -160,6 +163,7 @@ func div9_int32(i int32) int32 { // arm64: "MUL " // arm64: "ASR [$]35," // arm64: "SUB R[0-9]+->63," + // wasm: "I64Const [$]3817748708" return i / 9 } @@ -170,6 +174,8 @@ func div7_int64(i int64) int64 { // arm64: "SMULH" // arm64: "ASR [$]1," // arm64: "SUB R[0-9]+->63," + // wasm: "I64Const [$]613566757" + // wasm: "I64Const [$]1227133513" return i / 7 } @@ -185,6 +191,7 @@ func div3_int32(i int32) int32 { // arm64: "MUL" // arm64: "ASR [$]33," // arm64: "SUB R[0-9]+->63," + // wasm: "I64Const [$]2863311531" return i / 3 } @@ -195,6 +202,8 @@ func div3_int64(i int64) int64 { // arm64: "ADD" // arm64: "ASR [$]1," // arm64: "SUB R[0-9]+->63," + // wasm: "I64Const [$]-1431655766" + // wasm: "I64Const [$]2863311531" return i / 3 } @@ -211,6 +220,8 @@ func div7_int16u(i int16) int16 { // arm64: "MULW" // arm64: "UBFX [$]18, R[0-9]+, [$]14," // arm64: -"SUB" + // wasm: "I64Const [$]37450" + // wasm -"I64Sub" return i / 7 } @@ -226,6 +237,8 @@ func div7_int32u(i int32) int32 { // arm64: "MUL" // arm64: "LSR [$]34," // arm64: -"SUB" + // wasm: "I64Const [$]2454267027" + // wasm -"I64Sub" return i / 7 } @@ -238,6 +251,9 @@ func div7_int64u(i int64) int64 { // arm64: "UMULH" // arm64: "LSR [$]2," // arm64: -"SUB" + // wasm: "I64Const 
[$]1227133514" + // wasm: "I64Const [$]2454267026" + // wasm -"I64Sub" return i / 7 } @@ -249,6 +265,7 @@ func div7_uint8(i uint8) uint8 { // arm64: "MOVD [$]293," // arm64: "MULW" // arm64: "UBFX [$]11, R[0-9]+, [$]21," + // wasm: "I64Const [$]293" return i / 7 } @@ -257,6 +274,7 @@ func div7_uint16(i uint16) uint16 { // arm64: "MOVD [$]74899," // arm64: "MUL" // arm64: "LSR [$]19," + // wasm: "I64Const [$]74899" return i / 7 } @@ -267,6 +285,7 @@ func div3_uint16(i uint16) uint16 { // arm64: "MOVD [$]87382," // arm64: "MUL" // arm64: "LSR [$]18," + // wasm: "I64Const [$]87382" return i / 3 } @@ -275,6 +294,7 @@ func div3_uint32(i uint32) uint32 { // arm64: "MOVD [$]2863311531," // arm64: "MUL" // arm64: "LSR [$]33," + // wasm: "I64Const [$]2863311531" return i / 3 } @@ -286,6 +306,8 @@ func div3_uint64(i uint64) uint64 { // arm64: "MOVD [$]-6148914691236517205," // arm64: "UMULH" // arm64: "LSR [$]1," + // wasm: "I64Const [$]2863311530" + // wasm: "I64Const [$]2863311531" return i / 3 } @@ -307,6 +329,7 @@ func div14_uint32(i uint32) uint32 { // arm64: "MOVD [$]2454267027," // arm64: "MUL" // arm64: "LSR [$]34," + // wasm: "I64Const [$]2454267027" return i / 14 } @@ -318,6 +341,8 @@ func div14_uint64(i uint64) uint64 { // arm64: "MOVD [$]-7905747460161236406," // arm64: "UMULH" // arm64: "LSR [$]2," + // wasm: "I64Const [$]1227133514" + // wasm: "I64Const [$]2454267026" return i / 14 } @@ -345,6 +370,7 @@ func div7_uint32(i uint32) uint32 { // arm64: "SUB" // arm64: "ADD R[0-9]+>>1," // arm64: "LSR [$]34," + // wasm: "I64Const [$]613566757" return i / 7 } @@ -358,6 +384,8 @@ func div7_uint64(i uint64) uint64 { // arm64: "SUB", // arm64: "ADD R[0-9]+>>1," // arm64: "LSR [$]2," + // wasm: "I64Const [$]613566756" + // wasm: "I64Const [$]2454267027" return i / 7 } @@ -370,6 +398,8 @@ func div12345_uint64(i uint64) uint64 { // arm64: "MOVD [$]-6205696892516465602," // arm64: "UMULH" // arm64: "LSR [$]13," + // wasm: "I64Const [$]835683390" + // wasm: "I64Const 
[$]2850090894" return i / 12345 } @@ -480,7 +510,7 @@ func div_divis32_uint8(i uint8) (uint8, bool) { // arm64: "UBFX [$]5, R[0-9]+, [$]3" // arm64: "TSTW [$]31," // arm64: "CSET EQ" - return i/32, i%32 == 0 + return i / 32, i%32 == 0 } func div_ndivis32_uint8(i uint8) (uint8, bool) { @@ -490,7 +520,7 @@ func div_ndivis32_uint8(i uint8) (uint8, bool) { // arm64: "UBFX [$]5, R[0-9]+, [$]3" // arm64: "TSTW [$]31," // arm64: "CSET NE" - return i/32, i%32 != 0 + return i / 32, i%32 != 0 } func div_divis32_uint16(i uint16) (uint16, bool) { @@ -500,7 +530,7 @@ func div_divis32_uint16(i uint16) (uint16, bool) { // arm64: "UBFX [$]5, R[0-9]+, [$]11" // arm64: "TSTW [$]31," // arm64: "CSET EQ" - return i/32, i%32 == 0 + return i / 32, i%32 == 0 } func div_ndivis32_uint16(i uint16) (uint16, bool) { @@ -510,7 +540,7 @@ func div_ndivis32_uint16(i uint16) (uint16, bool) { // arm64: "UBFX [$]5, R[0-9]+, [$]11," // arm64: "TSTW [$]31," // arm64: "CSET NE" - return i/32, i%32 != 0 + return i / 32, i%32 != 0 } func div_divis32_uint32(i uint32) (uint32, bool) { @@ -520,7 +550,7 @@ func div_divis32_uint32(i uint32) (uint32, bool) { // arm64: "UBFX [$]5, R[0-9]+, [$]27," // arm64: "TSTW [$]31," // arm64: "CSET EQ" - return i/32, i%32 == 0 + return i / 32, i%32 == 0 } func div_ndivis32_uint32(i uint32) (uint32, bool) { @@ -530,7 +560,7 @@ func div_ndivis32_uint32(i uint32) (uint32, bool) { // arm64: "UBFX [$]5, R[0-9]+, [$]27," // arm64: "TSTW [$]31," // arm64: "CSET NE" - return i/32, i%32 != 0 + return i / 32, i%32 != 0 } func div_divis32_uint64(i uint64) (uint64, bool) { @@ -541,7 +571,7 @@ func div_divis32_uint64(i uint64) (uint64, bool) { // arm64: "LSR [$]5," // arm64: "TST [$]31," // arm64: "CSET EQ" - return i/32, i%32 == 0 + return i / 32, i%32 == 0 } func div_ndivis32_uint64(i uint64) (uint64, bool) { @@ -552,7 +582,7 @@ func div_ndivis32_uint64(i uint64) (uint64, bool) { // arm64: "LSR [$]5," // arm64: "TST [$]31," // arm64: "CSET NE" - return i/32, i%32 != 0 + return i / 
32, i%32 != 0 } func div_divis32_int8(i int8) (int8, bool) { @@ -566,7 +596,7 @@ func div_divis32_int8(i int8) (int8, bool) { // arm64: "SBFX [$]5, R[0-9]+, [$]3," // arm64: "TSTW [$]31," // arm64: "CSET EQ" - return i/32, i%32 == 0 + return i / 32, i%32 == 0 } func div_ndivis32_int8(i int8) (int8, bool) { @@ -580,7 +610,7 @@ func div_ndivis32_int8(i int8) (int8, bool) { // arm64: "SBFX [$]5, R[0-9]+, [$]3," // arm64: "TSTW [$]31," // arm64: "CSET NE" - return i/32, i%32 != 0 + return i / 32, i%32 != 0 } func div_divis32_int16(i int16) (int16, bool) { @@ -594,7 +624,7 @@ func div_divis32_int16(i int16) (int16, bool) { // arm64: "SBFX [$]5, R[0-9]+, [$]11," // arm64: "TSTW [$]31," // arm64: "CSET EQ" - return i/32, i%32 == 0 + return i / 32, i%32 == 0 } func div_ndivis32_int16(i int16) (int16, bool) { @@ -608,7 +638,7 @@ func div_ndivis32_int16(i int16) (int16, bool) { // arm64: "SBFX [$]5, R[0-9]+, [$]11," // arm64: "TSTW [$]31," // arm64: "CSET NE" - return i/32, i%32 != 0 + return i / 32, i%32 != 0 } func div_divis32_int32(i int32) (int32, bool) { @@ -622,7 +652,7 @@ func div_divis32_int32(i int32) (int32, bool) { // arm64: "SBFX [$]5, R[0-9]+, [$]27," // arm64: "TSTW [$]31," // arm64: "CSET EQ" - return i/32, i%32 == 0 + return i / 32, i%32 == 0 } func div_ndivis32_int32(i int32) (int32, bool) { @@ -636,7 +666,7 @@ func div_ndivis32_int32(i int32) (int32, bool) { // arm64: "SBFX [$]5, R[0-9]+, [$]27," // arm64: "TSTW [$]31," // arm64: "CSET NE" - return i/32, i%32 != 0 + return i / 32, i%32 != 0 } func div_divis32_int64(i int64) (int64, bool) { @@ -651,7 +681,7 @@ func div_divis32_int64(i int64) (int64, bool) { // arm64: "ASR [$]5," // arm64: "TST [$]31," // arm64: "CSET EQ" - return i/32, i%32 == 0 + return i / 32, i%32 == 0 } func div_ndivis32_int64(i int64) (int64, bool) { @@ -666,7 +696,7 @@ func div_ndivis32_int64(i int64) (int64, bool) { // arm64: "ASR [$]5," // arm64: "TST [$]31," // arm64: "CSET NE" - return i/32, i%32 != 0 + return i / 32, i%32 != 0 } 
// Divisibility and non-divisibility by non-power-of-two. @@ -923,7 +953,7 @@ func div_divis6_uint8(i uint8) (uint8, bool) { // arm64: "UBFX [$]11, R[0-9]+, [$]21," // arm64: "CSET EQ" // arm64: -"RO[RL]" - return i/6, i%6 == 0 + return i / 6, i%6 == 0 } func div_ndivis6_uint8(i uint8) (uint8, bool) { @@ -936,7 +966,7 @@ func div_ndivis6_uint8(i uint8) (uint8, bool) { // arm64: "UBFX [$]11, R[0-9]+, [$]21," // arm64: "CSET NE" // arm64: -"RO[RL]" - return i/6, i%6 != 0 + return i / 6, i%6 != 0 } func div_divis6_uint16(i uint16) (uint16, bool) { @@ -950,7 +980,7 @@ func div_divis6_uint16(i uint16) (uint16, bool) { // arm64: "LSR [$]19," // arm64: "CSET EQ" // arm64: -"RO[RL]" - return i/6, i%6 == 0 + return i / 6, i%6 == 0 } func div_ndivis6_uint16(i uint16) (uint16, bool) { @@ -964,7 +994,7 @@ func div_ndivis6_uint16(i uint16) (uint16, bool) { // arm64: "LSR [$]19," // arm64: "CSET NE" // arm64: -"RO[RL]" - return i/6, i%6 != 0 + return i / 6, i%6 != 0 } func div_divis6_uint32(i uint32) (uint32, bool) { @@ -978,7 +1008,7 @@ func div_divis6_uint32(i uint32) (uint32, bool) { // arm64: "LSR [$]34," // arm64: "CSET EQ" // arm64: -"RO[RL]" - return i/6, i%6 == 0 + return i / 6, i%6 == 0 } func div_ndivis6_uint32(i uint32) (uint32, bool) { @@ -992,7 +1022,7 @@ func div_ndivis6_uint32(i uint32) (uint32, bool) { // arm64: "LSR [$]34," // arm64: "CSET NE" // arm64: -"RO[RL]" - return i/6, i%6 != 0 + return i / 6, i%6 != 0 } func div_divis6_uint64(i uint64) (uint64, bool) { @@ -1009,7 +1039,7 @@ func div_divis6_uint64(i uint64) (uint64, bool) { // arm64: "LSR [$]2," // arm64: "CSET EQ" // arm64: -"RO[RL]" - return i/6, i%6 == 0 + return i / 6, i%6 == 0 } func div_ndivis6_uint64(i uint64) (uint64, bool) { @@ -1026,7 +1056,7 @@ func div_ndivis6_uint64(i uint64) (uint64, bool) { // arm64: "LSR [$]2," // arm64: "CSET NE" // arm64: -"RO[RL]" - return i/6, i%6 != 0 + return i / 6, i%6 != 0 } func div_divis6_int8(i int8) (int8, bool) { @@ -1042,7 +1072,7 @@ func div_divis6_int8(i 
int8) (int8, bool) { // arm64: "SUB R[0-9]+->31," // arm64: "CSET EQ" // arm64: -"RO[RL]" - return i/6, i%6 == 0 + return i / 6, i%6 == 0 } func div_ndivis6_int8(i int8) (int8, bool) { @@ -1058,7 +1088,7 @@ func div_ndivis6_int8(i int8) (int8, bool) { // arm64: "SUB R[0-9]+->31," // arm64: "CSET NE" // arm64: -"RO[RL]" - return i/6, i%6 != 0 + return i / 6, i%6 != 0 } func div_divis6_int16(i int16) (int16, bool) { @@ -1074,7 +1104,7 @@ func div_divis6_int16(i int16) (int16, bool) { // arm64: "SUB R[0-9]+->31," // arm64: "CSET EQ" // arm64: -"RO[RL]" - return i/6, i%6 == 0 + return i / 6, i%6 == 0 } func div_ndivis6_int16(i int16) (int16, bool) { @@ -1090,7 +1120,7 @@ func div_ndivis6_int16(i int16) (int16, bool) { // arm64: "SUB R[0-9]+->31," // arm64: "CSET NE" // arm64: -"RO[RL]" - return i/6, i%6 != 0 + return i / 6, i%6 != 0 } func div_divis6_int32(i int32) (int32, bool) { @@ -1107,7 +1137,7 @@ func div_divis6_int32(i int32) (int32, bool) { // arm64: "SUB R[0-9]+->63," // arm64: "CSET EQ" // arm64: -"RO[RL]" - return i/6, i%6 == 0 + return i / 6, i%6 == 0 } func div_ndivis6_int32(i int32) (int32, bool) { @@ -1124,7 +1154,7 @@ func div_ndivis6_int32(i int32) (int32, bool) { // arm64: "SUB R[0-9]+->63," // arm64: "CSET NE" // arm64: -"RO[RL]" - return i/6, i%6 != 0 + return i / 6, i%6 != 0 } func div_divis6_int64(i int64) (int64, bool) { @@ -1145,7 +1175,7 @@ func div_divis6_int64(i int64) (int64, bool) { // arm64: "SUB R[0-9]+->63," // arm64: "CSET EQ" // arm64: -"RO[RL]" - return i/6, i%6 == 0 + return i / 6, i%6 == 0 } func div_ndivis6_int64(i int64) (int64, bool) { @@ -1166,5 +1196,5 @@ func div_ndivis6_int64(i int64) (int64, bool) { // arm64: "SUB R[0-9]+->63," // arm64: "CSET NE" // arm64: -"RO[RL]" - return i/6, i%6 != 0 + return i / 6, i%6 != 0 } diff --git a/test/codegen/maps.go b/test/codegen/maps.go index fe38c99cb8f007..48438eb90c6bae 100644 --- a/test/codegen/maps.go +++ b/test/codegen/maps.go @@ -37,6 +37,28 @@ func AccessString2(m map[string]int) 
bool { return ok } +func AccessStringIntArray2(m map[string][16]int, k string) bool { + // amd64:-"MOVUPS" + _, ok := m[k] + return ok +} + +type Struct struct { + A, B, C, D, E, F, G, H, I, J int +} + +func AccessStringStruct2(m map[string]Struct, k string) bool { + // amd64:-"MOVUPS" + _, ok := m[k] + return ok +} + +func AccessIntArrayLarge2(m map[int][512]int, k int) bool { + // amd64:-"REP",-"MOVSQ" + _, ok := m[k] + return ok +} + // ------------------- // // String Conversion // // ------------------- //