diff options
| author | Cherry Mui <cherryyz@google.com> | 2025-11-20 14:40:43 -0500 |
|---|---|---|
| committer | Cherry Mui <cherryyz@google.com> | 2025-11-20 14:40:43 -0500 |
| commit | e3d4645693bc030b9ff9b867f1d374a1d72ef2fe (patch) | |
| tree | 5d9c6783b4b1901e072ed253acc6ecdd909b23bc /src | |
| parent | 95b4ad525fc8d70c881960ab9f75f31548023bed (diff) | |
| parent | ca37d24e0b9369b8086959df5bc230b38bf98636 (diff) | |
| download | go-e3d4645693bc030b9ff9b867f1d374a1d72ef2fe.tar.xz | |
[dev.simd] all: merge master (ca37d24) into dev.simd
Conflicts:
- src/cmd/compile/internal/typecheck/builtin.go
Merge List:
+ 2025-11-20 ca37d24e0b net/http: drop unused "broken" field from persistConn
+ 2025-11-20 4b740af56a cmd/internal/obj/x86: handle global reference in From3 in dynlink mode
+ 2025-11-20 790384c6c2 spec: adjust rule for type parameter on RHS of alias declaration
+ 2025-11-20 a49b0302d0 net/http: correctly close fake net.Conns
+ 2025-11-20 32f5aadd2f cmd/compile: stack allocate backing stores during append
+ 2025-11-20 a18aff8057 runtime: select GC mark workers during start-the-world
+ 2025-11-20 829779f4fe runtime: split findRunnableGCWorker in two
+ 2025-11-20 ab59569099 go/version: use "custom" as an example of a version suffix
+ 2025-11-19 c4bb9653ba cmd/compile: Implement LoweredZeroLoop with LSX Instruction on loong64
+ 2025-11-19 7f2ae21fb4 cmd/internal/obj/loong64: add MULW.D.W[U] instructions
+ 2025-11-19 a2946f2385 crypto: add Encapsulator and Decapsulator interfaces
+ 2025-11-19 6b83bd7146 crypto/ecdh: add KeyExchanger interface
+ 2025-11-19 4fef9f8b55 go/types, types2: fix object path for grouped declaration statements
+ 2025-11-19 33529db142 spec: escape double-ampersands
+ 2025-11-19 dc42565a20 cmd/compile: fix control flow for unsigned divisions proof relations
+ 2025-11-19 e64023dcbf cmd/compile: cleanup useless if statement in prove
+ 2025-11-19 2239520d1c test: go fmt prove.go tests
+ 2025-11-19 489d3dafb7 math: switch s390x math.Pow to generic implementation
+ 2025-11-18 8c41a482f9 runtime: add dlog.hexdump
+ 2025-11-18 e912618bd2 runtime: add hexdumper
+ 2025-11-18 2cf9d4b62f Revert "net/http: do not discard body content when closing it within request handlers"
+ 2025-11-18 4d0658bb08 cmd/compile: prefer fixed registers for values
+ 2025-11-18 ba634ca5c7 cmd/compile: fold boolean NOT into branches
+ 2025-11-18 8806d53c10 cmd/link: align sections, not symbols after DWARF compress
+ 2025-11-18 c93766007d runtime: do not print recovered when double panic with the same value
+ 2025-11-18 9859b43643 cmd/asm,cmd/compile,cmd/internal/obj/riscv: use compressed instructions on riscv64
+ 2025-11-17 b9ef0633f6 cmd/internal/sys,internal/goarch,runtime: enable the use of compressed instructions on riscv64
+ 2025-11-17 a087dea869 debug/elf: sync new loong64 relocation types up to LoongArch ELF psABI v20250521
+ 2025-11-17 e1a12c781f cmd/compile: use 32x32->64 multiplies on arm64
+ 2025-11-17 6caab99026 runtime: relax TestMemoryLimit on darwin a bit more
+ 2025-11-17 eda2e8c683 runtime: clear frame pointer at thread entry points
+ 2025-11-17 6919858338 runtime: rename findrunnable references to findRunnable
+ 2025-11-17 8e734ec954 go/ast: fix BasicLit.End position for raw strings containing \r
+ 2025-11-17 592775ec7d crypto/mlkem: avoid a few unnecessary inverse NTT calls
+ 2025-11-17 590cf18daf crypto/mlkem/mlkemtest: add derandomized Encapsulate768/1024
+ 2025-11-17 c12c337099 cmd/compile: teach prove about subtract idioms
+ 2025-11-17 bc15963813 cmd/compile: clean up prove pass
+ 2025-11-17 1297fae708 go/token: add (*File).End method
+ 2025-11-17 65c09eafdf runtime: hoist invariant code out of heapBitsSmallForAddrInline
+ 2025-11-17 594129b80c internal/runtime/maps: update doc for table.Clear
+ 2025-11-15 c58d075e9a crypto/rsa: deprecate PKCS#1 v1.5 encryption
+ 2025-11-14 d55ecea9e5 runtime: usleep before stealing runnext only if not in syscall
+ 2025-11-14 410ef44f00 cmd: update x/tools to 59ff18c
+ 2025-11-14 50128a2154 runtime: support runtime.freegc in size-specialized mallocs for noscan objects
+ 2025-11-14 c3708350a4 cmd/go: tests: rename git-min-vers->git-sha256
+ 2025-11-14 aea881230d std: fix printf("%q", int) mistakes
+ 2025-11-14 120f1874ef runtime: add more precise test of assist credit handling for runtime.freegc
+ 2025-11-14 fecfcaa4f6 runtime: add runtime.freegc to reduce GC work
+ 2025-11-14 5a347b775e runtime: set GOEXPERIMENT=runtimefreegc to disabled by default
+ 2025-11-14 1a03d0db3f runtime: skip tests for GOEXPERIMENT=arenas that do not handle clobberfree=1
+ 2025-11-14 cb0d9980f5 net/http: do not discard body content when closing it within request handlers
+ 2025-11-14 03ed43988f cmd/compile: allow multi-field structs to be stored directly in interfaces
+ 2025-11-14 1bb1f2bf0c runtime: put AddCleanup cleanup arguments in their own allocation
+ 2025-11-14 9fd2e44439 runtime: add AddCleanup benchmark
+ 2025-11-14 80c91eedbb runtime: ensure weak handles end up in their own allocation
+ 2025-11-14 7a8d0b5d53 runtime: add debug mode to extend _Grunning-without-P windows
+ 2025-11-14 710abf74da internal/runtime/cgobench: add Go function call benchmark for comparison
+ 2025-11-14 b24aec598b doc, cmd/internal/obj/riscv: document the riscv64 assembler
+ 2025-11-14 a0e738c657 cmd/compile/internal: remove incorrect riscv64 SLTI rule
+ 2025-11-14 2cdcc4150b cmd/compile: fold negation into multiplication
+ 2025-11-14 b57962b7c7 bytes: fix panic in bytes.Buffer.Peek
+ 2025-11-14 0a569528ea cmd/compile: optimize comparisons with single bit difference
+ 2025-11-14 1e5e6663e9 cmd/compile: remove unnecessary casts and types from riscv64 rules
+ 2025-11-14 ddd8558e61 go/types, types2: swap object.color for Checker.objPathIdx
+ 2025-11-14 9daaab305c cmd/link/internal/ld: make runtime.buildVersion with experiments valid
+ 2025-11-13 d50a571ddf test: fix tests to work with sizespecializedmalloc turned off
+ 2025-11-13 704f841eab cmd/trace: annotation proc start/stop with thread and proc always
+ 2025-11-13 17a02b9106 net/http: remove unused isLitOrSingle and isNotToken
+ 2025-11-13 ff61991aed cmd/go: fix flaky TestScript/mod_get_direct
+ 2025-11-13 129d0cb543 net/http/cgi: accept INCLUDED as protocol for server side includes
+ 2025-11-13 77c5130100 go/types: minor simplification
+ 2025-11-13 7601cd3880 go/types: generate cycles.go
+ 2025-11-13 7a372affd9 go/types, types2: rename definedType to declaredType and clarify docs
Change-Id: Ibaa9bdb982364892f80e511c1bb12661fcd5fb86
Diffstat (limited to 'src')
318 files changed, 9810 insertions, 2739 deletions
diff --git a/src/bytes/buffer.go b/src/bytes/buffer.go index 3eb5b350c3..6cb4d6a8f6 100644 --- a/src/bytes/buffer.go +++ b/src/bytes/buffer.go @@ -86,7 +86,7 @@ func (b *Buffer) Peek(n int) ([]byte, error) { if b.Len() < n { return b.buf[b.off:], io.EOF } - return b.buf[b.off:n], nil + return b.buf[b.off : b.off+n], nil } // empty reports whether the unread portion of the buffer is empty. diff --git a/src/bytes/buffer_test.go b/src/bytes/buffer_test.go index 5f5cc483b0..9c1ba0a838 100644 --- a/src/bytes/buffer_test.go +++ b/src/bytes/buffer_test.go @@ -533,19 +533,25 @@ func TestReadString(t *testing.T) { var peekTests = []struct { buffer string + skip int n int expected string err error }{ - {"", 0, "", nil}, - {"aaa", 3, "aaa", nil}, - {"foobar", 2, "fo", nil}, - {"a", 2, "a", io.EOF}, + {"", 0, 0, "", nil}, + {"aaa", 0, 3, "aaa", nil}, + {"foobar", 0, 2, "fo", nil}, + {"a", 0, 2, "a", io.EOF}, + {"helloworld", 4, 3, "owo", nil}, + {"helloworld", 5, 5, "world", nil}, + {"helloworld", 5, 6, "world", io.EOF}, + {"helloworld", 10, 1, "", io.EOF}, } func TestPeek(t *testing.T) { for _, test := range peekTests { buf := NewBufferString(test.buffer) + buf.Next(test.skip) bytes, err := buf.Peek(test.n) if string(bytes) != test.expected { t.Errorf("expected %q, got %q", test.expected, bytes) @@ -553,8 +559,8 @@ func TestPeek(t *testing.T) { if err != test.err { t.Errorf("expected error %v, got %v", test.err, err) } - if buf.Len() != len(test.buffer) { - t.Errorf("bad length after peek: %d, want %d", buf.Len(), len(test.buffer)) + if buf.Len() != len(test.buffer)-test.skip { + t.Errorf("bad length after peek: %d, want %d", buf.Len(), len(test.buffer)-test.skip) } } } diff --git a/src/cmd/asm/internal/asm/testdata/amd64dynlinkerror.s b/src/cmd/asm/internal/asm/testdata/amd64dynlinkerror.s index 4bf58a39a4..8b104307cd 100644 --- a/src/cmd/asm/internal/asm/testdata/amd64dynlinkerror.s +++ b/src/cmd/asm/internal/asm/testdata/amd64dynlinkerror.s @@ -169,3 +169,8 @@ TEXT 
·a34(SB), 0, $0-0 SHLXQ AX, CX, R15 ADDQ $1, R15 RET + +// Ensure from3 get GOT-rewritten without errors. +TEXT ·a35(SB), 0, $0-0 + VGF2P8AFFINEQB $0, runtime·writeBarrier(SB), Z1, Z1 + RET diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s index c820a0a5a1..277396bf27 100644 --- a/src/cmd/asm/internal/asm/testdata/loong64enc1.s +++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s @@ -212,6 +212,12 @@ lable2: SRLV $32, R4, R5 // 85804500 SRLV $32, R4 // 84804500 + // MULW.D.W[U] instructions + MULWVW R4, R5 // a5101f00 + MULWVW R4, R5, R6 // a6101f00 + MULWVWU R4, R5 // a5901f00 + MULWVWU R4, R5, R6 // a6901f00 + MASKEQZ R4, R5, R6 // a6101300 MASKNEZ R4, R5, R6 // a6901300 diff --git a/src/cmd/asm/internal/flags/flags.go b/src/cmd/asm/internal/flags/flags.go index e15a062749..19aa65630f 100644 --- a/src/cmd/asm/internal/flags/flags.go +++ b/src/cmd/asm/internal/flags/flags.go @@ -29,8 +29,9 @@ var ( ) var DebugFlags struct { - MayMoreStack string `help:"call named function before all stack growth checks"` - PCTab string `help:"print named pc-value table\nOne of: pctospadj, pctofile, pctoline, pctoinline, pctopcdata"` + CompressInstructions int `help:"use compressed instructions when possible (if supported by architecture)"` + MayMoreStack string `help:"call named function before all stack growth checks"` + PCTab string `help:"print named pc-value table\nOne of: pctospadj, pctofile, pctoline, pctoinline, pctopcdata"` } var ( @@ -47,6 +48,8 @@ func init() { flag.Var(objabi.NewDebugFlag(&DebugFlags, nil), "d", "enable debugging settings; try -d help") objabi.AddVersionFlag() // -V objabi.Flagcount("S", "print assembly and machine code", &PrintOut) + + DebugFlags.CompressInstructions = 1 } // MultiFlag allows setting a value multiple times to collect a list, as in -I=dir1 -I=dir2. 
diff --git a/src/cmd/asm/main.go b/src/cmd/asm/main.go index f2697db516..25cf307140 100644 --- a/src/cmd/asm/main.go +++ b/src/cmd/asm/main.go @@ -40,6 +40,7 @@ func main() { log.Fatalf("unrecognized architecture %s", GOARCH) } ctxt := obj.Linknew(architecture.LinkArch) + ctxt.CompressInstructions = flags.DebugFlags.CompressInstructions != 0 ctxt.Debugasm = flags.PrintOut ctxt.Debugvlog = flags.DebugV ctxt.Flag_dynlink = *flags.Dynlink diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go index 9e8ab2f488..b532bf435e 100644 --- a/src/cmd/compile/internal/base/debug.go +++ b/src/cmd/compile/internal/base/debug.go @@ -20,6 +20,7 @@ type DebugFlags struct { Append int `help:"print information about append compilation"` Checkptr int `help:"instrument unsafe pointer conversions\n0: instrumentation disabled\n1: conversions involving unsafe.Pointer are instrumented\n2: conversions to unsafe.Pointer force heap allocation" concurrent:"ok"` Closure int `help:"print information about closure compilation"` + CompressInstructions int `help:"use compressed instructions when possible (if supported by architecture)"` Converthash string `help:"hash value for use in debugging changes to platform-dependent float-to-[u]int conversion" concurrent:"ok"` Defer int `help:"print information about defer compilation"` DisableNil int `help:"disable nil checks" concurrent:"ok"` diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go index 1d211e0a2d..63cae41524 100644 --- a/src/cmd/compile/internal/base/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -177,6 +177,7 @@ func ParseFlags() { Flag.WB = true Debug.ConcurrentOk = true + Debug.CompressInstructions = 1 Debug.MaxShapeLen = 500 Debug.AlignHot = 1 Debug.InlFuncsWithClosures = 1 @@ -299,6 +300,7 @@ func ParseFlags() { } parseSpectre(Flag.Spectre) // left as string for RecordFlags + Ctxt.CompressInstructions = Debug.CompressInstructions != 0 Ctxt.Flag_shared = 
Ctxt.Flag_dynlink || Ctxt.Flag_shared Ctxt.Flag_optimize = Flag.N == 0 Ctxt.Debugasm = int(Flag.S) diff --git a/src/cmd/compile/internal/deadlocals/deadlocals.go b/src/cmd/compile/internal/deadlocals/deadlocals.go index 238450416a..55ad0387a4 100644 --- a/src/cmd/compile/internal/deadlocals/deadlocals.go +++ b/src/cmd/compile/internal/deadlocals/deadlocals.go @@ -44,6 +44,11 @@ func Funcs(fns []*ir.Func) { *as.lhs = ir.BlankNode *as.rhs = zero } + if len(assigns) > 0 { + // k.Defn might be pointing at one of the + // assignments we're overwriting. + k.Defn = nil + } } } } diff --git a/src/cmd/compile/internal/escape/leaks.go b/src/cmd/compile/internal/escape/leaks.go index 942f87d2a2..176bccd847 100644 --- a/src/cmd/compile/internal/escape/leaks.go +++ b/src/cmd/compile/internal/escape/leaks.go @@ -124,3 +124,21 @@ func parseLeaks(s string) leaks { copy(l[:], s[4:]) return l } + +func ParseLeaks(s string) leaks { + return parseLeaks(s) +} + +// Any reports whether the value flows anywhere at all. +func (l leaks) Any() bool { + // TODO: do mutator/callee matter? 
+ if l.Heap() >= 0 || l.Mutator() >= 0 || l.Callee() >= 0 { + return true + } + for i := range numEscResults { + if l.Result(i) >= 0 { + return true + } + } + return false +} diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 918d3f3514..6418ab9357 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -22,6 +22,7 @@ import ( "cmd/compile/internal/pkginit" "cmd/compile/internal/reflectdata" "cmd/compile/internal/rttype" + "cmd/compile/internal/slice" "cmd/compile/internal/ssa" "cmd/compile/internal/ssagen" "cmd/compile/internal/staticinit" @@ -271,6 +272,8 @@ func Main(archInit func(*ssagen.ArchInfo)) { base.Timer.Start("fe", "escapes") escape.Funcs(typecheck.Target.Funcs) + slice.Funcs(typecheck.Target.Funcs) + loopvar.LogTransformations(transformed) // Collect information for go:nowritebarrierrec diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index 25654ca253..1a3514db6c 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -192,6 +192,7 @@ type CallExpr struct { IsDDD bool GoDefer bool // whether this call is part of a go or defer statement NoInline bool // whether this call must not be inlined + UseBuf bool // use stack buffer for backing store (OAPPEND only) } func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr { @@ -1280,3 +1281,28 @@ func MethodExprFunc(n Node) *types.Field { base.Fatalf("unexpected node: %v (%v)", n, n.Op()) panic("unreachable") } + +// A MoveToHeapExpr takes a slice as input and moves it to the +// heap (by copying the backing store if it is not already +// on the heap). +type MoveToHeapExpr struct { + miniExpr + Slice Node + // An expression that evaluates to a *runtime._type + // that represents the slice element type. 
+ RType Node + // If PreserveCapacity is true, the capacity of + // the resulting slice, and all of the elements in + // [len:cap], must be preserved. + // If PreserveCapacity is false, the resulting + // slice may have any capacity >= len, with any + // elements in the resulting [len:cap] range zeroed. + PreserveCapacity bool +} + +func NewMoveToHeapExpr(pos src.XPos, slice Node) *MoveToHeapExpr { + n := &MoveToHeapExpr{Slice: slice} + n.pos = pos + n.op = OMOVE2HEAP + return n +} diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go index ae4ff62652..eb64cce47b 100644 --- a/src/cmd/compile/internal/ir/fmt.go +++ b/src/cmd/compile/internal/ir/fmt.go @@ -574,7 +574,7 @@ func exprFmt(n Node, s fmt.State, prec int) { // Special case for rune constants. if typ == types.RuneType || typ == types.UntypedRune { if x, ok := constant.Uint64Val(val); ok && x <= utf8.MaxRune { - fmt.Fprintf(s, "%q", x) + fmt.Fprintf(s, "%q", rune(x)) return } } diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go index 01f1c0c502..63f1b1c931 100644 --- a/src/cmd/compile/internal/ir/name.go +++ b/src/cmd/compile/internal/ir/name.go @@ -43,7 +43,7 @@ type Name struct { Func *Func // TODO(austin): nil for I.M Offset_ int64 val constant.Value - Opt any // for use by escape analysis + Opt any // for use by escape or slice analysis Embed *[]Embed // list of embedded files, for ONAME var // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2). 
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 8c61bb6ed5..f26f61cb18 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -293,6 +293,7 @@ const ( OLINKSYMOFFSET // offset within a name OJUMPTABLE // A jump table structure for implementing dense expression switches OINTERFACESWITCH // A type switch with interface cases + OMOVE2HEAP // Promote a stack-backed slice to heap // opcodes for generics ODYNAMICDOTTYPE // x = i.(T) where T is a type parameter (or derived from a type parameter) diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go index 2221045c93..4298b3a43d 100644 --- a/src/cmd/compile/internal/ir/node_gen.go +++ b/src/cmd/compile/internal/ir/node_gen.go @@ -1175,6 +1175,34 @@ func (n *MakeExpr) editChildrenWithHidden(edit func(Node) Node) { } } +func (n *MoveToHeapExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } +func (n *MoveToHeapExpr) copy() Node { + c := *n + c.init = copyNodes(c.init) + return &c +} +func (n *MoveToHeapExpr) doChildren(do func(Node) bool) bool { + if doNodes(n.init, do) { + return true + } + if n.Slice != nil && do(n.Slice) { + return true + } + return false +} +func (n *MoveToHeapExpr) doChildrenWithHidden(do func(Node) bool) bool { + return n.doChildren(do) +} +func (n *MoveToHeapExpr) editChildren(edit func(Node) Node) { + editNodes(n.init, edit) + if n.Slice != nil { + n.Slice = edit(n.Slice).(Node) + } +} +func (n *MoveToHeapExpr) editChildrenWithHidden(edit func(Node) Node) { + n.editChildren(edit) +} + func (n *Name) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) } diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go index 7494beee4c..f042ad84a4 100644 --- a/src/cmd/compile/internal/ir/op_string.go +++ b/src/cmd/compile/internal/ir/op_string.go @@ -151,18 +151,19 @@ 
func _() { _ = x[OLINKSYMOFFSET-140] _ = x[OJUMPTABLE-141] _ = x[OINTERFACESWITCH-142] - _ = x[ODYNAMICDOTTYPE-143] - _ = x[ODYNAMICDOTTYPE2-144] - _ = x[ODYNAMICTYPE-145] - _ = x[OTAILCALL-146] - _ = x[OGETG-147] - _ = x[OGETCALLERSP-148] - _ = x[OEND-149] + _ = x[OMOVE2HEAP-143] + _ = x[ODYNAMICDOTTYPE-144] + _ = x[ODYNAMICDOTTYPE2-145] + _ = x[ODYNAMICTYPE-146] + _ = x[OTAILCALL-147] + _ = x[OGETG-148] + _ = x[OGETCALLERSP-149] + _ = x[OEND-150] } -const _Op_name = "XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLEARCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTLNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERSTRINGHEADERRECOVERRECVRUNESTRSELRECV2MINMAXREALIMAGCOMPLEXUNSAFEADDUNSAFESLICEUNSAFESLICEDATAUNSAFESTRINGUNSAFESTRINGDATAMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWINLCALLMAKEFACEITABIDATASPTRCFUNCCHECKNILRESULTINLMARKLINKSYMOFFSETJUMPTABLEINTERFACESWITCHDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERSPEND" +const _Op_name = 
"XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLEARCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTLNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERSTRINGHEADERRECOVERRECVRUNESTRSELRECV2MINMAXREALIMAGCOMPLEXUNSAFEADDUNSAFESLICEUNSAFESLICEDATAUNSAFESTRINGUNSAFESTRINGDATAMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWINLCALLMAKEFACEITABIDATASPTRCFUNCCHECKNILRESULTINLMARKLINKSYMOFFSETJUMPTABLEINTERFACESWITCHMOVE2HEAPDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERSPEND" -var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 129, 141, 143, 146, 156, 163, 170, 177, 181, 185, 193, 201, 210, 213, 218, 223, 230, 237, 243, 252, 260, 268, 274, 278, 287, 294, 298, 301, 308, 314, 317, 323, 330, 338, 342, 349, 357, 359, 361, 363, 365, 367, 369, 374, 379, 387, 390, 399, 402, 406, 414, 421, 430, 443, 446, 449, 452, 455, 458, 461, 467, 470, 473, 479, 483, 486, 490, 495, 500, 507, 512, 516, 521, 529, 537, 543, 552, 563, 575, 582, 586, 593, 601, 604, 607, 611, 615, 622, 631, 642, 657, 669, 685, 693, 702, 707, 712, 716, 724, 729, 733, 736, 740, 742, 747, 749, 754, 760, 766, 772, 778, 785, 793, 797, 802, 806, 811, 819, 825, 832, 845, 854, 869, 883, 898, 909, 917, 921, 932, 935} +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 129, 141, 143, 146, 156, 163, 170, 177, 181, 185, 193, 201, 210, 213, 218, 223, 230, 237, 243, 252, 260, 268, 274, 278, 287, 294, 298, 301, 308, 314, 317, 323, 330, 338, 342, 
349, 357, 359, 361, 363, 365, 367, 369, 374, 379, 387, 390, 399, 402, 406, 414, 421, 430, 443, 446, 449, 452, 455, 458, 461, 467, 470, 473, 479, 483, 486, 490, 495, 500, 507, 512, 516, 521, 529, 537, 543, 552, 563, 575, 582, 586, 593, 601, 604, 607, 611, 615, 622, 631, 642, 657, 669, 685, 693, 702, 707, 712, 716, 724, 729, 733, 736, 740, 742, 747, 749, 754, 760, 766, 772, 778, 785, 793, 797, 802, 806, 811, 819, 825, 832, 845, 854, 869, 878, 892, 907, 918, 926, 930, 941, 944} func (i Op) String() string { if i >= Op(len(_Op_index)-1) { diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go index 0801ecdd9e..affa5f4551 100644 --- a/src/cmd/compile/internal/ir/stmt.go +++ b/src/cmd/compile/internal/ir/stmt.go @@ -42,6 +42,7 @@ func (*Decl) isStmt() {} type Stmt interface { Node isStmt() + PtrInit() *Nodes } // A miniStmt is a miniNode with extra fields common to statements. diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go index 344985f7be..4b5bf17a3d 100644 --- a/src/cmd/compile/internal/ir/symtab.go +++ b/src/cmd/compile/internal/ir/symtab.go @@ -29,6 +29,11 @@ type symsStruct struct { GCWriteBarrier [8]*obj.LSym Goschedguarded *obj.LSym Growslice *obj.LSym + GrowsliceBuf *obj.LSym + MoveSlice *obj.LSym + MoveSliceNoScan *obj.LSym + MoveSliceNoCap *obj.LSym + MoveSliceNoCapNoScan *obj.LSym InterfaceSwitch *obj.LSym MallocGC *obj.LSym MallocGCSmallNoScan [27]*obj.LSym diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go index 84bbf9b394..71953109c4 100644 --- a/src/cmd/compile/internal/loong64/ssa.go +++ b/src/cmd/compile/internal/loong64/ssa.go @@ -575,6 +575,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case ssa.OpLOONG64LoweredZeroLoop: ptrReg := v.Args[0].Reg() countReg := v.RegTmp() + flagReg := int16(loong64.REGTMP) var off int64 n := v.AuxInt loopSize := int64(64) @@ -587,58 +588,119 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { // 
vs // 16 instuctions in the straightline code // Might as well use straightline code. - v.Fatalf("ZeroLoop size tool small %d", n) + v.Fatalf("ZeroLoop size too small %d", n) } - // Put iteration count in a register. - // MOVV $n/loopSize, countReg - p := s.Prog(loong64.AMOVV) - p.From.Type = obj.TYPE_CONST - p.From.Offset = n / loopSize - p.To.Type = obj.TYPE_REG - p.To.Reg = countReg - cntInit := p + // MOVV $n/loopSize, countReg + // MOVBU ir.Syms.Loong64HasLSX, flagReg + // BNE flagReg, lsxInit + // genericInit: + // for off = 0; off < loopSize; off += 8 { + // zero8(s, ptrReg, off) + // } + // ADDV $loopSize, ptrReg + // SUBV $1, countReg + // BNE countReg, genericInit + // JMP tail + // lsxInit: + // VXORV V31, V31, V31, v31 = 0 + // for off = 0; off < loopSize; off += 16 { + // zero16(s, V31, ptrReg, off) + // } + // ADDV $loopSize, ptrReg + // SUBV $1, countReg + // BNE countReg, lsxInit + // tail: + // n %= loopSize + // for off = 0; n >= 8; off += 8, n -= 8 { + // zero8(s, ptrReg, off) + // } + // + // if n != 0 { + // zero8(s, ptrReg, off+n-8) + // } - // Zero loopSize bytes starting at ptrReg. - for range loopSize / 8 { - // MOVV ZR, off(ptrReg) + p1 := s.Prog(loong64.AMOVV) + p1.From.Type = obj.TYPE_CONST + p1.From.Offset = n / loopSize + p1.To.Type = obj.TYPE_REG + p1.To.Reg = countReg + + p2 := s.Prog(loong64.AMOVBU) + p2.From.Type = obj.TYPE_MEM + p2.From.Name = obj.NAME_EXTERN + p2.From.Sym = ir.Syms.Loong64HasLSX + p2.To.Type = obj.TYPE_REG + p2.To.Reg = flagReg + + p3 := s.Prog(loong64.ABNE) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = flagReg + p3.To.Type = obj.TYPE_BRANCH + + for off = 0; off < loopSize; off += 8 { zero8(s, ptrReg, off) - off += 8 } - // Increment ptrReg by loopSize. 
- // ADDV $loopSize, ptrReg - p = s.Prog(loong64.AADDV) - p.From.Type = obj.TYPE_CONST - p.From.Offset = loopSize - p.To.Type = obj.TYPE_REG - p.To.Reg = ptrReg + p4 := s.Prog(loong64.AADDV) + p4.From.Type = obj.TYPE_CONST + p4.From.Offset = loopSize + p4.To.Type = obj.TYPE_REG + p4.To.Reg = ptrReg - // Decrement loop count. - // SUBV $1, countReg - p = s.Prog(loong64.ASUBV) - p.From.Type = obj.TYPE_CONST - p.From.Offset = 1 - p.To.Type = obj.TYPE_REG - p.To.Reg = countReg + p5 := s.Prog(loong64.ASUBV) + p5.From.Type = obj.TYPE_CONST + p5.From.Offset = 1 + p5.To.Type = obj.TYPE_REG + p5.To.Reg = countReg - // Jump to loop header if we're not done yet. - // BNE countReg, loop header - p = s.Prog(loong64.ABNE) - p.From.Type = obj.TYPE_REG - p.From.Reg = countReg - p.To.Type = obj.TYPE_BRANCH - p.To.SetTarget(cntInit.Link) + p6 := s.Prog(loong64.ABNE) + p6.From.Type = obj.TYPE_REG + p6.From.Reg = countReg + p6.To.Type = obj.TYPE_BRANCH + p6.To.SetTarget(p3.Link) + + p7 := s.Prog(obj.AJMP) + p7.To.Type = obj.TYPE_BRANCH + + p8 := s.Prog(loong64.AVXORV) + p8.From.Type = obj.TYPE_REG + p8.From.Reg = loong64.REG_V31 + p8.To.Type = obj.TYPE_REG + p8.To.Reg = loong64.REG_V31 + p3.To.SetTarget(p8) + + for off = 0; off < loopSize; off += 16 { + zero16(s, loong64.REG_V31, ptrReg, off) + } + + p9 := s.Prog(loong64.AADDV) + p9.From.Type = obj.TYPE_CONST + p9.From.Offset = loopSize + p9.To.Type = obj.TYPE_REG + p9.To.Reg = ptrReg + + p10 := s.Prog(loong64.ASUBV) + p10.From.Type = obj.TYPE_CONST + p10.From.Offset = 1 + p10.To.Type = obj.TYPE_REG + p10.To.Reg = countReg + + p11 := s.Prog(loong64.ABNE) + p11.From.Type = obj.TYPE_REG + p11.From.Reg = countReg + p11.To.Type = obj.TYPE_BRANCH + p11.To.SetTarget(p8.Link) + + p12 := s.Prog(obj.ANOP) + p7.To.SetTarget(p12) // Multiples of the loop size are now done. n %= loopSize - - off = 0 // Write any fractional portion. 
- for n >= 8 { - // MOVV ZR, off(ptrReg) + for off = 0; n >= 8; off += 8 { + // MOVV ZR, off(ptrReg) zero8(s, ptrReg, off) - off += 8 n -= 8 } @@ -1333,7 +1395,7 @@ func move8(s *ssagen.State, src, dst, tmp int16, off int64) { // zero8 zeroes 8 bytes at reg+off. func zero8(s *ssagen.State, reg int16, off int64) { - // MOVV ZR, off(reg) + // MOVV ZR, off(reg) p := s.Prog(loong64.AMOVV) p.From.Type = obj.TYPE_REG p.From.Reg = loong64.REGZERO @@ -1341,3 +1403,14 @@ func zero8(s *ssagen.State, reg int16, off int64) { p.To.Reg = reg p.To.Offset = off } + +// zero16 zeroes 16 bytes at reg+off. +func zero16(s *ssagen.State, regZero, regBase int16, off int64) { + // VMOVQ regZero, off(regBase) + p := s.Prog(loong64.AVMOVQ) + p.From.Type = obj.TYPE_REG + p.From.Reg = regZero + p.To.Type = obj.TYPE_MEM + p.To.Reg = regBase + p.To.Offset = off +} diff --git a/src/cmd/compile/internal/slice/slice.go b/src/cmd/compile/internal/slice/slice.go new file mode 100644 index 0000000000..7a32e7adbd --- /dev/null +++ b/src/cmd/compile/internal/slice/slice.go @@ -0,0 +1,455 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slice + +// This file implements a stack-allocation optimization +// for the backing store of slices. +// +// Consider the code: +// +// var s []int +// for i := range ... { +// s = append(s, i) +// } +// return s +// +// Some of the append operations will need to do an allocation +// by calling growslice. This will happen on the 1st, 2nd, 4th, +// 8th, etc. append calls. The allocations done by all but the +// last growslice call will then immediately be garbage. +// +// We'd like to avoid doing some of those intermediate +// allocations if possible. 
+// +// If we can determine that the "return s" statement is the +// *only* way that the backing store for s escapes, then we +// can rewrite the code to something like: +// +// var s []int +// for i := range N { +// s = append(s, i) +// } +// s = move2heap(s) +// return s +// +// Using the move2heap runtime function, which does: +// +// move2heap(s): +// If s is not backed by a stackframe-allocated +// backing store, return s. Otherwise, copy s +// to the heap and return the copy. +// +// Now we can treat the backing store of s allocated at the +// append site as not escaping. Previous stack allocation +// optimizations now apply, which can use a fixed-size +// stack-allocated backing store for s when appending. +// (See ../ssagen/ssa.go:(*state).append) +// +// It is tricky to do this optimization safely. To describe +// our analysis, we first define what an "exclusive" slice +// variable is. +// +// A slice variable (a variable of slice type) is called +// "exclusive" if, when it has a reference to a +// stackframe-allocated backing store, it is the only +// variable with such a reference. +// +// In other words, a slice variable is exclusive if +// any of the following holds: +// 1) It points to a heap-allocated backing store +// 2) It points to a stack-allocated backing store +// for any parent frame. +// 3) It is the only variable that references its +// backing store. +// 4) It is nil. +// +// The nice thing about exclusive slice variables is that +// it is always safe to do +// s = move2heap(s) +// whenever s is an exclusive slice variable. Because no +// one else has a reference to the backing store, no one +// else can tell that we moved the backing store from one +// location to another. +// +// Note that exclusiveness is a dynamic property. A slice +// variable may be exclusive during some parts of execution +// and not exclusive during others. 
+// +// The following operations set or preserve the exclusivity +// of a slice variable s: +// s = nil +// s = append(s, ...) +// s = s[i:j] +// ... = s[i] +// s[i] = ... +// f(s) where f does not escape its argument +// Other operations destroy exclusivity. A non-exhaustive list includes: +// x = s +// *p = s +// f(s) where f escapes its argument +// return s +// To err on the safe side, we white list exclusivity-preserving +// operations and we asssume that any other operations that mention s +// destroy its exclusivity. +// +// Our strategy is to move the backing store of s to the heap before +// any exclusive->nonexclusive transition. That way, s will only ever +// have a reference to a stack backing store while it is exclusive. +// +// move2heap for a variable s is implemented with: +// if s points to within the stack frame { +// s2 := make([]T, s.len, s.cap) +// copy(s2[:s.cap], s[:s.cap]) +// s = s2 +// } +// Note that in general we need to copy all of s[:cap(s)] elements when +// moving to the heap. As an optimization, we keep track of slice variables +// whose capacity, and the elements in s[len(s):cap(s)], are never accessed. +// For those slice variables, we can allocate to the next size class above +// the length, which saves memory and copying cost. + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/escape" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" +) + +func Funcs(all []*ir.Func) { + if base.Flag.N != 0 { + return + } + for _, fn := range all { + analyze(fn) + } +} + +func analyze(fn *ir.Func) { + type sliceInfo struct { + // Slice variable. + s *ir.Name + + // Count of uses that this pass understands. + okUses int32 + // Count of all uses found. + allUses int32 + + // A place where the slice variable transitions from + // exclusive to nonexclusive. + // We could keep track of more than one, but one is enough for now. + // Currently, this can be either a return statement or + // an assignment. 
+ // TODO: other possible transitions? + transition ir.Stmt + + // Each s = append(s, ...) instance we found. + appends []*ir.CallExpr + + // Weight of the number of s = append(s, ...) instances we found. + // The optimizations we do are only really useful if there are at + // least weight 2. (Note: appends in loops have weight >= 2.) + appendWeight int + + // Whether we ever do cap(s), or other operations that use cap(s) + // (possibly implicitly), like s[i:j]. + capUsed bool + } + + // Every variable (*ir.Name) that we are tracking will have + // a non-nil *sliceInfo in its Opt field. + haveLocalSlice := false + maxStackSize := int64(base.Debug.VariableMakeThreshold) + var namedRets []*ir.Name + for _, s := range fn.Dcl { + if !s.Type().IsSlice() { + continue + } + if s.Type().Elem().Size() > maxStackSize { + continue + } + if !base.VariableMakeHash.MatchPos(s.Pos(), nil) { + continue + } + s.Opt = &sliceInfo{s: s} // start tracking s + haveLocalSlice = true + if s.Class == ir.PPARAMOUT { + namedRets = append(namedRets, s) + } + } + if !haveLocalSlice { + return + } + + // Keep track of loop depth while walking. + loopDepth := 0 + + // tracking returns the info for the slice variable if n is a slice + // variable that we're still considering, or nil otherwise. + tracking := func(n ir.Node) *sliceInfo { + if n == nil || n.Op() != ir.ONAME { + return nil + } + s := n.(*ir.Name) + if s.Opt == nil { + return nil + } + return s.Opt.(*sliceInfo) + } + + // addTransition(n, loc) records that s experiences an exclusive->nonexclusive + // transition somewhere within loc. + addTransition := func(i *sliceInfo, loc ir.Stmt) { + if i.transition != nil { + // We only keep track of a single exclusive->nonexclusive transition + // for a slice variable. If we find more than one, give up. + // (More than one transition location would be fine, but we would + // start to get worried about introducing too much additional code.) 
+ i.s.Opt = nil + return + } + i.transition = loc + } + + // Examine an x = y assignment that occurs somewhere within statement stmt. + assign := func(x, y ir.Node, stmt ir.Stmt) { + if i := tracking(x); i != nil { + // s = y. Check for understood patterns for y. + if y == nil || y.Op() == ir.ONIL { + // s = nil is ok. + i.okUses++ + } else if y.Op() == ir.OSLICELIT { + // s = []{...} is ok. + // Note: this reveals capacity. Should it? + i.okUses++ + i.capUsed = true + } else if y.Op() == ir.OSLICE { + y := y.(*ir.SliceExpr) + if y.X == i.s { + // s = s[...:...] is ok + i.okUses += 2 + i.capUsed = true + } + } else if y.Op() == ir.OAPPEND { + y := y.(*ir.CallExpr) + if y.Args[0] == i.s { + // s = append(s, ...) is ok + i.okUses += 2 + i.appends = append(i.appends, y) + i.appendWeight += 1 + loopDepth + } + // TODO: s = append(nil, ...)? + } + // Note that technically s = make([]T, ...) preserves exclusivity, but + // we don't track that because we assume users who wrote that know + // better than the compiler does. + + // TODO: figure out how to handle s = fn(..., s, ...) + // It would be nice to maintain exclusivity of s in this situation. + // But unfortunately, fn can return one of its other arguments, which + // may be a slice with a stack-allocated backing store other than s. + // (which may have preexisting references to its backing store). + // + // Maybe we could do it if s is the only argument? + } + + if i := tracking(y); i != nil { + // ... = s + // Treat this as an exclusive->nonexclusive transition. + i.okUses++ + addTransition(i, stmt) + } + } + + var do func(ir.Node) bool + do = func(n ir.Node) bool { + if n == nil { + return false + } + switch n.Op() { + case ir.ONAME: + if i := tracking(n); i != nil { + // A use of a slice variable. Count it. + i.allUses++ + } + case ir.ODCL: + n := n.(*ir.Decl) + if i := tracking(n.X); i != nil { + i.okUses++ + } + case ir.OINDEX: + n := n.(*ir.IndexExpr) + if i := tracking(n.X); i != nil { + // s[i] is ok. 
+ i.okUses++ + } + case ir.OLEN: + n := n.(*ir.UnaryExpr) + if i := tracking(n.X); i != nil { + // len(s) is ok + i.okUses++ + } + case ir.OCAP: + n := n.(*ir.UnaryExpr) + if i := tracking(n.X); i != nil { + // cap(s) is ok + i.okUses++ + i.capUsed = true + } + case ir.OADDR: + n := n.(*ir.AddrExpr) + if n.X.Op() == ir.OINDEX { + n := n.X.(*ir.IndexExpr) + if i := tracking(n.X); i != nil { + // &s[i] is definitely a nonexclusive transition. + // (We need this case because s[i] is ok, but &s[i] is not.) + i.s.Opt = nil + } + } + case ir.ORETURN: + n := n.(*ir.ReturnStmt) + for _, x := range n.Results { + if i := tracking(x); i != nil { + i.okUses++ + // We go exclusive->nonexclusive here + addTransition(i, n) + } + } + if len(n.Results) == 0 { + // Uses of named result variables are implicit here. + for _, x := range namedRets { + if i := tracking(x); i != nil { + addTransition(i, n) + } + } + } + case ir.OCALLFUNC: + n := n.(*ir.CallExpr) + for idx, arg := range n.Args { + if i := tracking(arg); i != nil { + if !argLeak(n, idx) { + // Passing s to a nonescaping arg is ok. + i.okUses++ + i.capUsed = true + } + } + } + case ir.ORANGE: + // Range over slice is ok. + n := n.(*ir.RangeStmt) + if i := tracking(n.X); i != nil { + i.okUses++ + } + case ir.OAS: + n := n.(*ir.AssignStmt) + assign(n.X, n.Y, n) + case ir.OAS2: + n := n.(*ir.AssignListStmt) + for i := range len(n.Lhs) { + assign(n.Lhs[i], n.Rhs[i], n) + } + case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) + for _, v := range n.Func.ClosureVars { + do(v.Outer) + } + } + if n.Op() == ir.OFOR || n.Op() == ir.ORANGE { + // Note: loopDepth isn't really right for init portion + // of the for statement, but that's ok. Correctness + // does not depend on depth info. + loopDepth++ + defer func() { loopDepth-- }() + } + // Check all the children. + ir.DoChildren(n, do) + return false + } + + // Run the analysis over the whole body. 
+ for _, stmt := range fn.Body { + do(stmt) + } + + // Process accumulated info to find slice variables + // that we can allocate on the stack. + for _, s := range fn.Dcl { + if s.Opt == nil { + continue + } + i := s.Opt.(*sliceInfo) + s.Opt = nil + if i.okUses != i.allUses { + // Some use of i.s that we don't understand lurks. Give up. + continue + } + + // At this point, we've decided that we *can* do + // the optimization. + + if i.transition == nil { + // Exclusive for its whole lifetime. That means it + // didn't escape. We can already handle nonescaping + // slices without this pass. + continue + } + if i.appendWeight < 2 { + // This optimization only really helps if there is + // (dynamically) more than one append. + continue + } + + // Commit point - at this point we've decided we *should* + // do the optimization. + + // Insert a move2heap operation before the exclusive->nonexclusive + // transition. + move := ir.NewMoveToHeapExpr(i.transition.Pos(), i.s) + if i.capUsed { + move.PreserveCapacity = true + } + move.RType = reflectdata.AppendElemRType(i.transition.Pos(), i.appends[0]) + move.SetType(i.s.Type()) + move.SetTypecheck(1) + as := ir.NewAssignStmt(i.transition.Pos(), i.s, move) + as.SetTypecheck(1) + i.transition.PtrInit().Prepend(as) + // Note: we prepend because we need to put the move2heap + // operation first, before any other init work, as the transition + // might occur in the init work. + + // Now that we've inserted a move2heap operation before every + // exclusive -> nonexclusive transition, appends can now use + // stack backing stores. + // (This is the whole point of this pass, to enable stack + // allocation of append backing stores.) + for _, a := range i.appends { + a.SetEsc(ir.EscNone) + if i.capUsed { + a.UseBuf = true + } + } + } +} + +// argLeak reports whether the idx'th argument to the call n escapes anywhere +// (to the heap, another argument, return value, etc.) +// If unknown, it returns true. 
+func argLeak(n *ir.CallExpr, idx int) bool { + if n.Op() != ir.OCALLFUNC { + return true + } + fn := ir.StaticCalleeName(ir.StaticValue(n.Fun)) + if fn == nil { + return true + } + fntype := fn.Type() + if recv := fntype.Recv(); recv != nil { + if idx == 0 { + return escape.ParseLeaks(recv.Note).Any() + } + idx-- + } + return escape.ParseLeaks(fntype.Params()[idx].Note).Any() +} diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go index 1e9eb0146e..e77f55ab5e 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -156,6 +156,7 @@ func init() { gp11sb = regInfo{inputs: []regMask{gpspsbg}, outputs: gponly} gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly} + gp21sp2 = regInfo{inputs: []regMask{gp, gpsp}, outputs: gponly} gp21sb = regInfo{inputs: []regMask{gpspsbg, gpsp}, outputs: gponly} gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}} gp31shift = regInfo{inputs: []regMask{gp, gp, cx}, outputs: []regMask{gp}} @@ -361,7 +362,7 @@ func init() { {name: "ADDQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, {name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, - {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, + {name: "SUBQ", argLength: 2, reg: gp21sp2, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, 
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64.rules b/src/cmd/compile/internal/ssa/_gen/ARM64.rules index f54a692725..53bb35d289 100644 --- a/src/cmd/compile/internal/ssa/_gen/ARM64.rules +++ b/src/cmd/compile/internal/ssa/_gen/ARM64.rules @@ -573,6 +573,8 @@ (TBNZ [0] (GreaterThanF cc) yes no) => (FGT cc yes no) (TBNZ [0] (GreaterEqualF cc) yes no) => (FGE cc yes no) +(TB(Z|NZ) [0] (XORconst [1] x) yes no) => (TB(NZ|Z) [0] x yes no) + ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TST x y) yes no) ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTconst [c] y) yes no) ((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTW x y) yes no) @@ -1814,3 +1816,7 @@ (Select0 (Mul64uover x y)) => (MUL x y) (Select1 (Mul64uover x y)) => (NotEqual (CMPconst (UMULH <typ.UInt64> x y) [0])) + +// 32 mul 32 -> 64 +(MUL r:(MOVWUreg x) s:(MOVWUreg y)) && r.Uses == 1 && s.Uses == 1 => (UMULL x y) +(MUL r:(MOVWreg x) s:(MOVWreg y)) && r.Uses == 1 && s.Uses == 1 => (MULL x y) diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules index 9691296043..2beba0b1c5 100644 --- a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules +++ b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules @@ -743,9 +743,6 @@ (MULV x (MOVVconst [c])) && canMulStrengthReduce(config, c) => {mulStrengthReduce(v, x, c)} -(MULV (NEGV x) (MOVVconst [c])) => (MULV x (MOVVconst [-c])) -(MULV (NEGV x) (NEGV y)) => (MULV x y) - (ADDV x0 x1:(SLLVconst [c] y)) && x1.Uses == 1 && c > 0 && c <= 4 => (ADDshiftLLV x0 y [c]) // fold constant in ADDshift op diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go index 7e8b8bf497..81d3a3665b 100644 --- a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go @@ -388,6 +388,7 @@ 
func init() { argLength: 2, reg: regInfo{ inputs: []regMask{gp}, + clobbers: buildReg("F31"), clobbersArg0: true, }, faultOnNilArg0: true, diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules index 646948f2df..13a8cab3b5 100644 --- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules +++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules @@ -689,36 +689,36 @@ (MOVDnop (MOVDconst [c])) => (MOVDconst [c]) // Avoid unnecessary zero and sign extension when right shifting. -(SRAI <t> [x] (MOVWreg y)) && x >= 0 && x <= 31 => (SRAIW <t> [int64(x)] y) -(SRLI <t> [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW <t> [int64(x)] y) +(SRAI [x] (MOVWreg y)) && x >= 0 && x <= 31 => (SRAIW [x] y) +(SRLI [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW [x] y) // Replace right shifts that exceed size of signed type. (SRAI <t> [x] (MOVBreg y)) && x >= 8 => (SRAI [63] (SLLI <t> [56] y)) (SRAI <t> [x] (MOVHreg y)) && x >= 16 => (SRAI [63] (SLLI <t> [48] y)) -(SRAI <t> [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y) +(SRAI [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y) // Eliminate right shifts that exceed size of unsigned type. -(SRLI <t> [x] (MOVBUreg y)) && x >= 8 => (MOVDconst <t> [0]) -(SRLI <t> [x] (MOVHUreg y)) && x >= 16 => (MOVDconst <t> [0]) -(SRLI <t> [x] (MOVWUreg y)) && x >= 32 => (MOVDconst <t> [0]) +(SRLI [x] (MOVBUreg y)) && x >= 8 => (MOVDconst [0]) +(SRLI [x] (MOVHUreg y)) && x >= 16 => (MOVDconst [0]) +(SRLI [x] (MOVWUreg y)) && x >= 32 => (MOVDconst [0]) // Fold constant into immediate instructions where possible. 
(ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x) (AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x) (OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x) (XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x) -(ROL x (MOVDconst [val])) => (RORI [int64(int8(-val)&63)] x) -(ROLW x (MOVDconst [val])) => (RORIW [int64(int8(-val)&31)] x) -(ROR x (MOVDconst [val])) => (RORI [int64(val&63)] x) -(RORW x (MOVDconst [val])) => (RORIW [int64(val&31)] x) -(SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x) -(SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x) -(SLLW x (MOVDconst [val])) => (SLLIW [int64(val&31)] x) -(SRLW x (MOVDconst [val])) => (SRLIW [int64(val&31)] x) -(SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x) -(SRAW x (MOVDconst [val])) => (SRAIW [int64(val&31)] x) -(SLT x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTI [val] x) -(SLTU x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTIU [val] x) +(ROL x (MOVDconst [val])) => (RORI [-val&63] x) +(ROLW x (MOVDconst [val])) => (RORIW [-val&31] x) +(ROR x (MOVDconst [val])) => (RORI [val&63] x) +(RORW x (MOVDconst [val])) => (RORIW [val&31] x) +(SLL x (MOVDconst [val])) => (SLLI [val&63] x) +(SLLW x (MOVDconst [val])) => (SLLIW [val&31] x) +(SRL x (MOVDconst [val])) => (SRLI [val&63] x) +(SRLW x (MOVDconst [val])) => (SRLIW [val&31] x) +(SRA x (MOVDconst [val])) => (SRAI [val&63] x) +(SRAW x (MOVDconst [val])) => (SRAIW [val&31] x) +(SLT x (MOVDconst [val])) && is12Bit(val) => (SLTI [val] x) +(SLTU x (MOVDconst [val])) && is12Bit(val) => (SLTIU [val] x) // Replace negated left rotation with right rotation. (ROL x (NEG y)) => (ROR x y) @@ -782,7 +782,7 @@ (SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)]) // Combine doubling via addition with shift. 
-(SLLI <t> [c] (ADD x x)) && c < t.Size() * 8 - 1 => (SLLI <t> [c+1] x) +(SLLI <t> [c] (ADD x x)) && c < t.Size() * 8 - 1 => (SLLI [c+1] x) (SLLI <t> [c] (ADD x x)) && c >= t.Size() * 8 - 1 => (MOVDconst [0]) // SLTI/SLTIU with constants. @@ -792,7 +792,6 @@ // SLTI/SLTIU with known outcomes. (SLTI [x] (ANDI [y] _)) && y >= 0 && int64(y) < int64(x) => (MOVDconst [1]) (SLTIU [x] (ANDI [y] _)) && y >= 0 && uint64(y) < uint64(x) => (MOVDconst [1]) -(SLTI [x] (ORI [y] _)) && y >= 0 && int64(y) >= int64(x) => (MOVDconst [0]) (SLTIU [x] (ORI [y] _)) && y >= 0 && uint64(y) >= uint64(x) => (MOVDconst [0]) // SLT/SLTU with known outcomes. diff --git a/src/cmd/compile/internal/ssa/_gen/dec.rules b/src/cmd/compile/internal/ssa/_gen/dec.rules index 9f6dc36975..fce0026211 100644 --- a/src/cmd/compile/internal/ssa/_gen/dec.rules +++ b/src/cmd/compile/internal/ssa/_gen/dec.rules @@ -97,8 +97,10 @@ // Helpers for expand calls // Some of these are copied from generic.rules -(IMake _typ (StructMake val)) => (IMake _typ val) -(StructSelect [0] (IData x)) => (IData x) +(IMake _typ (StructMake ___)) => imakeOfStructMake(v) +(StructSelect (IData x)) && v.Type.Size() > 0 => (IData x) +(StructSelect (IData x)) && v.Type.Size() == 0 && v.Type.IsStruct() => (StructMake) +(StructSelect (IData x)) && v.Type.Size() == 0 && v.Type.IsArray() => (ArrayMake0) (StructSelect [i] x:(StructMake ___)) => x.Args[i] @@ -109,7 +111,7 @@ // More annoying case: (ArraySelect[0] (StructSelect[0] isAPtr)) // There, result of the StructSelect is an Array (not a pointer) and // the pre-rewrite input to the ArraySelect is a struct, not a pointer. -(StructSelect [0] x) && x.Type.IsPtrShaped() => x +(StructSelect x) && x.Type.IsPtrShaped() => x (ArraySelect [0] x) && x.Type.IsPtrShaped() => x // These, too. Bits is bits. 
@@ -119,6 +121,7 @@ (Store _ (StructMake ___) _) => rewriteStructStore(v) +(IMake _typ (ArrayMake1 val)) => (IMake _typ val) (ArraySelect (ArrayMake1 x)) => x (ArraySelect [0] (IData x)) => (IData x) diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index ccdf0bf50d..6a213cd03a 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -195,6 +195,11 @@ // Convert x * -1 to -x. (Mul(8|16|32|64) (Const(8|16|32|64) [-1]) x) => (Neg(8|16|32|64) x) +// Convert -x * c to x * -c +(Mul(8|16|32|64) (Const(8|16|32|64) <t> [c]) (Neg(8|16|32|64) x)) => (Mul(8|16|32|64) x (Const(8|16|32|64) <t> [-c])) + +(Mul(8|16|32|64) (Neg(8|16|32|64) x) (Neg(8|16|32|64) y)) => (Mul(8|16|32|64) x y) + // DeMorgan's Laws (And(8|16|32|64) <t> (Com(8|16|32|64) x) (Com(8|16|32|64) y)) => (Com(8|16|32|64) (Or(8|16|32|64) <t> x y)) (Or(8|16|32|64) <t> (Com(8|16|32|64) x) (Com(8|16|32|64) y)) => (Com(8|16|32|64) (And(8|16|32|64) <t> x y)) @@ -337,6 +342,12 @@ (OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) => ((Less|Leq)16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1]))) (OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) => ((Less|Leq)8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1]))) +// single bit difference: ( x != c && x != d ) -> ( x|(c^d) != c ) +(AndB (Neq(64|32|16|8) x cv:(Const(64|32|16|8) [c])) (Neq(64|32|16|8) x (Const(64|32|16|8) [d]))) && c|d == c && oneBit(c^d) => (Neq(64|32|16|8) (Or(64|32|16|8) <x.Type> x (Const(64|32|16|8) <x.Type> [c^d])) cv) + +// single bit difference: ( x == c || x == d ) -> ( x|(c^d) == c ) +(OrB (Eq(64|32|16|8) x cv:(Const(64|32|16|8) [c])) (Eq(64|32|16|8) x (Const(64|32|16|8) [d]))) && c|d == c && oneBit(c^d) => (Eq(64|32|16|8) (Or(64|32|16|8) <x.Type> x 
(Const(64|32|16|8) <x.Type> [c^d])) cv) + // NaN check: ( x != x || x (>|>=|<|<=) c ) -> ( !(c (>=|>|<=|<) x) ) (OrB (Neq64F x x) ((Less|Leq)64F x y:(Const64F [c]))) => (Not ((Leq|Less)64F y x)) (OrB (Neq64F x x) ((Less|Leq)64F y:(Const64F [c]) x)) => (Not ((Leq|Less)64F x y)) @@ -933,8 +944,10 @@ @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem) // Putting struct{*byte} and similar into direct interfaces. -(IMake _typ (StructMake val)) => (IMake _typ val) -(StructSelect [0] (IData x)) => (IData x) +(IMake _typ (StructMake ___)) => imakeOfStructMake(v) +(StructSelect (IData x)) && v.Type.Size() > 0 => (IData x) +(StructSelect (IData x)) && v.Type.Size() == 0 && v.Type.IsStruct() => (StructMake) +(StructSelect (IData x)) && v.Type.Size() == 0 && v.Type.IsArray() => (ArrayMake0) // un-SSAable values use mem->mem copies (Store {t} dst (Load src mem) mem) && !CanSSA(t) => @@ -2222,4 +2235,4 @@ (Neq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [0])) => x (Neq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [1])) => (Not x) (Eq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [1])) => x -(Eq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [0])) => (Not x)
\ No newline at end of file +(Eq(64|32|16) (SignExt8to(64|32|16) (CvtBoolToUint8 x)) (Const(64|32|16) [0])) => (Not x) diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index c1726b2797..1a2985d5af 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -426,7 +426,14 @@ func (x *expandState) decomposeAsNecessary(pos src.XPos, b *Block, a, m0 *Value, if a.Op == OpIMake { data := a.Args[1] for data.Op == OpStructMake || data.Op == OpArrayMake1 { - data = data.Args[0] + // A struct make might have a few zero-sized fields. + // Use the pointer-y one we know is there. + for _, a := range data.Args { + if a.Type.Size() > 0 { + data = a + break + } + } } return x.decomposeAsNecessary(pos, b, data, mem, rc.next(data.Type)) } diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go index 0cee91b532..e95064c1df 100644 --- a/src/cmd/compile/internal/ssa/fuse.go +++ b/src/cmd/compile/internal/ssa/fuse.go @@ -10,7 +10,9 @@ import ( ) // fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange|fuseTypeNanCheck). -func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange|fuseTypeNanCheck) } +func fuseEarly(f *Func) { + fuse(f, fuseTypePlain|fuseTypeIntInRange|fuseTypeSingleBitDifference|fuseTypeNanCheck) +} // fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect). 
func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect) } @@ -21,6 +23,7 @@ const ( fuseTypePlain fuseType = 1 << iota fuseTypeIf fuseTypeIntInRange + fuseTypeSingleBitDifference fuseTypeNanCheck fuseTypeBranchRedirect fuseTypeShortCircuit @@ -41,6 +44,9 @@ func fuse(f *Func, typ fuseType) { if typ&fuseTypeIntInRange != 0 { changed = fuseIntInRange(b) || changed } + if typ&fuseTypeSingleBitDifference != 0 { + changed = fuseSingleBitDifference(b) || changed + } if typ&fuseTypeNanCheck != 0 { changed = fuseNanCheck(b) || changed } diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go index b6eb8fcb90..898c034485 100644 --- a/src/cmd/compile/internal/ssa/fuse_comparisons.go +++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go @@ -19,6 +19,14 @@ func fuseNanCheck(b *Block) bool { return fuseComparisons(b, canOptNanCheck) } +// fuseSingleBitDifference replaces the short-circuit operators between equality checks with +// constants that only differ by a single bit. For example, it would convert +// `if x == 4 || x == 6 { ... }` into `if (x == 4) | (x == 6) { ... }`. Rewrite rules can +// then optimize these using a bitwise operation, in this case generating `if x|2 == 6 { ... }`. +func fuseSingleBitDifference(b *Block) bool { + return fuseComparisons(b, canOptSingleBitDifference) +} + // fuseComparisons looks for control graphs that match this pattern: // // p - predecessor @@ -229,3 +237,40 @@ func canOptNanCheck(x, y *Value, op Op) bool { } return false } + +// canOptSingleBitDifference returns true if x op y matches either: +// +// v == c || v == d +// v != c && v != d +// +// Where c and d are constant values that differ by a single bit. 
+func canOptSingleBitDifference(x, y *Value, op Op) bool { + if x.Op != y.Op { + return false + } + switch x.Op { + case OpEq64, OpEq32, OpEq16, OpEq8: + if op != OpOrB { + return false + } + case OpNeq64, OpNeq32, OpNeq16, OpNeq8: + if op != OpAndB { + return false + } + default: + return false + } + + xi := getConstIntArgIndex(x) + if xi < 0 { + return false + } + yi := getConstIntArgIndex(y) + if yi < 0 { + return false + } + if x.Args[xi^1] != y.Args[yi^1] { + return false + } + return oneBit(x.Args[xi].AuxInt ^ y.Args[yi].AuxInt) +} diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 9c5d79fa56..ea5491362f 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -11481,7 +11481,7 @@ var opcodeTable = [...]opInfo{ reg: regInfo{ inputs: []inputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 - {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 + {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 }, outputs: []outputInfo{ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15 @@ -68770,6 +68770,7 @@ var opcodeTable = [...]opInfo{ inputs: []inputInfo{ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, + clobbers: 2305843009213693952, // F31 clobbersArg0: true, }, }, diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index 4919d6ad37..d4e7ed14b1 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -466,57 +466,56 @@ func (ft *factsTable) initLimitForNewValue(v *Value) { // signedMin records the fact that we know v is at least // min in the signed domain. 
-func (ft *factsTable) signedMin(v *Value, min int64) bool { - return ft.newLimit(v, limit{min: min, max: math.MaxInt64, umin: 0, umax: math.MaxUint64}) +func (ft *factsTable) signedMin(v *Value, min int64) { + ft.newLimit(v, limit{min: min, max: math.MaxInt64, umin: 0, umax: math.MaxUint64}) } // signedMax records the fact that we know v is at most // max in the signed domain. -func (ft *factsTable) signedMax(v *Value, max int64) bool { - return ft.newLimit(v, limit{min: math.MinInt64, max: max, umin: 0, umax: math.MaxUint64}) +func (ft *factsTable) signedMax(v *Value, max int64) { + ft.newLimit(v, limit{min: math.MinInt64, max: max, umin: 0, umax: math.MaxUint64}) } -func (ft *factsTable) signedMinMax(v *Value, min, max int64) bool { - return ft.newLimit(v, limit{min: min, max: max, umin: 0, umax: math.MaxUint64}) +func (ft *factsTable) signedMinMax(v *Value, min, max int64) { + ft.newLimit(v, limit{min: min, max: max, umin: 0, umax: math.MaxUint64}) } // setNonNegative records the fact that v is known to be non-negative. -func (ft *factsTable) setNonNegative(v *Value) bool { - return ft.signedMin(v, 0) +func (ft *factsTable) setNonNegative(v *Value) { + ft.signedMin(v, 0) } // unsignedMin records the fact that we know v is at least // min in the unsigned domain. -func (ft *factsTable) unsignedMin(v *Value, min uint64) bool { - return ft.newLimit(v, limit{min: math.MinInt64, max: math.MaxInt64, umin: min, umax: math.MaxUint64}) +func (ft *factsTable) unsignedMin(v *Value, min uint64) { + ft.newLimit(v, limit{min: math.MinInt64, max: math.MaxInt64, umin: min, umax: math.MaxUint64}) } // unsignedMax records the fact that we know v is at most // max in the unsigned domain. 
-func (ft *factsTable) unsignedMax(v *Value, max uint64) bool { - return ft.newLimit(v, limit{min: math.MinInt64, max: math.MaxInt64, umin: 0, umax: max}) +func (ft *factsTable) unsignedMax(v *Value, max uint64) { + ft.newLimit(v, limit{min: math.MinInt64, max: math.MaxInt64, umin: 0, umax: max}) } -func (ft *factsTable) unsignedMinMax(v *Value, min, max uint64) bool { - return ft.newLimit(v, limit{min: math.MinInt64, max: math.MaxInt64, umin: min, umax: max}) +func (ft *factsTable) unsignedMinMax(v *Value, min, max uint64) { + ft.newLimit(v, limit{min: math.MinInt64, max: math.MaxInt64, umin: min, umax: max}) } -func (ft *factsTable) booleanFalse(v *Value) bool { - return ft.newLimit(v, limit{min: 0, max: 0, umin: 0, umax: 0}) +func (ft *factsTable) booleanFalse(v *Value) { + ft.newLimit(v, limit{min: 0, max: 0, umin: 0, umax: 0}) } -func (ft *factsTable) booleanTrue(v *Value) bool { - return ft.newLimit(v, limit{min: 1, max: 1, umin: 1, umax: 1}) +func (ft *factsTable) booleanTrue(v *Value) { + ft.newLimit(v, limit{min: 1, max: 1, umin: 1, umax: 1}) } -func (ft *factsTable) pointerNil(v *Value) bool { - return ft.newLimit(v, limit{min: 0, max: 0, umin: 0, umax: 0}) +func (ft *factsTable) pointerNil(v *Value) { + ft.newLimit(v, limit{min: 0, max: 0, umin: 0, umax: 0}) } -func (ft *factsTable) pointerNonNil(v *Value) bool { +func (ft *factsTable) pointerNonNil(v *Value) { l := noLimit l.umin = 1 - return ft.newLimit(v, l) + ft.newLimit(v, l) } // newLimit adds new limiting information for v. -// Returns true if the new limit added any new information. -func (ft *factsTable) newLimit(v *Value, newLim limit) bool { +func (ft *factsTable) newLimit(v *Value, newLim limit) { oldLim := ft.limits[v.ID] // Merge old and new information. 
@@ -531,13 +530,12 @@ func (ft *factsTable) newLimit(v *Value, newLim limit) bool { } if lim == oldLim { - return false // nothing new to record + return // nothing new to record } if lim.unsat() { - r := !ft.unsat ft.unsat = true - return r + return } // Check for recursion. This normally happens because in unsatisfiable @@ -548,7 +546,7 @@ func (ft *factsTable) newLimit(v *Value, newLim limit) bool { // the posets will not notice. if ft.recurseCheck[v.ID] { // This should only happen for unsatisfiable cases. TODO: check - return false + return } ft.recurseCheck[v.ID] = true defer func() { @@ -713,8 +711,6 @@ func (ft *factsTable) newLimit(v *Value, newLim limit) bool { } } } - - return true } func (ft *factsTable) addOrdering(v, w *Value, d domain, r relation) { @@ -1825,7 +1821,7 @@ func initLimit(v *Value) limit { return lim } -// flowLimit updates the known limits of v in ft. Returns true if anything changed. +// flowLimit updates the known limits of v in ft. // flowLimit can use the ranges of input arguments. // // Note: this calculation only happens at the point the value is defined. We do not reevaluate @@ -1838,10 +1834,10 @@ func initLimit(v *Value) limit { // block. We could recompute the range of v once we enter the block so // we know that it is 0 <= v <= 8, but we don't have a mechanism to do // that right now. -func (ft *factsTable) flowLimit(v *Value) bool { +func (ft *factsTable) flowLimit(v *Value) { if !v.Type.IsInteger() { // TODO: boolean? - return false + return } // Additional limits based on opcode and argument. 
@@ -1851,36 +1847,36 @@ func (ft *factsTable) flowLimit(v *Value) bool { // extensions case OpZeroExt8to64, OpZeroExt8to32, OpZeroExt8to16, OpZeroExt16to64, OpZeroExt16to32, OpZeroExt32to64: a := ft.limits[v.Args[0].ID] - return ft.unsignedMinMax(v, a.umin, a.umax) + ft.unsignedMinMax(v, a.umin, a.umax) case OpSignExt8to64, OpSignExt8to32, OpSignExt8to16, OpSignExt16to64, OpSignExt16to32, OpSignExt32to64: a := ft.limits[v.Args[0].ID] - return ft.signedMinMax(v, a.min, a.max) + ft.signedMinMax(v, a.min, a.max) case OpTrunc64to8, OpTrunc64to16, OpTrunc64to32, OpTrunc32to8, OpTrunc32to16, OpTrunc16to8: a := ft.limits[v.Args[0].ID] if a.umax <= 1<<(uint64(v.Type.Size())*8)-1 { - return ft.unsignedMinMax(v, a.umin, a.umax) + ft.unsignedMinMax(v, a.umin, a.umax) } // math/bits case OpCtz64: a := ft.limits[v.Args[0].ID] if a.nonzero() { - return ft.unsignedMax(v, uint64(bits.Len64(a.umax)-1)) + ft.unsignedMax(v, uint64(bits.Len64(a.umax)-1)) } case OpCtz32: a := ft.limits[v.Args[0].ID] if a.nonzero() { - return ft.unsignedMax(v, uint64(bits.Len32(uint32(a.umax))-1)) + ft.unsignedMax(v, uint64(bits.Len32(uint32(a.umax))-1)) } case OpCtz16: a := ft.limits[v.Args[0].ID] if a.nonzero() { - return ft.unsignedMax(v, uint64(bits.Len16(uint16(a.umax))-1)) + ft.unsignedMax(v, uint64(bits.Len16(uint16(a.umax))-1)) } case OpCtz8: a := ft.limits[v.Args[0].ID] if a.nonzero() { - return ft.unsignedMax(v, uint64(bits.Len8(uint8(a.umax))-1)) + ft.unsignedMax(v, uint64(bits.Len8(uint8(a.umax))-1)) } case OpPopCount64, OpPopCount32, OpPopCount16, OpPopCount8: @@ -1889,26 +1885,26 @@ func (ft *factsTable) flowLimit(v *Value) bool { sharedLeadingMask := ^(uint64(1)<<changingBitsCount - 1) fixedBits := a.umax & sharedLeadingMask min := uint64(bits.OnesCount64(fixedBits)) - return ft.unsignedMinMax(v, min, min+changingBitsCount) + ft.unsignedMinMax(v, min, min+changingBitsCount) case OpBitLen64: a := ft.limits[v.Args[0].ID] - return ft.unsignedMinMax(v, + ft.unsignedMinMax(v, 
uint64(bits.Len64(a.umin)), uint64(bits.Len64(a.umax))) case OpBitLen32: a := ft.limits[v.Args[0].ID] - return ft.unsignedMinMax(v, + ft.unsignedMinMax(v, uint64(bits.Len32(uint32(a.umin))), uint64(bits.Len32(uint32(a.umax)))) case OpBitLen16: a := ft.limits[v.Args[0].ID] - return ft.unsignedMinMax(v, + ft.unsignedMinMax(v, uint64(bits.Len16(uint16(a.umin))), uint64(bits.Len16(uint16(a.umax)))) case OpBitLen8: a := ft.limits[v.Args[0].ID] - return ft.unsignedMinMax(v, + ft.unsignedMinMax(v, uint64(bits.Len8(uint8(a.umin))), uint64(bits.Len8(uint8(a.umax)))) @@ -1921,43 +1917,43 @@ func (ft *factsTable) flowLimit(v *Value) bool { // AND can only make the value smaller. a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] - return ft.unsignedMax(v, min(a.umax, b.umax)) + ft.unsignedMax(v, min(a.umax, b.umax)) case OpOr64, OpOr32, OpOr16, OpOr8: // OR can only make the value bigger and can't flip bits proved to be zero in both inputs. a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] - return ft.unsignedMinMax(v, + ft.unsignedMinMax(v, max(a.umin, b.umin), 1<<bits.Len64(a.umax|b.umax)-1) case OpXor64, OpXor32, OpXor16, OpXor8: // XOR can't flip bits that are proved to be zero in both inputs. a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] - return ft.unsignedMax(v, 1<<bits.Len64(a.umax|b.umax)-1) + ft.unsignedMax(v, 1<<bits.Len64(a.umax|b.umax)-1) case OpCom64, OpCom32, OpCom16, OpCom8: a := ft.limits[v.Args[0].ID] - return ft.newLimit(v, a.com(uint(v.Type.Size())*8)) + ft.newLimit(v, a.com(uint(v.Type.Size())*8)) // Arithmetic. 
case OpAdd64, OpAdd32, OpAdd16, OpAdd8: a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] - return ft.newLimit(v, a.add(b, uint(v.Type.Size())*8)) + ft.newLimit(v, a.add(b, uint(v.Type.Size())*8)) case OpSub64, OpSub32, OpSub16, OpSub8: a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] - sub := ft.newLimit(v, a.sub(b, uint(v.Type.Size())*8)) - mod := ft.detectMod(v) - inferred := ft.detectSliceLenRelation(v) - return sub || mod || inferred + ft.newLimit(v, a.sub(b, uint(v.Type.Size())*8)) + ft.detectMod(v) + ft.detectSliceLenRelation(v) + ft.detectSubRelations(v) case OpNeg64, OpNeg32, OpNeg16, OpNeg8: a := ft.limits[v.Args[0].ID] bitsize := uint(v.Type.Size()) * 8 - return ft.newLimit(v, a.com(bitsize).add(limit{min: 1, max: 1, umin: 1, umax: 1}, bitsize)) + ft.newLimit(v, a.com(bitsize).add(limit{min: 1, max: 1, umin: 1, umax: 1}, bitsize)) case OpMul64, OpMul32, OpMul16, OpMul8: a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] - return ft.newLimit(v, a.mul(b, uint(v.Type.Size())*8)) + ft.newLimit(v, a.mul(b, uint(v.Type.Size())*8)) case OpLsh64x64, OpLsh64x32, OpLsh64x16, OpLsh64x8, OpLsh32x64, OpLsh32x32, OpLsh32x16, OpLsh32x8, OpLsh16x64, OpLsh16x32, OpLsh16x16, OpLsh16x8, @@ -1965,7 +1961,7 @@ func (ft *factsTable) flowLimit(v *Value) bool { a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] bitsize := uint(v.Type.Size()) * 8 - return ft.newLimit(v, a.mul(b.exp2(bitsize), bitsize)) + ft.newLimit(v, a.mul(b.exp2(bitsize), bitsize)) case OpRsh64x64, OpRsh64x32, OpRsh64x16, OpRsh64x8, OpRsh32x64, OpRsh32x32, OpRsh32x16, OpRsh32x8, OpRsh16x64, OpRsh16x32, OpRsh16x16, OpRsh16x8, @@ -1979,7 +1975,7 @@ func (ft *factsTable) flowLimit(v *Value) bool { // Easier to compute min and max of both than to write sign logic. 
vmin := min(a.min>>b.min, a.min>>b.max) vmax := max(a.max>>b.min, a.max>>b.max) - return ft.signedMinMax(v, vmin, vmax) + ft.signedMinMax(v, vmin, vmax) } case OpRsh64Ux64, OpRsh64Ux32, OpRsh64Ux16, OpRsh64Ux8, OpRsh32Ux64, OpRsh32Ux32, OpRsh32Ux16, OpRsh32Ux8, @@ -1988,7 +1984,7 @@ func (ft *factsTable) flowLimit(v *Value) bool { a := ft.limits[v.Args[0].ID] b := ft.limits[v.Args[1].ID] if b.min >= 0 { - return ft.unsignedMinMax(v, a.umin>>b.max, a.umax>>b.min) + ft.unsignedMinMax(v, a.umin>>b.max, a.umax>>b.min) } case OpDiv64, OpDiv32, OpDiv16, OpDiv8: a := ft.limits[v.Args[0].ID] @@ -2008,11 +2004,11 @@ func (ft *factsTable) flowLimit(v *Value) bool { if b.umin > 0 { lim = lim.unsignedMax(a.umax / b.umin) } - return ft.newLimit(v, lim) + ft.newLimit(v, lim) case OpMod64, OpMod32, OpMod16, OpMod8: - return ft.modLimit(true, v, v.Args[0], v.Args[1]) + ft.modLimit(true, v, v.Args[0], v.Args[1]) case OpMod64u, OpMod32u, OpMod16u, OpMod8u: - return ft.modLimit(false, v, v.Args[0], v.Args[1]) + ft.modLimit(false, v, v.Args[0], v.Args[1]) case OpPhi: // Compute the union of all the input phis. @@ -2032,9 +2028,8 @@ func (ft *factsTable) flowLimit(v *Value) bool { l.umin = min(l.umin, l2.umin) l.umax = max(l.umax, l2.umax) } - return ft.newLimit(v, l) + ft.newLimit(v, l) } - return false } // detectSliceLenRelation matches the pattern where @@ -2047,13 +2042,13 @@ func (ft *factsTable) flowLimit(v *Value) bool { // // Note that "index" is not useed for indexing in this pattern, but // in the motivating example (chunked slice iteration) it is. 
-func (ft *factsTable) detectSliceLenRelation(v *Value) (inferred bool) { +func (ft *factsTable) detectSliceLenRelation(v *Value) { if v.Op != OpSub64 { - return false + return } if !(v.Args[0].Op == OpSliceLen || v.Args[0].Op == OpSliceCap) { - return false + return } slice := v.Args[0].Args[0] @@ -2093,13 +2088,54 @@ func (ft *factsTable) detectSliceLenRelation(v *Value) (inferred bool) { if K < 0 { // We hate thinking about overflow continue } - inferred = inferred || ft.signedMin(v, K) + ft.signedMin(v, K) + } +} + +// v must be Sub{64,32,16,8}. +func (ft *factsTable) detectSubRelations(v *Value) { + // v = x-y + x := v.Args[0] + y := v.Args[1] + if x == y { + ft.signedMinMax(v, 0, 0) + return + } + xLim := ft.limits[x.ID] + yLim := ft.limits[y.ID] + + // Check if we might wrap around. If so, give up. + width := uint(v.Type.Size()) * 8 + if _, ok := safeSub(xLim.min, yLim.max, width); !ok { + return // x-y might underflow + } + if _, ok := safeSub(xLim.max, yLim.min, width); !ok { + return // x-y might overflow + } + + // Subtracting a positive number only makes + // things smaller. + if yLim.min >= 0 { + ft.update(v.Block, v, x, signed, lt|eq) + // TODO: is this worth it? + //if yLim.min > 0 { + // ft.update(v.Block, v, x, signed, lt) + //} + } + + // Subtracting a number from a bigger one + // can't go below 0. + if ft.orderS.OrderedOrEqual(y, x) { + ft.setNonNegative(v) + // TODO: is this worth it? + //if ft.orderS.Ordered(y, x) { + // ft.signedMin(v, 1) + //} } - return inferred } // x%d has been rewritten to x - (x/d)*d. 
-func (ft *factsTable) detectMod(v *Value) bool { +func (ft *factsTable) detectMod(v *Value) { var opDiv, opDivU, opMul, opConst Op switch v.Op { case OpSub64: @@ -2126,36 +2162,37 @@ func (ft *factsTable) detectMod(v *Value) bool { mul := v.Args[1] if mul.Op != opMul { - return false + return } div, con := mul.Args[0], mul.Args[1] if div.Op == opConst { div, con = con, div } if con.Op != opConst || (div.Op != opDiv && div.Op != opDivU) || div.Args[0] != v.Args[0] || div.Args[1].Op != opConst || div.Args[1].AuxInt != con.AuxInt { - return false + return } - return ft.modLimit(div.Op == opDiv, v, v.Args[0], con) + ft.modLimit(div.Op == opDiv, v, v.Args[0], con) } // modLimit sets v with facts derived from v = p % q. -func (ft *factsTable) modLimit(signed bool, v, p, q *Value) bool { +func (ft *factsTable) modLimit(signed bool, v, p, q *Value) { a := ft.limits[p.ID] b := ft.limits[q.ID] if signed { if a.min < 0 && b.min > 0 { - return ft.signedMinMax(v, -(b.max - 1), b.max-1) + ft.signedMinMax(v, -(b.max - 1), b.max-1) + return } if !(a.nonnegative() && b.nonnegative()) { // TODO: we could handle signed limits but I didn't bother. - return false + return } if a.min >= 0 && b.min > 0 { ft.setNonNegative(v) } } // Underflow in the arithmetic below is ok, it gives to MaxUint64 which does nothing to the limit. 
- return ft.unsignedMax(v, min(a.umax, b.umax-1)) + ft.unsignedMax(v, min(a.umax, b.umax-1)) } // getBranch returns the range restrictions added by p @@ -2466,15 +2503,13 @@ func addLocalFacts(ft *factsTable, b *Block) { xl := ft.limits[x.ID] y := add.Args[1] yl := ft.limits[y.ID] - if unsignedAddOverflows(xl.umax, yl.umax, add.Type) { - continue - } - - if xl.umax < uminDivisor { - ft.update(b, v, y, unsigned, lt|eq) - } - if yl.umax < uminDivisor { - ft.update(b, v, x, unsigned, lt|eq) + if !unsignedAddOverflows(xl.umax, yl.umax, add.Type) { + if xl.umax < uminDivisor { + ft.update(b, v, y, unsigned, lt|eq) + } + if yl.umax < uminDivisor { + ft.update(b, v, x, unsigned, lt|eq) + } } } ft.update(b, v, v.Args[0], unsigned, lt|eq) @@ -2993,16 +3028,14 @@ func (ft *factsTable) topoSortValuesInBlock(b *Block) { want := f.NumValues() scores := ft.reusedTopoSortScoresTable - if len(scores) < want { - if want <= cap(scores) { - scores = scores[:want] - } else { - if cap(scores) > 0 { - f.Cache.freeUintSlice(scores) - } - scores = f.Cache.allocUintSlice(want) - ft.reusedTopoSortScoresTable = scores + if want <= cap(scores) { + scores = scores[:want] + } else { + if cap(scores) > 0 { + f.Cache.freeUintSlice(scores) } + scores = f.Cache.allocUintSlice(want) + ft.reusedTopoSortScoresTable = scores } for _, v := range b.Values { diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 4d022555b7..11dd53bfc7 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -596,17 +596,18 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos var c *Value if vi.regs != 0 { // Copy from a register that v is already in. 
- r2 := pickReg(vi.regs) var current *Value - if !s.allocatable.contains(r2) { - current = v // v is in a fixed register + if vi.regs&^s.allocatable != 0 { + // v is in a fixed register, prefer that + current = v } else { + r2 := pickReg(vi.regs) if s.regs[r2].v != v { panic("bad register state") } current = s.regs[r2].c + s.usedSinceBlockStart |= regMask(1) << r2 } - s.usedSinceBlockStart |= regMask(1) << r2 c = s.curBlock.NewValue1(pos, OpCopy, v.Type, current) } else if v.rematerializeable() { // Rematerialize instead of loading from the spill location. diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 07308973b1..af2568ae89 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -2772,3 +2772,17 @@ func panicBoundsCCToAux(p PanicBoundsCC) Aux { func isDictArgSym(sym Sym) bool { return sym.(*ir.Name).Sym().Name == typecheck.LocalDictName } + +// When v is (IMake typ (StructMake ...)), convert to +// (IMake typ arg) where arg is the pointer-y argument to +// the StructMake (there must be exactly one). 
+func imakeOfStructMake(v *Value) *Value { + var arg *Value + for _, a := range v.Args[1].Args { + if a.Type.Size() > 0 { + arg = a + break + } + } + return v.Block.NewValue2(v.Pos, OpIMake, v.Type, v.Args[0], arg) +} diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 6af1558833..b3f790dbda 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -12556,6 +12556,54 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { } break } + // match: (MUL r:(MOVWUreg x) s:(MOVWUreg y)) + // cond: r.Uses == 1 && s.Uses == 1 + // result: (UMULL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r := v_0 + if r.Op != OpARM64MOVWUreg { + continue + } + x := r.Args[0] + s := v_1 + if s.Op != OpARM64MOVWUreg { + continue + } + y := s.Args[0] + if !(r.Uses == 1 && s.Uses == 1) { + continue + } + v.reset(OpARM64UMULL) + v.AddArg2(x, y) + return true + } + break + } + // match: (MUL r:(MOVWreg x) s:(MOVWreg y)) + // cond: r.Uses == 1 && s.Uses == 1 + // result: (MULL x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + r := v_0 + if r.Op != OpARM64MOVWreg { + continue + } + x := r.Args[0] + s := v_1 + if s.Op != OpARM64MOVWreg { + continue + } + y := s.Args[0] + if !(r.Uses == 1 && s.Uses == 1) { + continue + } + v.reset(OpARM64MULL) + v.AddArg2(x, y) + return true + } + break + } return false } func rewriteValueARM64_OpARM64MULW(v *Value) bool { @@ -25273,6 +25321,37 @@ func rewriteBlockARM64(b *Block) bool { b.resetWithControl(BlockARM64FGE, cc) return true } + // match: (TBNZ [0] (XORconst [1] x) yes no) + // result: (TBZ [0] x yes no) + for b.Controls[0].Op == OpARM64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64TBZ, x) + b.AuxInt = int64ToAuxInt(0) + return true + } + case BlockARM64TBZ: 
+ // match: (TBZ [0] (XORconst [1] x) yes no) + // result: (TBNZ [0] x yes no) + for b.Controls[0].Op == OpARM64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + if auxIntToInt64(b.AuxInt) != 0 { + break + } + b.resetWithControl(BlockARM64TBNZ, x) + b.AuxInt = int64ToAuxInt(0) + return true + } case BlockARM64UGE: // match: (UGE (FlagConstant [fc]) yes no) // cond: fc.uge() diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go index 4262d4e0fb..bf2dd114a9 100644 --- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go +++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go @@ -5866,7 +5866,6 @@ func rewriteValueLOONG64_OpLOONG64MULV(v *Value) bool { v_0 := v.Args[0] b := v.Block config := b.Func.Config - typ := &b.Func.Config.Types // match: (MULV _ (MOVVconst [0])) // result: (MOVVconst [0]) for { @@ -5911,44 +5910,6 @@ func rewriteValueLOONG64_OpLOONG64MULV(v *Value) bool { } break } - // match: (MULV (NEGV x) (MOVVconst [c])) - // result: (MULV x (MOVVconst [-c])) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLOONG64NEGV { - continue - } - x := v_0.Args[0] - if v_1.Op != OpLOONG64MOVVconst { - continue - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpLOONG64MULV) - v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(-c) - v.AddArg2(x, v0) - return true - } - break - } - // match: (MULV (NEGV x) (NEGV y)) - // result: (MULV x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLOONG64NEGV { - continue - } - x := v_0.Args[0] - if v_1.Op != OpLOONG64NEGV { - continue - } - y := v_1.Args[0] - v.reset(OpLOONG64MULV) - v.AddArg2(x, y) - return true - } - break - } // match: (MULV (MOVVconst [c]) (MOVVconst [d])) // result: (MOVVconst [c*d]) for { diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index 
191c7b3d48..284d88967b 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -7027,7 +7027,7 @@ func rewriteValueRISCV64_OpRISCV64ROL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ROL x (MOVDconst [val])) - // result: (RORI [int64(int8(-val)&63)] x) + // result: (RORI [-val&63] x) for { x := v_0 if v_1.Op != OpRISCV64MOVDconst { @@ -7035,7 +7035,7 @@ func rewriteValueRISCV64_OpRISCV64ROL(v *Value) bool { } val := auxIntToInt64(v_1.AuxInt) v.reset(OpRISCV64RORI) - v.AuxInt = int64ToAuxInt(int64(int8(-val) & 63)) + v.AuxInt = int64ToAuxInt(-val & 63) v.AddArg(x) return true } @@ -7057,7 +7057,7 @@ func rewriteValueRISCV64_OpRISCV64ROLW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ROLW x (MOVDconst [val])) - // result: (RORIW [int64(int8(-val)&31)] x) + // result: (RORIW [-val&31] x) for { x := v_0 if v_1.Op != OpRISCV64MOVDconst { @@ -7065,7 +7065,7 @@ func rewriteValueRISCV64_OpRISCV64ROLW(v *Value) bool { } val := auxIntToInt64(v_1.AuxInt) v.reset(OpRISCV64RORIW) - v.AuxInt = int64ToAuxInt(int64(int8(-val) & 31)) + v.AuxInt = int64ToAuxInt(-val & 31) v.AddArg(x) return true } @@ -7087,7 +7087,7 @@ func rewriteValueRISCV64_OpRISCV64ROR(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ROR x (MOVDconst [val])) - // result: (RORI [int64(val&63)] x) + // result: (RORI [val&63] x) for { x := v_0 if v_1.Op != OpRISCV64MOVDconst { @@ -7095,7 +7095,7 @@ func rewriteValueRISCV64_OpRISCV64ROR(v *Value) bool { } val := auxIntToInt64(v_1.AuxInt) v.reset(OpRISCV64RORI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) + v.AuxInt = int64ToAuxInt(val & 63) v.AddArg(x) return true } @@ -7105,7 +7105,7 @@ func rewriteValueRISCV64_OpRISCV64RORW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (RORW x (MOVDconst [val])) - // result: (RORIW [int64(val&31)] x) + // result: (RORIW [val&31] x) for { x := v_0 if v_1.Op != OpRISCV64MOVDconst { @@ -7113,7 +7113,7 @@ func 
rewriteValueRISCV64_OpRISCV64RORW(v *Value) bool { } val := auxIntToInt64(v_1.AuxInt) v.reset(OpRISCV64RORIW) - v.AuxInt = int64ToAuxInt(int64(val & 31)) + v.AuxInt = int64ToAuxInt(val & 31) v.AddArg(x) return true } @@ -7212,7 +7212,7 @@ func rewriteValueRISCV64_OpRISCV64SLL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SLL x (MOVDconst [val])) - // result: (SLLI [int64(val&63)] x) + // result: (SLLI [val&63] x) for { x := v_0 if v_1.Op != OpRISCV64MOVDconst { @@ -7220,7 +7220,7 @@ func rewriteValueRISCV64_OpRISCV64SLL(v *Value) bool { } val := auxIntToInt64(v_1.AuxInt) v.reset(OpRISCV64SLLI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) + v.AuxInt = int64ToAuxInt(val & 63) v.AddArg(x) return true } @@ -7246,7 +7246,7 @@ func rewriteValueRISCV64_OpRISCV64SLLI(v *Value) bool { } // match: (SLLI <t> [c] (ADD x x)) // cond: c < t.Size() * 8 - 1 - // result: (SLLI <t> [c+1] x) + // result: (SLLI [c+1] x) for { t := v.Type c := auxIntToInt64(v.AuxInt) @@ -7258,7 +7258,6 @@ func rewriteValueRISCV64_OpRISCV64SLLI(v *Value) bool { break } v.reset(OpRISCV64SLLI) - v.Type = t v.AuxInt = int64ToAuxInt(c + 1) v.AddArg(x) return true @@ -7286,7 +7285,7 @@ func rewriteValueRISCV64_OpRISCV64SLLW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SLLW x (MOVDconst [val])) - // result: (SLLIW [int64(val&31)] x) + // result: (SLLIW [val&31] x) for { x := v_0 if v_1.Op != OpRISCV64MOVDconst { @@ -7294,7 +7293,7 @@ func rewriteValueRISCV64_OpRISCV64SLLW(v *Value) bool { } val := auxIntToInt64(v_1.AuxInt) v.reset(OpRISCV64SLLIW) - v.AuxInt = int64ToAuxInt(int64(val & 31)) + v.AuxInt = int64ToAuxInt(val & 31) v.AddArg(x) return true } @@ -7304,7 +7303,7 @@ func rewriteValueRISCV64_OpRISCV64SLT(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SLT x (MOVDconst [val])) - // cond: val >= -2048 && val <= 2047 + // cond: is12Bit(val) // result: (SLTI [val] x) for { x := v_0 @@ -7312,7 +7311,7 @@ func rewriteValueRISCV64_OpRISCV64SLT(v *Value) bool { 
break } val := auxIntToInt64(v_1.AuxInt) - if !(val >= -2048 && val <= 2047) { + if !(is12Bit(val)) { break } v.reset(OpRISCV64SLTI) @@ -7363,22 +7362,6 @@ func rewriteValueRISCV64_OpRISCV64SLTI(v *Value) bool { v.AuxInt = int64ToAuxInt(1) return true } - // match: (SLTI [x] (ORI [y] _)) - // cond: y >= 0 && int64(y) >= int64(x) - // result: (MOVDconst [0]) - for { - x := auxIntToInt64(v.AuxInt) - if v_0.Op != OpRISCV64ORI { - break - } - y := auxIntToInt64(v_0.AuxInt) - if !(y >= 0 && int64(y) >= int64(x)) { - break - } - v.reset(OpRISCV64MOVDconst) - v.AuxInt = int64ToAuxInt(0) - return true - } return false } func rewriteValueRISCV64_OpRISCV64SLTIU(v *Value) bool { @@ -7433,7 +7416,7 @@ func rewriteValueRISCV64_OpRISCV64SLTU(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SLTU x (MOVDconst [val])) - // cond: val >= -2048 && val <= 2047 + // cond: is12Bit(val) // result: (SLTIU [val] x) for { x := v_0 @@ -7441,7 +7424,7 @@ func rewriteValueRISCV64_OpRISCV64SLTU(v *Value) bool { break } val := auxIntToInt64(v_1.AuxInt) - if !(val >= -2048 && val <= 2047) { + if !(is12Bit(val)) { break } v.reset(OpRISCV64SLTIU) @@ -7555,7 +7538,7 @@ func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SRA x (MOVDconst [val])) - // result: (SRAI [int64(val&63)] x) + // result: (SRAI [val&63] x) for { x := v_0 if v_1.Op != OpRISCV64MOVDconst { @@ -7563,7 +7546,7 @@ func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool { } val := auxIntToInt64(v_1.AuxInt) v.reset(OpRISCV64SRAI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) + v.AuxInt = int64ToAuxInt(val & 63) v.AddArg(x) return true } @@ -7572,11 +7555,10 @@ func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool { func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (SRAI <t> [x] (MOVWreg y)) + // match: (SRAI [x] (MOVWreg y)) // cond: x >= 0 && x <= 31 - // result: (SRAIW <t> [int64(x)] y) + // result: (SRAIW [x] y) for { - t := 
v.Type x := auxIntToInt64(v.AuxInt) if v_0.Op != OpRISCV64MOVWreg { break @@ -7586,8 +7568,7 @@ func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool { break } v.reset(OpRISCV64SRAIW) - v.Type = t - v.AuxInt = int64ToAuxInt(int64(x)) + v.AuxInt = int64ToAuxInt(x) v.AddArg(y) return true } @@ -7633,7 +7614,7 @@ func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool { v.AddArg(v0) return true } - // match: (SRAI <t> [x] (MOVWreg y)) + // match: (SRAI [x] (MOVWreg y)) // cond: x >= 32 // result: (SRAIW [31] y) for { @@ -7668,7 +7649,7 @@ func rewriteValueRISCV64_OpRISCV64SRAW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SRAW x (MOVDconst [val])) - // result: (SRAIW [int64(val&31)] x) + // result: (SRAIW [val&31] x) for { x := v_0 if v_1.Op != OpRISCV64MOVDconst { @@ -7676,7 +7657,7 @@ func rewriteValueRISCV64_OpRISCV64SRAW(v *Value) bool { } val := auxIntToInt64(v_1.AuxInt) v.reset(OpRISCV64SRAIW) - v.AuxInt = int64ToAuxInt(int64(val & 31)) + v.AuxInt = int64ToAuxInt(val & 31) v.AddArg(x) return true } @@ -7686,7 +7667,7 @@ func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SRL x (MOVDconst [val])) - // result: (SRLI [int64(val&63)] x) + // result: (SRLI [val&63] x) for { x := v_0 if v_1.Op != OpRISCV64MOVDconst { @@ -7694,7 +7675,7 @@ func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool { } val := auxIntToInt64(v_1.AuxInt) v.reset(OpRISCV64SRLI) - v.AuxInt = int64ToAuxInt(int64(val & 63)) + v.AuxInt = int64ToAuxInt(val & 63) v.AddArg(x) return true } @@ -7702,11 +7683,10 @@ func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool { } func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool { v_0 := v.Args[0] - // match: (SRLI <t> [x] (MOVWUreg y)) + // match: (SRLI [x] (MOVWUreg y)) // cond: x >= 0 && x <= 31 - // result: (SRLIW <t> [int64(x)] y) + // result: (SRLIW [x] y) for { - t := v.Type x := auxIntToInt64(v.AuxInt) if v_0.Op != OpRISCV64MOVWUreg { break @@ -7716,16 +7696,14 @@ func 
rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool { break } v.reset(OpRISCV64SRLIW) - v.Type = t - v.AuxInt = int64ToAuxInt(int64(x)) + v.AuxInt = int64ToAuxInt(x) v.AddArg(y) return true } - // match: (SRLI <t> [x] (MOVBUreg y)) + // match: (SRLI [x] (MOVBUreg y)) // cond: x >= 8 - // result: (MOVDconst <t> [0]) + // result: (MOVDconst [0]) for { - t := v.Type x := auxIntToInt64(v.AuxInt) if v_0.Op != OpRISCV64MOVBUreg { break @@ -7734,15 +7712,13 @@ func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool { break } v.reset(OpRISCV64MOVDconst) - v.Type = t v.AuxInt = int64ToAuxInt(0) return true } - // match: (SRLI <t> [x] (MOVHUreg y)) + // match: (SRLI [x] (MOVHUreg y)) // cond: x >= 16 - // result: (MOVDconst <t> [0]) + // result: (MOVDconst [0]) for { - t := v.Type x := auxIntToInt64(v.AuxInt) if v_0.Op != OpRISCV64MOVHUreg { break @@ -7751,15 +7727,13 @@ func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool { break } v.reset(OpRISCV64MOVDconst) - v.Type = t v.AuxInt = int64ToAuxInt(0) return true } - // match: (SRLI <t> [x] (MOVWUreg y)) + // match: (SRLI [x] (MOVWUreg y)) // cond: x >= 32 - // result: (MOVDconst <t> [0]) + // result: (MOVDconst [0]) for { - t := v.Type x := auxIntToInt64(v.AuxInt) if v_0.Op != OpRISCV64MOVWUreg { break @@ -7768,7 +7742,6 @@ func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool { break } v.reset(OpRISCV64MOVDconst) - v.Type = t v.AuxInt = int64ToAuxInt(0) return true } @@ -7790,7 +7763,7 @@ func rewriteValueRISCV64_OpRISCV64SRLW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SRLW x (MOVDconst [val])) - // result: (SRLIW [int64(val&31)] x) + // result: (SRLIW [val&31] x) for { x := v_0 if v_1.Op != OpRISCV64MOVDconst { @@ -7798,7 +7771,7 @@ func rewriteValueRISCV64_OpRISCV64SRLW(v *Value) bool { } val := auxIntToInt64(v_1.AuxInt) v.reset(OpRISCV64SRLIW) - v.AuxInt = int64ToAuxInt(int64(val & 31)) + v.AuxInt = int64ToAuxInt(val & 31) v.AddArg(x) return true } diff --git a/src/cmd/compile/internal/ssa/rewritedec.go 
b/src/cmd/compile/internal/ssa/rewritedec.go index 16d0269210..c45034ead0 100644 --- a/src/cmd/compile/internal/ssa/rewritedec.go +++ b/src/cmd/compile/internal/ssa/rewritedec.go @@ -279,11 +279,20 @@ func rewriteValuedec_OpIData(v *Value) bool { func rewriteValuedec_OpIMake(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (IMake _typ (StructMake val)) + // match: (IMake _typ (StructMake ___)) + // result: imakeOfStructMake(v) + for { + if v_1.Op != OpStructMake { + break + } + v.copyOf(imakeOfStructMake(v)) + return true + } + // match: (IMake _typ (ArrayMake1 val)) // result: (IMake _typ val) for { _typ := v_0 - if v_1.Op != OpStructMake || len(v_1.Args) != 1 { + if v_1.Op != OpArrayMake1 { break } val := v_1.Args[0] @@ -839,17 +848,47 @@ func rewriteValuedec_OpStructMake(v *Value) bool { func rewriteValuedec_OpStructSelect(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (StructSelect [0] (IData x)) + // match: (StructSelect (IData x)) + // cond: v.Type.Size() > 0 // result: (IData x) for { - if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData { + if v_0.Op != OpIData { break } x := v_0.Args[0] + if !(v.Type.Size() > 0) { + break + } v.reset(OpIData) v.AddArg(x) return true } + // match: (StructSelect (IData x)) + // cond: v.Type.Size() == 0 && v.Type.IsStruct() + // result: (StructMake) + for { + if v_0.Op != OpIData { + break + } + if !(v.Type.Size() == 0 && v.Type.IsStruct()) { + break + } + v.reset(OpStructMake) + return true + } + // match: (StructSelect (IData x)) + // cond: v.Type.Size() == 0 && v.Type.IsArray() + // result: (ArrayMake0) + for { + if v_0.Op != OpIData { + break + } + if !(v.Type.Size() == 0 && v.Type.IsArray()) { + break + } + v.reset(OpArrayMake0) + return true + } // match: (StructSelect [i] x:(StructMake ___)) // result: x.Args[i] for { @@ -861,13 +900,10 @@ func rewriteValuedec_OpStructSelect(v *Value) bool { v.copyOf(x.Args[i]) return true } - // match: (StructSelect [0] x) + // match: (StructSelect x) // 
cond: x.Type.IsPtrShaped() // result: x for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } x := v_0 if !(x.Type.IsPtrShaped()) { break diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 5b5494f43a..5c183fc2a6 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -5332,6 +5332,182 @@ func rewriteValuegeneric_OpAndB(v *Value) bool { } break } + // match: (AndB (Neq64 x cv:(Const64 [c])) (Neq64 x (Const64 [d]))) + // cond: c|d == c && oneBit(c^d) + // result: (Neq64 (Or64 <x.Type> x (Const64 <x.Type> [c^d])) cv) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq64 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + cv := v_0_1 + if cv.Op != OpConst64 { + continue + } + c := auxIntToInt64(cv.AuxInt) + if v_1.Op != OpNeq64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 || v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c|d == c && oneBit(c^d)) { + continue + } + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpOr64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = int64ToAuxInt(c ^ d) + v0.AddArg2(x, v1) + v.AddArg2(v0, cv) + return true + } + } + } + break + } + // match: (AndB (Neq32 x cv:(Const32 [c])) (Neq32 x (Const32 [d]))) + // cond: c|d == c && oneBit(c^d) + // result: (Neq32 (Or32 <x.Type> x (Const32 <x.Type> [c^d])) cv) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + cv := v_0_1 + if cv.Op != OpConst32 { + continue + } 
+ c := auxIntToInt32(cv.AuxInt) + if v_1.Op != OpNeq32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 || v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(c|d == c && oneBit(c^d)) { + continue + } + v.reset(OpNeq32) + v0 := b.NewValue0(v.Pos, OpOr32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = int32ToAuxInt(c ^ d) + v0.AddArg2(x, v1) + v.AddArg2(v0, cv) + return true + } + } + } + break + } + // match: (AndB (Neq16 x cv:(Const16 [c])) (Neq16 x (Const16 [d]))) + // cond: c|d == c && oneBit(c^d) + // result: (Neq16 (Or16 <x.Type> x (Const16 <x.Type> [c^d])) cv) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq16 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + cv := v_0_1 + if cv.Op != OpConst16 { + continue + } + c := auxIntToInt16(cv.AuxInt) + if v_1.Op != OpNeq16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 || v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(c|d == c && oneBit(c^d)) { + continue + } + v.reset(OpNeq16) + v0 := b.NewValue0(v.Pos, OpOr16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = int16ToAuxInt(c ^ d) + v0.AddArg2(x, v1) + v.AddArg2(v0, cv) + return true + } + } + } + break + } + // match: (AndB (Neq8 x cv:(Const8 [c])) (Neq8 x (Const8 [d]))) + // cond: c|d == c && oneBit(c^d) + // result: (Neq8 (Or8 <x.Type> x (Const8 <x.Type> [c^d])) cv) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeq8 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = 
_i1+1, v_0_1, v_0_0 { + x := v_0_0 + cv := v_0_1 + if cv.Op != OpConst8 { + continue + } + c := auxIntToInt8(cv.AuxInt) + if v_1.Op != OpNeq8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 || v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(c|d == c && oneBit(c^d)) { + continue + } + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpOr8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = int8ToAuxInt(c ^ d) + v0.AddArg2(x, v1) + v.AddArg2(v0, cv) + return true + } + } + } + break + } return false } func rewriteValuegeneric_OpArraySelect(v *Value) bool { @@ -8809,16 +8985,13 @@ func rewriteValuegeneric_OpFloor(v *Value) bool { func rewriteValuegeneric_OpIMake(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (IMake _typ (StructMake val)) - // result: (IMake _typ val) + // match: (IMake _typ (StructMake ___)) + // result: imakeOfStructMake(v) for { - _typ := v_0 - if v_1.Op != OpStructMake || len(v_1.Args) != 1 { + if v_1.Op != OpStructMake { break } - val := v_1.Args[0] - v.reset(OpIMake) - v.AddArg2(_typ, val) + v.copyOf(imakeOfStructMake(v)) return true } // match: (IMake _typ (ArrayMake1 val)) @@ -16610,6 +16783,45 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { } break } + // match: (Mul16 (Const16 <t> [c]) (Neg16 x)) + // result: (Mul16 x (Const16 <t> [-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpNeg16 { + continue + } + x := v_1.Args[0] + v.reset(OpMul16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(-c) + v.AddArg2(x, v0) + return true + } + break + } + // match: (Mul16 (Neg16 x) (Neg16 y)) + // result: (Mul16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeg16 { + continue + } + x := 
v_0.Args[0] + if v_1.Op != OpNeg16 { + continue + } + y := v_1.Args[0] + v.reset(OpMul16) + v.AddArg2(x, y) + return true + } + break + } // match: (Mul16 (Const16 <t> [c]) (Add16 <t> (Const16 <t> [d]) x)) // cond: !isPowerOfTwo(c) // result: (Add16 (Const16 <t> [c*d]) (Mul16 <t> (Const16 <t> [c]) x)) @@ -16821,6 +17033,45 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { } break } + // match: (Mul32 (Const32 <t> [c]) (Neg32 x)) + // result: (Mul32 x (Const32 <t> [-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpNeg32 { + continue + } + x := v_1.Args[0] + v.reset(OpMul32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(-c) + v.AddArg2(x, v0) + return true + } + break + } + // match: (Mul32 (Neg32 x) (Neg32 y)) + // result: (Mul32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeg32 { + continue + } + x := v_0.Args[0] + if v_1.Op != OpNeg32 { + continue + } + y := v_1.Args[0] + v.reset(OpMul32) + v.AddArg2(x, y) + return true + } + break + } // match: (Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x)) // cond: !isPowerOfTwo(c) // result: (Add32 (Const32 <t> [c*d]) (Mul32 <t> (Const32 <t> [c]) x)) @@ -17193,6 +17444,45 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { } break } + // match: (Mul64 (Const64 <t> [c]) (Neg64 x)) + // result: (Mul64 x (Const64 <t> [-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + t := v_0.Type + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpNeg64 { + continue + } + x := v_1.Args[0] + v.reset(OpMul64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(-c) + v.AddArg2(x, v0) + return true + } + break + } + // match: (Mul64 (Neg64 x) (Neg64 y)) + // result: (Mul64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op 
!= OpNeg64 { + continue + } + x := v_0.Args[0] + if v_1.Op != OpNeg64 { + continue + } + y := v_1.Args[0] + v.reset(OpMul64) + v.AddArg2(x, y) + return true + } + break + } // match: (Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x)) // cond: !isPowerOfTwo(c) // result: (Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x)) @@ -17565,6 +17855,45 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { } break } + // match: (Mul8 (Const8 <t> [c]) (Neg8 x)) + // result: (Mul8 x (Const8 <t> [-c])) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + t := v_0.Type + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpNeg8 { + continue + } + x := v_1.Args[0] + v.reset(OpMul8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(-c) + v.AddArg2(x, v0) + return true + } + break + } + // match: (Mul8 (Neg8 x) (Neg8 y)) + // result: (Mul8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpNeg8 { + continue + } + x := v_0.Args[0] + if v_1.Op != OpNeg8 { + continue + } + y := v_1.Args[0] + v.reset(OpMul8) + v.AddArg2(x, y) + return true + } + break + } // match: (Mul8 (Const8 <t> [c]) (Add8 <t> (Const8 <t> [d]) x)) // cond: !isPowerOfTwo(c) // result: (Add8 (Const8 <t> [c*d]) (Mul8 <t> (Const8 <t> [c]) x)) @@ -23242,6 +23571,182 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } + // match: (OrB (Eq64 x cv:(Const64 [c])) (Eq64 x (Const64 [d]))) + // cond: c|d == c && oneBit(c^d) + // result: (Eq64 (Or64 <x.Type> x (Const64 <x.Type> [c^d])) cv) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpEq64 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + cv := v_0_1 + if cv.Op != OpConst64 { + continue + } + c := auxIntToInt64(cv.AuxInt) + if v_1.Op != OpEq64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] 
+ v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 || v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c|d == c && oneBit(c^d)) { + continue + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpOr64, x.Type) + v1 := b.NewValue0(v.Pos, OpConst64, x.Type) + v1.AuxInt = int64ToAuxInt(c ^ d) + v0.AddArg2(x, v1) + v.AddArg2(v0, cv) + return true + } + } + } + break + } + // match: (OrB (Eq32 x cv:(Const32 [c])) (Eq32 x (Const32 [d]))) + // cond: c|d == c && oneBit(c^d) + // result: (Eq32 (Or32 <x.Type> x (Const32 <x.Type> [c^d])) cv) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpEq32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + cv := v_0_1 + if cv.Op != OpConst32 { + continue + } + c := auxIntToInt32(cv.AuxInt) + if v_1.Op != OpEq32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 || v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(c|d == c && oneBit(c^d)) { + continue + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpOr32, x.Type) + v1 := b.NewValue0(v.Pos, OpConst32, x.Type) + v1.AuxInt = int32ToAuxInt(c ^ d) + v0.AddArg2(x, v1) + v.AddArg2(v0, cv) + return true + } + } + } + break + } + // match: (OrB (Eq16 x cv:(Const16 [c])) (Eq16 x (Const16 [d]))) + // cond: c|d == c && oneBit(c^d) + // result: (Eq16 (Or16 <x.Type> x (Const16 <x.Type> [c^d])) cv) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpEq16 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + cv := v_0_1 + if cv.Op != OpConst16 { + continue + } + c := auxIntToInt16(cv.AuxInt) 
+ if v_1.Op != OpEq16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 || v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(c|d == c && oneBit(c^d)) { + continue + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpOr16, x.Type) + v1 := b.NewValue0(v.Pos, OpConst16, x.Type) + v1.AuxInt = int16ToAuxInt(c ^ d) + v0.AddArg2(x, v1) + v.AddArg2(v0, cv) + return true + } + } + } + break + } + // match: (OrB (Eq8 x cv:(Const8 [c])) (Eq8 x (Const8 [d]))) + // cond: c|d == c && oneBit(c^d) + // result: (Eq8 (Or8 <x.Type> x (Const8 <x.Type> [c^d])) cv) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpEq8 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + cv := v_0_1 + if cv.Op != OpConst8 { + continue + } + c := auxIntToInt8(cv.AuxInt) + if v_1.Op != OpEq8 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 || v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(c|d == c && oneBit(c^d)) { + continue + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpOr8, x.Type) + v1 := b.NewValue0(v.Pos, OpConst8, x.Type) + v1.AuxInt = int8ToAuxInt(c ^ d) + v0.AddArg2(x, v1) + v.AddArg2(v0, cv) + return true + } + } + } + break + } // match: (OrB (Neq64F x x) (Less64F x y:(Const64F [c]))) // result: (Not (Leq64F y x)) for { @@ -31601,17 +32106,47 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { v0.AddArg2(v1, mem) return true } - // match: (StructSelect [0] (IData x)) + // match: (StructSelect (IData x)) + // cond: v.Type.Size() > 0 // result: (IData x) for { - if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData { + if v_0.Op != OpIData { break } x := 
v_0.Args[0] + if !(v.Type.Size() > 0) { + break + } v.reset(OpIData) v.AddArg(x) return true } + // match: (StructSelect (IData x)) + // cond: v.Type.Size() == 0 && v.Type.IsStruct() + // result: (StructMake) + for { + if v_0.Op != OpIData { + break + } + if !(v.Type.Size() == 0 && v.Type.IsStruct()) { + break + } + v.reset(OpStructMake) + return true + } + // match: (StructSelect (IData x)) + // cond: v.Type.Size() == 0 && v.Type.IsArray() + // result: (ArrayMake0) + for { + if v_0.Op != OpIData { + break + } + if !(v.Type.Size() == 0 && v.Type.IsArray()) { + break + } + v.reset(OpArrayMake0) + return true + } return false } func rewriteValuegeneric_OpSub16(v *Value) bool { diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index e854cd9895..e59451c773 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -124,6 +124,11 @@ func InitConfig() { ir.Syms.GCWriteBarrier[7] = typecheck.LookupRuntimeFunc("gcWriteBarrier8") ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded") ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice") + ir.Syms.GrowsliceBuf = typecheck.LookupRuntimeFunc("growsliceBuf") + ir.Syms.MoveSlice = typecheck.LookupRuntimeFunc("moveSlice") + ir.Syms.MoveSliceNoScan = typecheck.LookupRuntimeFunc("moveSliceNoScan") + ir.Syms.MoveSliceNoCap = typecheck.LookupRuntimeFunc("moveSliceNoCap") + ir.Syms.MoveSliceNoCapNoScan = typecheck.LookupRuntimeFunc("moveSliceNoCapNoScan") ir.Syms.InterfaceSwitch = typecheck.LookupRuntimeFunc("interfaceSwitch") for i := 1; i < len(ir.Syms.MallocGCSmallNoScan); i++ { ir.Syms.MallocGCSmallNoScan[i] = typecheck.LookupRuntimeFunc(fmt.Sprintf("mallocgcSmallNoScanSC%d", i)) @@ -1096,6 +1101,23 @@ type state struct { // Block starting position, indexed by block id. blockStarts []src.XPos + + // Information for stack allocation. Indexed by the first argument + // to an append call. 
Normally a slice-typed variable, but not always. + backingStores map[ir.Node]*backingStoreInfo +} + +type backingStoreInfo struct { + // Size of backing store array (in elements) + K int64 + // Stack-allocated backing store variable. + store *ir.Name + // Dynamic boolean variable marking the fact that we used this backing store. + used *ir.Name + // Have we used this variable statically yet? This is just a hint + // to avoid checking the dynamic variable if the answer is obvious. + // (usedStatic == true implies used == true) + usedStatic bool } type funcLine struct { @@ -3683,6 +3705,9 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value { case ir.OAPPEND: return s.append(n.(*ir.CallExpr), false) + case ir.OMOVE2HEAP: + return s.move2heap(n.(*ir.MoveToHeapExpr)) + case ir.OMIN, ir.OMAX: return s.minMax(n.(*ir.CallExpr)) @@ -3744,6 +3769,68 @@ func (s *state) resultAddrOfCall(c *ssa.Value, which int64, t *types.Type) *ssa. return addr } +// Get backing store information for an append call. +func (s *state) getBackingStoreInfoForAppend(n *ir.CallExpr) *backingStoreInfo { + if n.Esc() != ir.EscNone { + return nil + } + return s.getBackingStoreInfo(n.Args[0]) +} +func (s *state) getBackingStoreInfo(n ir.Node) *backingStoreInfo { + t := n.Type() + et := t.Elem() + maxStackSize := int64(base.Debug.VariableMakeThreshold) + if et.Size() == 0 || et.Size() > maxStackSize { + return nil + } + if base.Flag.N != 0 { + return nil + } + if !base.VariableMakeHash.MatchPos(n.Pos(), nil) { + return nil + } + i := s.backingStores[n] + if i != nil { + return i + } + + // Build type of backing store. + K := maxStackSize / et.Size() // rounds down + KT := types.NewArray(et, K) + KT.SetNoalg(true) + types.CalcArraySize(KT) + // Align more than naturally for the type KT. See issue 73199. 
+ align := types.NewArray(types.Types[types.TUINTPTR], 0) + types.CalcArraySize(align) + storeTyp := types.NewStruct([]*types.Field{ + {Sym: types.BlankSym, Type: align}, + {Sym: types.BlankSym, Type: KT}, + }) + storeTyp.SetNoalg(true) + types.CalcStructSize(storeTyp) + + // Make backing store variable. + backingStore := typecheck.TempAt(n.Pos(), s.curfn, storeTyp) + backingStore.SetAddrtaken(true) + + // Make "used" boolean. + used := typecheck.TempAt(n.Pos(), s.curfn, types.Types[types.TBOOL]) + if s.curBlock == s.f.Entry { + s.vars[used] = s.constBool(false) + } else { + // initialize this variable at end of entry block + s.defvars[s.f.Entry.ID][used] = s.constBool(false) + } + + // Initialize an info structure. + if s.backingStores == nil { + s.backingStores = map[ir.Node]*backingStoreInfo{} + } + i = &backingStoreInfo{K: K, store: backingStore, used: used, usedStatic: false} + s.backingStores[n] = i + return i +} + // append converts an OAPPEND node to SSA. // If inplace is false, it converts the OAPPEND expression n to an ssa.Value, // adds it to s, and returns the Value. @@ -3834,9 +3921,29 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { // A stack-allocated backing store could be used at every // append that qualifies, but we limit it in some cases to // avoid wasted code and stack space. - // TODO: handle ... append case. - maxStackSize := int64(base.Debug.VariableMakeThreshold) - if !inplace && n.Esc() == ir.EscNone && et.Size() > 0 && et.Size() <= maxStackSize && base.Flag.N == 0 && base.VariableMakeHash.MatchPos(n.Pos(), nil) && !s.appendTargets[sn] { + // + // Note that we have two different strategies. + // 1. The standard strategy is just to allocate the full + // backing store at the first append. + // 2. An alternate strategy is used when + // a. The backing store eventually escapes via move2heap + // and b. 
The capacity is used somehow + // In this case, we don't want to just allocate + // the full buffer at the first append, because when + // we move2heap the buffer to the heap when it escapes, + // we might end up wasting memory because we can't + // change the capacity. + // So in this case we use growsliceBuf to reuse the buffer + // and walk one step up the size class ladder each time. + // + // TODO: handle ... append case? Currently we handle only + // a fixed number of appended elements. + var info *backingStoreInfo + if !inplace { + info = s.getBackingStoreInfoForAppend(n) + } + + if !inplace && info != nil && !n.UseBuf && !info.usedStatic { // if l <= K { // if !used { // if oldLen == 0 { @@ -3860,43 +3967,19 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { // It is ok to do it more often, but it is probably helpful only for // the first instance. TODO: this could use more tuning. Using ir.Node // as the key works for *ir.Name instances but probably nothing else. - if s.appendTargets == nil { - s.appendTargets = map[ir.Node]bool{} - } - s.appendTargets[sn] = true - - K := maxStackSize / et.Size() // rounds down - KT := types.NewArray(et, K) - KT.SetNoalg(true) - types.CalcArraySize(KT) - // Align more than naturally for the type KT. See issue 73199. - align := types.NewArray(types.Types[types.TUINTPTR], 0) - types.CalcArraySize(align) - storeTyp := types.NewStruct([]*types.Field{ - {Sym: types.BlankSym, Type: align}, - {Sym: types.BlankSym, Type: KT}, - }) - storeTyp.SetNoalg(true) - types.CalcStructSize(storeTyp) + info.usedStatic = true + // TODO: unset usedStatic somehow? usedTestBlock := s.f.NewBlock(ssa.BlockPlain) oldLenTestBlock := s.f.NewBlock(ssa.BlockPlain) bodyBlock := s.f.NewBlock(ssa.BlockPlain) growSlice := s.f.NewBlock(ssa.BlockPlain) - - // Make "used" boolean. 
- tBool := types.Types[types.TBOOL] - used := typecheck.TempAt(n.Pos(), s.curfn, tBool) - s.defvars[s.f.Entry.ID][used] = s.constBool(false) // initialize this variable at fn entry - - // Make backing store variable. tInt := types.Types[types.TINT] - backingStore := typecheck.TempAt(n.Pos(), s.curfn, storeTyp) - backingStore.SetAddrtaken(true) + tBool := types.Types[types.TBOOL] // if l <= K s.startBlock(grow) - kTest := s.newValue2(s.ssaOp(ir.OLE, tInt), tBool, l, s.constInt(tInt, K)) + kTest := s.newValue2(s.ssaOp(ir.OLE, tInt), tBool, l, s.constInt(tInt, info.K)) b := s.endBlock() b.Kind = ssa.BlockIf b.SetControl(kTest) @@ -3906,7 +3989,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { // if !used s.startBlock(usedTestBlock) - usedTest := s.newValue1(ssa.OpNot, tBool, s.expr(used)) + usedTest := s.newValue1(ssa.OpNot, tBool, s.expr(info.used)) b = s.endBlock() b.Kind = ssa.BlockIf b.SetControl(usedTest) @@ -3927,18 +4010,18 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { // var store struct { _ [0]uintptr; arr [K]T } s.startBlock(bodyBlock) if et.HasPointers() { - s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, backingStore, s.mem()) + s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, info.store, s.mem()) } - addr := s.addr(backingStore) - s.zero(storeTyp, addr) + addr := s.addr(info.store) + s.zero(info.store.Type(), addr) // s = store.arr[:l:K] s.vars[ptrVar] = addr s.vars[lenVar] = l // nargs would also be ok because of the oldLen==0 test. 
- s.vars[capVar] = s.constInt(tInt, K) + s.vars[capVar] = s.constInt(tInt, info.K) // used = true - s.assign(used, s.constBool(true), false, 0) + s.assign(info.used, s.constBool(true), false, 0) b = s.endBlock() b.AddEdgeTo(assign) @@ -3949,7 +4032,25 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { // Call growslice s.startBlock(grow) taddr := s.expr(n.Fun) - r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{n.Type()}, p, l, c, nargs, taddr) + var r []*ssa.Value + if info != nil && n.UseBuf { + // Use stack-allocated buffer as backing store, if we can. + if et.HasPointers() && !info.usedStatic { + // Initialize in the function header. Not the best place, + // but it makes sure we don't scan this area before it is + // initialized. + mem := s.defvars[s.f.Entry.ID][memVar] + mem = s.f.Entry.NewValue1A(n.Pos(), ssa.OpVarDef, types.TypeMem, info.store, mem) + addr := s.f.Entry.NewValue2A(n.Pos(), ssa.OpLocalAddr, types.NewPtr(info.store.Type()), info.store, s.sp, mem) + mem = s.f.Entry.NewValue2I(n.Pos(), ssa.OpZero, types.TypeMem, info.store.Type().Size(), addr, mem) + mem.Aux = info.store.Type() + s.defvars[s.f.Entry.ID][memVar] = mem + info.usedStatic = true + } + r = s.rtcall(ir.Syms.GrowsliceBuf, true, []*types.Type{n.Type()}, p, l, c, nargs, taddr, s.addr(info.store), s.constInt(types.Types[types.TINT], info.K)) + } else { + r = s.rtcall(ir.Syms.Growslice, true, []*types.Type{n.Type()}, p, l, c, nargs, taddr) + } // Decompose output slice p = s.newValue1(ssa.OpSlicePtr, pt, r[0]) @@ -4036,6 +4137,95 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value { return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c) } +func (s *state) move2heap(n *ir.MoveToHeapExpr) *ssa.Value { + // s := n.Slice + // if s.ptr points to current stack frame { + // s2 := make([]T, s.len, s.cap) + // copy(s2[:cap], s[:cap]) + // s = s2 + // } + // return s + + slice := s.expr(n.Slice) + et := slice.Type.Elem() + pt := types.NewPtr(et) + + info := 
s.getBackingStoreInfo(n) + if info == nil { + // Backing store will never be stack allocated, so + // move2heap is a no-op. + return slice + } + + // Decomposse input slice. + p := s.newValue1(ssa.OpSlicePtr, pt, slice) + l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice) + c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice) + + moveBlock := s.f.NewBlock(ssa.BlockPlain) + mergeBlock := s.f.NewBlock(ssa.BlockPlain) + + s.vars[ptrVar] = p + s.vars[lenVar] = l + s.vars[capVar] = c + + // Decide if we need to move the slice backing store. + // It needs to be moved if it is currently on the stack. + sub := ssa.OpSub64 + less := ssa.OpLess64U + if s.config.PtrSize == 4 { + sub = ssa.OpSub32 + less = ssa.OpLess32U + } + callerSP := s.newValue1(ssa.OpGetCallerSP, types.Types[types.TUINTPTR], s.mem()) + frameSize := s.newValue2(sub, types.Types[types.TUINTPTR], callerSP, s.sp) + pInt := s.newValue2(ssa.OpConvert, types.Types[types.TUINTPTR], p, s.mem()) + off := s.newValue2(sub, types.Types[types.TUINTPTR], pInt, s.sp) + cond := s.newValue2(less, types.Types[types.TBOOL], off, frameSize) + + b := s.endBlock() + b.Kind = ssa.BlockIf + b.Likely = ssa.BranchUnlikely // fast path is to not have to call into runtime + b.SetControl(cond) + b.AddEdgeTo(moveBlock) + b.AddEdgeTo(mergeBlock) + + // Move the slice to heap + s.startBlock(moveBlock) + var newSlice *ssa.Value + if et.HasPointers() { + typ := s.expr(n.RType) + if n.PreserveCapacity { + newSlice = s.rtcall(ir.Syms.MoveSlice, true, []*types.Type{slice.Type}, typ, p, l, c)[0] + } else { + newSlice = s.rtcall(ir.Syms.MoveSliceNoCap, true, []*types.Type{slice.Type}, typ, p, l)[0] + } + } else { + elemSize := s.constInt(types.Types[types.TUINTPTR], et.Size()) + if n.PreserveCapacity { + newSlice = s.rtcall(ir.Syms.MoveSliceNoScan, true, []*types.Type{slice.Type}, elemSize, p, l, c)[0] + } else { + newSlice = s.rtcall(ir.Syms.MoveSliceNoCapNoScan, true, []*types.Type{slice.Type}, elemSize, p, l)[0] + } 
+ } + // Decompose output slice + s.vars[ptrVar] = s.newValue1(ssa.OpSlicePtr, pt, newSlice) + s.vars[lenVar] = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], newSlice) + s.vars[capVar] = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], newSlice) + b = s.endBlock() + b.AddEdgeTo(mergeBlock) + + // Merge fast path (no moving) and slow path (moved) + s.startBlock(mergeBlock) + p = s.variable(ptrVar, pt) // generates phi for ptr + l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len + c = s.variable(capVar, types.Types[types.TINT]) // generates phi for cap + delete(s.vars, ptrVar) + delete(s.vars, lenVar) + delete(s.vars, capVar) + return s.newValue3(ssa.OpSliceMake, slice.Type, p, l, c) +} + // minMax converts an OMIN/OMAX builtin call into SSA. func (s *state) minMax(n *ir.CallExpr) *ssa.Value { // The OMIN/OMAX builtin is variadic, but its semantics are diff --git a/src/cmd/compile/internal/test/testdata/arith_test.go b/src/cmd/compile/internal/test/testdata/arith_test.go index 8984cd3e26..34ac73c068 100644 --- a/src/cmd/compile/internal/test/testdata/arith_test.go +++ b/src/cmd/compile/internal/test/testdata/arith_test.go @@ -445,6 +445,19 @@ func testBitwiseRshU_ssa(a uint32, b, c uint32) uint32 { } //go:noinline +func orLt_ssa(x int) bool { + y := x - x + return (x | 2) < y +} + +// test riscv64 SLTI rules +func testSetIfLessThan(t *testing.T) { + if want, got := true, orLt_ssa(-7); got != want { + t.Errorf("orLt_ssa(-7) = %t want %t", got, want) + } +} + +//go:noinline func testShiftCX_ssa() int { v1 := uint8(3) v4 := (v1 * v1) ^ v1 | v1 - v1 - v1&v1 ^ uint8(3+2) + v1*1>>0 - v1 | 1 | v1<<(2*3|0-0*0^1) @@ -977,6 +990,7 @@ func TestArithmetic(t *testing.T) { testRegallocCVSpill(t) testSubqToNegq(t) testBitwiseLogic(t) + testSetIfLessThan(t) testOcom(t) testLrot(t) testShiftCX(t) diff --git a/src/cmd/compile/internal/typecheck/_builtin/runtime.go b/src/cmd/compile/internal/typecheck/_builtin/runtime.go index 3c9707252e..3e2324b528 
100644 --- a/src/cmd/compile/internal/typecheck/_builtin/runtime.go +++ b/src/cmd/compile/internal/typecheck/_builtin/runtime.go @@ -195,6 +195,7 @@ func makeslice(typ *byte, len int, cap int) unsafe.Pointer func makeslice64(typ *byte, len int64, cap int64) unsafe.Pointer func makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer func growslice(oldPtr *any, newLen, oldCap, num int, et *byte) (ary []any) +func growsliceBuf(oldPtr *any, newLen, oldCap, num int, et *byte, buf *any, bufLen int) (ary []any) func unsafeslicecheckptr(typ *byte, ptr unsafe.Pointer, len int64) func panicunsafeslicelen() func panicunsafeslicenilptr() @@ -202,6 +203,11 @@ func unsafestringcheckptr(ptr unsafe.Pointer, len int64) func panicunsafestringlen() func panicunsafestringnilptr() +func moveSlice(typ *byte, old *byte, len, cap int) (*byte, int, int) +func moveSliceNoScan(elemSize uintptr, old *byte, len, cap int) (*byte, int, int) +func moveSliceNoCap(typ *byte, old *byte, len int) (*byte, int, int) +func moveSliceNoCapNoScan(elemSize uintptr, old *byte, len int) (*byte, int, int) + func memmove(to *any, frm *any, length uintptr) func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) func memclrHasPointers(ptr unsafe.Pointer, n uintptr) diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go index eea7fd7d05..537de2cbe9 100644 --- a/src/cmd/compile/internal/typecheck/builtin.go +++ b/src/cmd/compile/internal/typecheck/builtin.go @@ -160,80 +160,85 @@ var runtimeDecls = [...]struct { {"makeslice64", funcTag, 124}, {"makeslicecopy", funcTag, 125}, {"growslice", funcTag, 127}, - {"unsafeslicecheckptr", funcTag, 128}, + {"growsliceBuf", funcTag, 128}, + {"unsafeslicecheckptr", funcTag, 129}, {"panicunsafeslicelen", funcTag, 9}, {"panicunsafeslicenilptr", funcTag, 9}, - {"unsafestringcheckptr", funcTag, 129}, + {"unsafestringcheckptr", funcTag, 130}, {"panicunsafestringlen", funcTag, 9}, 
{"panicunsafestringnilptr", funcTag, 9}, - {"memmove", funcTag, 130}, - {"memclrNoHeapPointers", funcTag, 131}, - {"memclrHasPointers", funcTag, 131}, - {"memequal", funcTag, 132}, - {"memequal0", funcTag, 133}, - {"memequal8", funcTag, 133}, - {"memequal16", funcTag, 133}, - {"memequal32", funcTag, 133}, - {"memequal64", funcTag, 133}, - {"memequal128", funcTag, 133}, - {"f32equal", funcTag, 134}, - {"f64equal", funcTag, 134}, - {"c64equal", funcTag, 134}, - {"c128equal", funcTag, 134}, - {"strequal", funcTag, 134}, - {"interequal", funcTag, 134}, - {"nilinterequal", funcTag, 134}, - {"memhash", funcTag, 135}, - {"memhash0", funcTag, 136}, - {"memhash8", funcTag, 136}, - {"memhash16", funcTag, 136}, - {"memhash32", funcTag, 136}, - {"memhash64", funcTag, 136}, - {"memhash128", funcTag, 136}, - {"f32hash", funcTag, 137}, - {"f64hash", funcTag, 137}, - {"c64hash", funcTag, 137}, - {"c128hash", funcTag, 137}, - {"strhash", funcTag, 137}, - {"interhash", funcTag, 137}, - {"nilinterhash", funcTag, 137}, - {"int64div", funcTag, 138}, - {"uint64div", funcTag, 139}, - {"int64mod", funcTag, 138}, - {"uint64mod", funcTag, 139}, - {"float64toint64", funcTag, 140}, - {"float64touint64", funcTag, 141}, - {"float64touint32", funcTag, 142}, - {"int64tofloat64", funcTag, 143}, - {"int64tofloat32", funcTag, 144}, - {"uint64tofloat64", funcTag, 145}, - {"uint64tofloat32", funcTag, 146}, - {"uint32tofloat64", funcTag, 147}, - {"complex128div", funcTag, 148}, + {"moveSlice", funcTag, 131}, + {"moveSliceNoScan", funcTag, 132}, + {"moveSliceNoCap", funcTag, 133}, + {"moveSliceNoCapNoScan", funcTag, 134}, + {"memmove", funcTag, 135}, + {"memclrNoHeapPointers", funcTag, 136}, + {"memclrHasPointers", funcTag, 136}, + {"memequal", funcTag, 137}, + {"memequal0", funcTag, 138}, + {"memequal8", funcTag, 138}, + {"memequal16", funcTag, 138}, + {"memequal32", funcTag, 138}, + {"memequal64", funcTag, 138}, + {"memequal128", funcTag, 138}, + {"f32equal", funcTag, 139}, + {"f64equal", funcTag, 
139}, + {"c64equal", funcTag, 139}, + {"c128equal", funcTag, 139}, + {"strequal", funcTag, 139}, + {"interequal", funcTag, 139}, + {"nilinterequal", funcTag, 139}, + {"memhash", funcTag, 140}, + {"memhash0", funcTag, 141}, + {"memhash8", funcTag, 141}, + {"memhash16", funcTag, 141}, + {"memhash32", funcTag, 141}, + {"memhash64", funcTag, 141}, + {"memhash128", funcTag, 141}, + {"f32hash", funcTag, 142}, + {"f64hash", funcTag, 142}, + {"c64hash", funcTag, 142}, + {"c128hash", funcTag, 142}, + {"strhash", funcTag, 142}, + {"interhash", funcTag, 142}, + {"nilinterhash", funcTag, 142}, + {"int64div", funcTag, 143}, + {"uint64div", funcTag, 144}, + {"int64mod", funcTag, 143}, + {"uint64mod", funcTag, 144}, + {"float64toint64", funcTag, 145}, + {"float64touint64", funcTag, 146}, + {"float64touint32", funcTag, 147}, + {"int64tofloat64", funcTag, 148}, + {"int64tofloat32", funcTag, 149}, + {"uint64tofloat64", funcTag, 150}, + {"uint64tofloat32", funcTag, 151}, + {"uint32tofloat64", funcTag, 152}, + {"complex128div", funcTag, 153}, {"racefuncenter", funcTag, 33}, {"racefuncexit", funcTag, 9}, {"raceread", funcTag, 33}, {"racewrite", funcTag, 33}, - {"racereadrange", funcTag, 149}, - {"racewriterange", funcTag, 149}, - {"msanread", funcTag, 149}, - {"msanwrite", funcTag, 149}, - {"msanmove", funcTag, 150}, - {"asanread", funcTag, 149}, - {"asanwrite", funcTag, 149}, - {"checkptrAlignment", funcTag, 151}, - {"checkptrArithmetic", funcTag, 153}, - {"libfuzzerTraceCmp1", funcTag, 154}, - {"libfuzzerTraceCmp2", funcTag, 155}, - {"libfuzzerTraceCmp4", funcTag, 156}, - {"libfuzzerTraceCmp8", funcTag, 157}, - {"libfuzzerTraceConstCmp1", funcTag, 154}, - {"libfuzzerTraceConstCmp2", funcTag, 155}, - {"libfuzzerTraceConstCmp4", funcTag, 156}, - {"libfuzzerTraceConstCmp8", funcTag, 157}, - {"libfuzzerHookStrCmp", funcTag, 158}, - {"libfuzzerHookEqualFold", funcTag, 158}, - {"addCovMeta", funcTag, 160}, + {"racereadrange", funcTag, 154}, + {"racewriterange", funcTag, 154}, + 
{"msanread", funcTag, 154}, + {"msanwrite", funcTag, 154}, + {"msanmove", funcTag, 155}, + {"asanread", funcTag, 154}, + {"asanwrite", funcTag, 154}, + {"checkptrAlignment", funcTag, 156}, + {"checkptrArithmetic", funcTag, 158}, + {"libfuzzerTraceCmp1", funcTag, 159}, + {"libfuzzerTraceCmp2", funcTag, 160}, + {"libfuzzerTraceCmp4", funcTag, 161}, + {"libfuzzerTraceCmp8", funcTag, 162}, + {"libfuzzerTraceConstCmp1", funcTag, 159}, + {"libfuzzerTraceConstCmp2", funcTag, 160}, + {"libfuzzerTraceConstCmp4", funcTag, 161}, + {"libfuzzerTraceConstCmp8", funcTag, 162}, + {"libfuzzerHookStrCmp", funcTag, 163}, + {"libfuzzerHookEqualFold", funcTag, 163}, + {"addCovMeta", funcTag, 165}, {"x86HasAVX", varTag, 6}, {"x86HasFMA", varTag, 6}, {"x86HasPOPCNT", varTag, 6}, @@ -244,11 +249,11 @@ var runtimeDecls = [...]struct { {"loong64HasLAM_BH", varTag, 6}, {"loong64HasLSX", varTag, 6}, {"riscv64HasZbb", varTag, 6}, - {"asanregisterglobals", funcTag, 131}, + {"asanregisterglobals", funcTag, 136}, } func runtimeTypes() []*types.Type { - var typs [161]*types.Type + var typs [166]*types.Type typs[0] = types.ByteType typs[1] = types.NewPtr(typs[0]) typs[2] = types.Types[types.TANY] @@ -377,39 +382,44 @@ func runtimeTypes() []*types.Type { typs[125] = newSig(params(typs[1], typs[13], typs[13], typs[7]), params(typs[7])) typs[126] = types.NewSlice(typs[2]) typs[127] = newSig(params(typs[3], typs[13], typs[13], typs[13], typs[1]), params(typs[126])) - typs[128] = newSig(params(typs[1], typs[7], typs[22]), nil) - typs[129] = newSig(params(typs[7], typs[22]), nil) - typs[130] = newSig(params(typs[3], typs[3], typs[5]), nil) - typs[131] = newSig(params(typs[7], typs[5]), nil) - typs[132] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6])) - typs[133] = newSig(params(typs[3], typs[3]), params(typs[6])) - typs[134] = newSig(params(typs[7], typs[7]), params(typs[6])) - typs[135] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5])) - typs[136] = newSig(params(typs[7], 
typs[5]), params(typs[5])) - typs[137] = newSig(params(typs[3], typs[5]), params(typs[5])) - typs[138] = newSig(params(typs[22], typs[22]), params(typs[22])) - typs[139] = newSig(params(typs[24], typs[24]), params(typs[24])) - typs[140] = newSig(params(typs[18]), params(typs[22])) - typs[141] = newSig(params(typs[18]), params(typs[24])) - typs[142] = newSig(params(typs[18]), params(typs[67])) - typs[143] = newSig(params(typs[22]), params(typs[18])) - typs[144] = newSig(params(typs[22]), params(typs[20])) - typs[145] = newSig(params(typs[24]), params(typs[18])) - typs[146] = newSig(params(typs[24]), params(typs[20])) - typs[147] = newSig(params(typs[67]), params(typs[18])) - typs[148] = newSig(params(typs[26], typs[26]), params(typs[26])) - typs[149] = newSig(params(typs[5], typs[5]), nil) - typs[150] = newSig(params(typs[5], typs[5], typs[5]), nil) - typs[151] = newSig(params(typs[7], typs[1], typs[5]), nil) - typs[152] = types.NewSlice(typs[7]) - typs[153] = newSig(params(typs[7], typs[152]), nil) - typs[154] = newSig(params(typs[71], typs[71], typs[15]), nil) - typs[155] = newSig(params(typs[65], typs[65], typs[15]), nil) - typs[156] = newSig(params(typs[67], typs[67], typs[15]), nil) - typs[157] = newSig(params(typs[24], typs[24], typs[15]), nil) - typs[158] = newSig(params(typs[30], typs[30], typs[15]), nil) - typs[159] = types.NewArray(typs[0], 16) - typs[160] = newSig(params(typs[7], typs[67], typs[159], typs[30], typs[13], typs[71], typs[71]), params(typs[67])) + typs[128] = newSig(params(typs[3], typs[13], typs[13], typs[13], typs[1], typs[3], typs[13]), params(typs[126])) + typs[129] = newSig(params(typs[1], typs[7], typs[22]), nil) + typs[130] = newSig(params(typs[7], typs[22]), nil) + typs[131] = newSig(params(typs[1], typs[1], typs[13], typs[13]), params(typs[1], typs[13], typs[13])) + typs[132] = newSig(params(typs[5], typs[1], typs[13], typs[13]), params(typs[1], typs[13], typs[13])) + typs[133] = newSig(params(typs[1], typs[1], typs[13]), 
params(typs[1], typs[13], typs[13])) + typs[134] = newSig(params(typs[5], typs[1], typs[13]), params(typs[1], typs[13], typs[13])) + typs[135] = newSig(params(typs[3], typs[3], typs[5]), nil) + typs[136] = newSig(params(typs[7], typs[5]), nil) + typs[137] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6])) + typs[138] = newSig(params(typs[3], typs[3]), params(typs[6])) + typs[139] = newSig(params(typs[7], typs[7]), params(typs[6])) + typs[140] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5])) + typs[141] = newSig(params(typs[7], typs[5]), params(typs[5])) + typs[142] = newSig(params(typs[3], typs[5]), params(typs[5])) + typs[143] = newSig(params(typs[22], typs[22]), params(typs[22])) + typs[144] = newSig(params(typs[24], typs[24]), params(typs[24])) + typs[145] = newSig(params(typs[18]), params(typs[22])) + typs[146] = newSig(params(typs[18]), params(typs[24])) + typs[147] = newSig(params(typs[18]), params(typs[67])) + typs[148] = newSig(params(typs[22]), params(typs[18])) + typs[149] = newSig(params(typs[22]), params(typs[20])) + typs[150] = newSig(params(typs[24]), params(typs[18])) + typs[151] = newSig(params(typs[24]), params(typs[20])) + typs[152] = newSig(params(typs[67]), params(typs[18])) + typs[153] = newSig(params(typs[26], typs[26]), params(typs[26])) + typs[154] = newSig(params(typs[5], typs[5]), nil) + typs[155] = newSig(params(typs[5], typs[5], typs[5]), nil) + typs[156] = newSig(params(typs[7], typs[1], typs[5]), nil) + typs[157] = types.NewSlice(typs[7]) + typs[158] = newSig(params(typs[7], typs[157]), nil) + typs[159] = newSig(params(typs[71], typs[71], typs[15]), nil) + typs[160] = newSig(params(typs[65], typs[65], typs[15]), nil) + typs[161] = newSig(params(typs[67], typs[67], typs[15]), nil) + typs[162] = newSig(params(typs[24], typs[24], typs[15]), nil) + typs[163] = newSig(params(typs[30], typs[30], typs[15]), nil) + typs[164] = types.NewArray(typs[0], 16) + typs[165] = newSig(params(typs[7], typs[67], typs[164], typs[30], 
typs[13], typs[71], typs[71]), params(typs[67])) return typs[:] } diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index e8aca90081..6663c49dd8 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -1842,26 +1842,7 @@ func IsReflexive(t *Type) bool { // Can this type be stored directly in an interface word? // Yes, if the representation is a single pointer. func IsDirectIface(t *Type) bool { - switch t.Kind() { - case TPTR: - // Pointers to notinheap types must be stored indirectly. See issue 42076. - return !t.Elem().NotInHeap() - case TCHAN, - TMAP, - TFUNC, - TUNSAFEPTR: - return true - - case TARRAY: - // Array of 1 direct iface type can be direct. - return t.NumElem() == 1 && IsDirectIface(t.Elem()) - - case TSTRUCT: - // Struct with 1 field of direct iface type can be direct. - return t.NumFields() == 1 && IsDirectIface(t.Field(0).Type) - } - - return false + return t.Size() == int64(PtrSize) && PtrDataSize(t) == int64(PtrSize) } // IsInterfaceMethod reports whether (field) m is diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go index 25cda4f73d..42218b4caf 100644 --- a/src/cmd/compile/internal/types2/check.go +++ b/src/cmd/compile/internal/types2/check.go @@ -171,12 +171,13 @@ type Checker struct { usedPkgNames map[*PkgName]bool // set of used package names mono monoGraph // graph for detecting non-monomorphizable instantiation loops - firstErr error // first error encountered - methods map[*TypeName][]*Func // maps package scope type names to associated non-blank (non-interface) methods - untyped map[syntax.Expr]exprInfo // map of expressions without final type - delayed []action // stack of delayed action segments; segments are processed in FIFO order - objPath []Object // path of object dependencies during type inference (for cycle reporting) - cleaners []cleaner // list of types that may need a final cleanup at the end 
of type-checking + firstErr error // first error encountered + methods map[*TypeName][]*Func // maps package scope type names to associated non-blank (non-interface) methods + untyped map[syntax.Expr]exprInfo // map of expressions without final type + delayed []action // stack of delayed action segments; segments are processed in FIFO order + objPath []Object // path of object dependencies during type-checking (for cycle reporting) + objPathIdx map[Object]int // map of object to object path index during type-checking (for cycle reporting) + cleaners []cleaner // list of types that may need a final cleanup at the end of type-checking // environment within which the current object is type-checked (valid only // for the duration of type-checking a specific object) @@ -248,19 +249,22 @@ func (check *Checker) later(f func()) *action { return &check.delayed[i] } -// push pushes obj onto the object path and returns its index in the path. -func (check *Checker) push(obj Object) int { +// push pushes obj onto the object path and records its index in the path index map. +func (check *Checker) push(obj Object) { + if check.objPathIdx == nil { + check.objPathIdx = make(map[Object]int) + } + check.objPathIdx[obj] = len(check.objPath) check.objPath = append(check.objPath, obj) - return len(check.objPath) - 1 } -// pop pops and returns the topmost object from the object path. -func (check *Checker) pop() Object { +// pop pops an object from the object path and removes it from the path index map. 
+func (check *Checker) pop() { i := len(check.objPath) - 1 obj := check.objPath[i] - check.objPath[i] = nil + check.objPath[i] = nil // help the garbage collector check.objPath = check.objPath[:i] - return obj + delete(check.objPathIdx, obj) } type cleaner interface { @@ -319,6 +323,7 @@ func (check *Checker) initFiles(files []*syntax.File) { check.untyped = nil check.delayed = nil check.objPath = nil + check.objPathIdx = nil check.cleaners = nil // We must initialize usedVars and usedPkgNames both here and in NewChecker, diff --git a/src/cmd/compile/internal/types2/cycles.go b/src/cmd/compile/internal/types2/cycles.go index fa739a2b84..b916219c97 100644 --- a/src/cmd/compile/internal/types2/cycles.go +++ b/src/cmd/compile/internal/types2/cycles.go @@ -54,7 +54,6 @@ func (check *Checker) directCycle(tname *TypeName, pathIdx map[*TypeName]int) { // tname is marked grey - we have a cycle on the path beginning at start. // Mark tname as invalid. tname.setType(Typ[Invalid]) - tname.setColor(black) // collect type names on cycle var cycle []Object diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go index 91d2492a53..5cb52fdbe4 100644 --- a/src/cmd/compile/internal/types2/decl.go +++ b/src/cmd/compile/internal/types2/decl.go @@ -62,114 +62,77 @@ func (check *Checker) objDecl(obj Object, def *TypeName) { if check.indent == 0 { fmt.Println() // empty line between top-level objects for readability } - check.trace(obj.Pos(), "-- checking %s (%s, objPath = %s)", obj, obj.color(), pathString(check.objPath)) + check.trace(obj.Pos(), "-- checking %s (objPath = %s)", obj, pathString(check.objPath)) check.indent++ defer func() { check.indent-- - check.trace(obj.Pos(), "=> %s (%s)", obj, obj.color()) + check.trace(obj.Pos(), "=> %s", obj) }() } - // Checking the declaration of obj means inferring its type - // (and possibly its value, for constants). 
- // An object's type (and thus the object) may be in one of - // three states which are expressed by colors: + // Checking the declaration of an object means determining its type + // (and also its value for constants). An object (and thus its type) + // may be in 1 of 3 states: // - // - an object whose type is not yet known is painted white (initial color) - // - an object whose type is in the process of being inferred is painted grey - // - an object whose type is fully inferred is painted black + // - not in Checker.objPathIdx and type == nil : type is not yet known (white) + // - in Checker.objPathIdx : type is pending (grey) + // - not in Checker.objPathIdx and type != nil : type is known (black) // - // During type inference, an object's color changes from white to grey - // to black (pre-declared objects are painted black from the start). - // A black object (i.e., its type) can only depend on (refer to) other black - // ones. White and grey objects may depend on white and black objects. - // A dependency on a grey object indicates a cycle which may or may not be - // valid. + // During type-checking, an object changes from white to grey to black. + // Predeclared objects start as black (their type is known without checking). // - // When objects turn grey, they are pushed on the object path (a stack); - // they are popped again when they turn black. Thus, if a grey object (a - // cycle) is encountered, it is on the object path, and all the objects - // it depends on are the remaining objects on that path. Color encoding - // is such that the color value of a grey object indicates the index of - // that object in the object path. - - // During type-checking, white objects may be assigned a type without - // traversing through objDecl; e.g., when initializing constants and - // variables. Update the colors of those objects here (rather than - // everywhere where we set the type) to satisfy the color invariants. 
- if obj.color() == white && obj.Type() != nil { - obj.setColor(black) - return - } - - switch obj.color() { - case white: - assert(obj.Type() == nil) - // All color values other than white and black are considered grey. - // Because black and white are < grey, all values >= grey are grey. - // Use those values to encode the object's index into the object path. - obj.setColor(grey + color(check.push(obj))) - defer func() { - check.pop().setColor(black) - }() - - case black: - assert(obj.Type() != nil) - return - - default: - // Color values other than white or black are considered grey. - fallthrough + // A black object may only depend on (refer to) to other black objects. White + // and grey objects may depend on white or black objects. A dependency on a + // grey object indicates a (possibly invalid) cycle. + // + // When an object is marked grey, it is pushed onto the object path (a stack) + // and its index in the path is recorded in the path index map. It is popped + // and removed from the map when its type is determined (and marked black). - case grey: - // We have a (possibly invalid) cycle. - // In the existing code, this is marked by a non-nil type - // for the object except for constants and variables whose - // type may be non-nil (known), or nil if it depends on the - // not-yet known initialization value. - // In the former case, set the type to Typ[Invalid] because - // we have an initialization cycle. The cycle error will be - // reported later, when determining initialization order. - // TODO(gri) Report cycle here and simplify initialization - // order code. + // If this object is grey, we have a (possibly invalid) cycle. This is signaled + // by a non-nil type for the object, except for constants and variables whose + // type may be non-nil (known), or nil if it depends on a not-yet known + // initialization value. + // + // In the former case, set the type to Typ[Invalid] because we have an + // initialization cycle. 
The cycle error will be reported later, when + // determining initialization order. + // + // TODO(gri) Report cycle here and simplify initialization order code. + if _, ok := check.objPathIdx[obj]; ok { switch obj := obj.(type) { - case *Const: - if !check.validCycle(obj) || obj.typ == nil { - obj.typ = Typ[Invalid] - } - - case *Var: - if !check.validCycle(obj) || obj.typ == nil { - obj.typ = Typ[Invalid] + case *Const, *Var: + if !check.validCycle(obj) || obj.Type() == nil { + obj.setType(Typ[Invalid]) } - case *TypeName: if !check.validCycle(obj) { - // break cycle - // (without this, calling underlying() - // below may lead to an endless loop - // if we have a cycle for a defined - // (*Named) type) - obj.typ = Typ[Invalid] + obj.setType(Typ[Invalid]) } - case *Func: if !check.validCycle(obj) { - // Don't set obj.typ to Typ[Invalid] here - // because plenty of code type-asserts that - // functions have a *Signature type. Grey - // functions have their type set to an empty - // signature which makes it impossible to + // Don't set type to Typ[Invalid]; plenty of code asserts that + // functions have a *Signature type. Instead, leave the type + // as an empty signature, which makes it impossible to // initialize a variable with the function. } - default: panic("unreachable") } + assert(obj.Type() != nil) return } + if obj.Type() != nil { // black, meaning it's already type-checked + return + } + + // white, meaning it must be type-checked + + check.push(obj) + defer check.pop() + d := check.objMap[obj] if d == nil { check.dump("%v: %s should have been declared", obj.Pos(), obj) @@ -221,8 +184,8 @@ func (check *Checker) validCycle(obj Object) (valid bool) { } // Count cycle objects. 
- assert(obj.color() >= grey) - start := obj.color() - grey // index of obj in objPath + start, found := check.objPathIdx[obj] + assert(found) cycle := check.objPath[start:] tparCycle := false // if set, the cycle is through a type parameter list nval := 0 // number of (constant or variable) values in the cycle @@ -532,11 +495,16 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN check.collectTypeParams(&alias.tparams, tdecl.TParamList) } - rhs = check.definedType(tdecl.Type, obj) + rhs = check.declaredType(tdecl.Type, obj) assert(rhs != nil) - alias.fromRHS = rhs - unalias(alias) // populate alias.actual + + // spec: In an alias declaration the given type cannot be a type parameter declared in the same declaration." + // (see also go.dev/issue/75884, go.dev/issue/#75885) + if tpar, ok := rhs.(*TypeParam); ok && alias.tparams != nil && slices.Index(alias.tparams.list(), tpar) >= 0 { + check.error(tdecl.Type, MisplacedTypeParam, "cannot use type parameter declared in alias declaration as RHS") + alias.fromRHS = Typ[Invalid] + } } else { if !versionErr && tparam0 != nil { check.error(tdecl, UnsupportedFeature, "generic type alias requires GODEBUG=gotypesalias=1 or unset") @@ -576,7 +544,7 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN check.collectTypeParams(&named.tparams, tdecl.TParamList) } - rhs = check.definedType(tdecl.Type, obj) + rhs = check.declaredType(tdecl.Type, obj) assert(rhs != nil) named.fromRHS = rhs @@ -764,17 +732,8 @@ func (check *Checker) funcDecl(obj *Func, decl *declInfo) { sig := new(Signature) obj.typ = sig // guard against cycles - // Avoid cycle error when referring to method while type-checking the signature. - // This avoids a nuisance in the best case (non-parameterized receiver type) and - // since the method is not a type, we get an error. 
If we have a parameterized - // receiver type, instantiating the receiver type leads to the instantiation of - // its methods, and we don't want a cycle error in that case. - // TODO(gri) review if this is correct and/or whether we still need this? - saved := obj.color_ - obj.color_ = black fdecl := decl.fdecl check.funcType(sig, fdecl.Recv, fdecl.TParamList, fdecl.Type) - obj.color_ = saved // Set the scope's extent to the complete "func (...) { ... }" // so that Scope.Innermost works correctly. @@ -921,10 +880,9 @@ func (check *Checker) declStmt(list []syntax.Decl) { // the innermost containing block." scopePos := s.Name.Pos() check.declare(check.scope, s.Name, obj, scopePos) - // mark and unmark type before calling typeDecl; its type is still nil (see Checker.objDecl) - obj.setColor(grey + color(check.push(obj))) + check.push(obj) // mark as grey check.typeDecl(obj, s, nil) - check.pop().setColor(black) + check.pop() default: check.errorf(s, InvalidSyntaxTree, "unknown syntax.Decl node %T", s) diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go index ce129dbf59..dd2d415790 100644 --- a/src/cmd/compile/internal/types2/object.go +++ b/src/cmd/compile/internal/types2/object.go @@ -42,18 +42,12 @@ type Object interface { // 0 for all other objects (including objects in file scopes). order() uint32 - // color returns the object's color. - color() color - // setType sets the type of the object. setType(Type) // setOrder sets the order number of the object. It must be > 0. setOrder(uint32) - // setColor sets the object's color. It must not be white. - setColor(color color) - // setParent sets the parent scope of the object. setParent(*Scope) @@ -102,41 +96,9 @@ type object struct { name string typ Type order_ uint32 - color_ color scopePos_ syntax.Pos } -// color encodes the color of an object (see Checker.objDecl for details). -type color uint32 - -// An object may be painted in one of three colors. 
-// Color values other than white or black are considered grey. -const ( - white color = iota - black - grey // must be > white and black -) - -func (c color) String() string { - switch c { - case white: - return "white" - case black: - return "black" - default: - return "grey" - } -} - -// colorFor returns the (initial) color for an object depending on -// whether its type t is known or not. -func colorFor(t Type) color { - if t != nil { - return black - } - return white -} - // Parent returns the scope in which the object is declared. // The result is nil for methods and struct fields. func (obj *object) Parent() *Scope { return obj.parent } @@ -164,13 +126,11 @@ func (obj *object) Id() string { return Id(obj.pkg, obj.name) } func (obj *object) String() string { panic("abstract") } func (obj *object) order() uint32 { return obj.order_ } -func (obj *object) color() color { return obj.color_ } func (obj *object) scopePos() syntax.Pos { return obj.scopePos_ } func (obj *object) setParent(parent *Scope) { obj.parent = parent } func (obj *object) setType(typ Type) { obj.typ = typ } func (obj *object) setOrder(order uint32) { assert(order > 0); obj.order_ = order } -func (obj *object) setColor(color color) { assert(color != white); obj.color_ = color } func (obj *object) setScopePos(pos syntax.Pos) { obj.scopePos_ = pos } func (obj *object) sameId(pkg *Package, name string, foldCase bool) bool { @@ -247,7 +207,7 @@ type PkgName struct { // NewPkgName returns a new PkgName object representing an imported package. // The remaining arguments set the attributes found with all Objects. func NewPkgName(pos syntax.Pos, pkg *Package, name string, imported *Package) *PkgName { - return &PkgName{object{nil, pos, pkg, name, Typ[Invalid], 0, black, nopos}, imported} + return &PkgName{object{nil, pos, pkg, name, Typ[Invalid], 0, nopos}, imported} } // Imported returns the package that was imported. 
@@ -263,7 +223,7 @@ type Const struct { // NewConst returns a new constant with value val. // The remaining arguments set the attributes found with all Objects. func NewConst(pos syntax.Pos, pkg *Package, name string, typ Type, val constant.Value) *Const { - return &Const{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, val} + return &Const{object{nil, pos, pkg, name, typ, 0, nopos}, val} } // Val returns the constant's value. @@ -288,7 +248,7 @@ type TypeName struct { // argument for NewNamed, which will set the TypeName's type as a side- // effect. func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName { - return &TypeName{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}} + return &TypeName{object{nil, pos, pkg, name, typ, 0, nopos}} } // NewTypeNameLazy returns a new defined type like NewTypeName, but it @@ -402,7 +362,7 @@ func NewField(pos syntax.Pos, pkg *Package, name string, typ Type, embedded bool // newVar returns a new variable. // The arguments set the attributes found with all Objects. func newVar(kind VarKind, pos syntax.Pos, pkg *Package, name string, typ Type) *Var { - return &Var{object: object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, kind: kind} + return &Var{object: object{nil, pos, pkg, name, typ, 0, nopos}, kind: kind} } // Anonymous reports whether the variable is an embedded field. @@ -452,7 +412,7 @@ func NewFunc(pos syntax.Pos, pkg *Package, name string, sig *Signature) *Func { // as this would violate object.{Type,color} invariants. // TODO(adonovan): propose to disallow NewFunc with nil *Signature. } - return &Func{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, false, nil} + return &Func{object{nil, pos, pkg, name, typ, 0, nopos}, false, nil} } // Signature returns the signature (type) of the function or method. @@ -534,7 +494,7 @@ type Label struct { // NewLabel returns a new label. 
func NewLabel(pos syntax.Pos, pkg *Package, name string) *Label { - return &Label{object{pos: pos, pkg: pkg, name: name, typ: Typ[Invalid], color_: black}, false} + return &Label{object{pos: pos, pkg: pkg, name: name, typ: Typ[Invalid]}, false} } // A Builtin represents a built-in function. @@ -545,7 +505,7 @@ type Builtin struct { } func newBuiltin(id builtinId) *Builtin { - return &Builtin{object{name: predeclaredFuncs[id].name, typ: Typ[Invalid], color_: black}, id} + return &Builtin{object{name: predeclaredFuncs[id].name, typ: Typ[Invalid]}, id} } // Nil represents the predeclared value nil. diff --git a/src/cmd/compile/internal/types2/scope.go b/src/cmd/compile/internal/types2/scope.go index 566184df73..10c624be2e 100644 --- a/src/cmd/compile/internal/types2/scope.go +++ b/src/cmd/compile/internal/types2/scope.go @@ -217,10 +217,8 @@ func (*lazyObject) Exported() bool { panic("unreachable") } func (*lazyObject) Id() string { panic("unreachable") } func (*lazyObject) String() string { panic("unreachable") } func (*lazyObject) order() uint32 { panic("unreachable") } -func (*lazyObject) color() color { panic("unreachable") } func (*lazyObject) setType(Type) { panic("unreachable") } func (*lazyObject) setOrder(uint32) { panic("unreachable") } -func (*lazyObject) setColor(color color) { panic("unreachable") } func (*lazyObject) setParent(*Scope) { panic("unreachable") } func (*lazyObject) sameId(*Package, string, bool) bool { panic("unreachable") } func (*lazyObject) scopePos() syntax.Pos { panic("unreachable") } diff --git a/src/cmd/compile/internal/types2/sizeof_test.go b/src/cmd/compile/internal/types2/sizeof_test.go index 092e82318a..f206c12fc3 100644 --- a/src/cmd/compile/internal/types2/sizeof_test.go +++ b/src/cmd/compile/internal/types2/sizeof_test.go @@ -36,14 +36,14 @@ func TestSizeof(t *testing.T) { {term{}, 12, 24}, // Objects - {PkgName{}, 60, 96}, - {Const{}, 64, 104}, - {TypeName{}, 56, 88}, - {Var{}, 64, 104}, - {Func{}, 64, 104}, - {Label{}, 60, 
96}, - {Builtin{}, 60, 96}, - {Nil{}, 56, 88}, + {PkgName{}, 56, 96}, + {Const{}, 60, 104}, + {TypeName{}, 52, 88}, + {Var{}, 60, 104}, + {Func{}, 60, 104}, + {Label{}, 56, 96}, + {Builtin{}, 56, 96}, + {Nil{}, 52, 88}, // Misc {Scope{}, 60, 104}, diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go index 8601ce6277..303f782ac4 100644 --- a/src/cmd/compile/internal/types2/typexpr.go +++ b/src/cmd/compile/internal/types2/typexpr.go @@ -16,7 +16,7 @@ import ( // ident type-checks identifier e and initializes x with the value or type of e. // If an error occurred, x.mode is set to invalid. -// For the meaning of def, see Checker.definedType, below. +// For the meaning of def, see Checker.declaredType, below. // If wantType is set, the identifier e is expected to denote a type. func (check *Checker) ident(x *operand, e *syntax.Name, def *TypeName, wantType bool) { x.mode = invalid @@ -149,14 +149,14 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *TypeName, wantType // typ type-checks the type expression e and returns its type, or Typ[Invalid]. // The type must not be an (uninstantiated) generic type. func (check *Checker) typ(e syntax.Expr) Type { - return check.definedType(e, nil) + return check.declaredType(e, nil) } // varType type-checks the type expression e and returns its type, or Typ[Invalid]. // The type must not be an (uninstantiated) generic type and it must not be a // constraint interface. func (check *Checker) varType(e syntax.Expr) Type { - typ := check.definedType(e, nil) + typ := check.declaredType(e, nil) check.validVarType(e, typ) return typ } @@ -187,11 +187,11 @@ func (check *Checker) validVarType(e syntax.Expr, typ Type) { }).describef(e, "check var type %s", typ) } -// definedType is like typ but also accepts a type name def. 
-// If def != nil, e is the type specification for the type named def, declared -// in a type declaration, and def.typ.underlying will be set to the type of e -// before any components of e are type-checked. -func (check *Checker) definedType(e syntax.Expr, def *TypeName) Type { +// declaredType is like typ but also accepts a type name def. +// If def != nil, e is the type specification for the [Alias] or [Named] type +// named def, and def.typ.fromRHS will be set to the [Type] of e immediately +// after its creation. +func (check *Checker) declaredType(e syntax.Expr, def *TypeName) Type { typ := check.typInternal(e, def) assert(isTyped(typ)) if isGeneric(typ) { @@ -230,7 +230,7 @@ func goTypeName(typ Type) string { } // typInternal drives type checking of types. -// Must only be called by definedType or genericType. +// Must only be called by declaredType or genericType. func (check *Checker) typInternal(e0 syntax.Expr, def *TypeName) (T Type) { if check.conf.Trace { check.trace(e0.Pos(), "-- type %s", e0) @@ -296,7 +296,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *TypeName) (T Type) { case *syntax.ParenExpr: // Generic types must be instantiated before they can be used in any form. // Consequently, generic types cannot be parenthesized. - return check.definedType(e.X, def) + return check.declaredType(e.X, def) case *syntax.ArrayType: typ := new(Array) diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go index 332cd174f9..1ecef97d51 100644 --- a/src/cmd/compile/internal/types2/universe.go +++ b/src/cmd/compile/internal/types2/universe.go @@ -98,7 +98,6 @@ func defPredeclaredTypes() { // interface. { universeAnyNoAlias = NewTypeName(nopos, nil, "any", &Interface{complete: true, tset: &topTypeSet}) - universeAnyNoAlias.setColor(black) // ensure that the any TypeName reports a consistent Parent, after // hijacking Universe.Lookup with gotypesalias=0. 
universeAnyNoAlias.setParent(Universe) @@ -107,7 +106,6 @@ func defPredeclaredTypes() { // into the Universe, but we lean toward the future and insert the Alias // representation. universeAnyAlias = NewTypeName(nopos, nil, "any", nil) - universeAnyAlias.setColor(black) _ = NewAlias(universeAnyAlias, universeAnyNoAlias.Type().Underlying()) // Link TypeName and Alias def(universeAnyAlias) } @@ -115,7 +113,6 @@ func defPredeclaredTypes() { // type error interface{ Error() string } { obj := NewTypeName(nopos, nil, "error", nil) - obj.setColor(black) typ := (*Checker)(nil).newNamed(obj, nil, nil) // error.Error() string @@ -136,7 +133,6 @@ func defPredeclaredTypes() { // type comparable interface{} // marked as comparable { obj := NewTypeName(nopos, nil, "comparable", nil) - obj.setColor(black) typ := (*Checker)(nil).newNamed(obj, nil, nil) // interface{} // marked as comparable @@ -165,7 +161,7 @@ func defPredeclaredConsts() { } func defPredeclaredNil() { - def(&Nil{object{name: "nil", typ: Typ[UntypedNil], color_: black}}) + def(&Nil{object{name: "nil", typ: Typ[UntypedNil]}}) } // A builtinId is the id of a builtin function. @@ -289,7 +285,7 @@ func init() { // a scope. Objects with exported names are inserted in the unsafe package // scope; other objects are inserted in the universe scope. func def(obj Object) { - assert(obj.color() == black) + assert(obj.Type() != nil) name := obj.Name() if strings.Contains(name, " ") { return // nothing to do diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 989ae0a1db..2794671c73 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -351,6 +351,11 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { case ir.OMETHVALUE: return walkMethodValue(n.(*ir.SelectorExpr), init) + + case ir.OMOVE2HEAP: + n := n.(*ir.MoveToHeapExpr) + n.Slice = walkExpr(n.Slice, init) + return n } // No return! 
Each case must return (or panic), diff --git a/src/cmd/go.mod b/src/cmd/go.mod index 42d510c34f..090b2c943f 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -6,12 +6,12 @@ require ( github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 golang.org/x/arch v0.22.1-0.20251016010524-fea4a9ec4938 golang.org/x/build v0.0.0-20250806225920-b7c66c047964 - golang.org/x/mod v0.29.0 - golang.org/x/sync v0.17.0 - golang.org/x/sys v0.37.0 - golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 + golang.org/x/mod v0.30.1-0.20251114215501-3f03020ad526 + golang.org/x/sync v0.18.0 + golang.org/x/sys v0.38.0 + golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 golang.org/x/term v0.34.0 - golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5 + golang.org/x/tools v0.39.1-0.20251114194111-59ff18ce4883 ) require ( diff --git a/src/cmd/go.sum b/src/cmd/go.sum index 0a09e6e401..e4955f224b 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -10,19 +10,19 @@ golang.org/x/arch v0.22.1-0.20251016010524-fea4a9ec4938 h1:VJ182b/ajNehMFRltVfCh golang.org/x/arch v0.22.1-0.20251016010524-fea4a9ec4938/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A= golang.org/x/build v0.0.0-20250806225920-b7c66c047964 h1:yRs1K51GKq7hsIO+YHJ8LsslrvwFceNPIv0tYjpcBd0= golang.org/x/build v0.0.0-20250806225920-b7c66c047964/go.mod h1:i9Vx7+aOQUpYJRxSO+OpRStVBCVL/9ccI51xblWm5WY= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU= -golang.org/x/telemetry 
v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= +golang.org/x/mod v0.30.1-0.20251114215501-3f03020ad526 h1:LPpBM4CGUFMC47OqgAr2YIUxEUjH1Ur+D3KR/1LiuuQ= +golang.org/x/mod v0.30.1-0.20251114215501-3f03020ad526/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= -golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5 h1:cz7f45KGWAtyIrz6bm45Gc+lw8beIxBSW3EQh4Bwbg4= -golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.39.1-0.20251114194111-59ff18ce4883 h1:aeO0AW8d+a+5+hNQx9f4J5egD89zftrY2x42KGQjLzI= +golang.org/x/tools v0.39.1-0.20251114194111-59ff18ce4883/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef h1:mqLYrXCXYEZOop9/Dbo6RPX11539nwiCNBb1icVPmw8= rsc.io/markdown v0.0.0-20240306144322-0bf8f97ee8ef/go.mod h1:8xcPgWmwlZONN1D9bjxtHEjrUtSEa3fakVF8iaewYKQ= diff --git a/src/cmd/go/internal/cache/hash.go b/src/cmd/go/internal/cache/hash.go index 4f79c31500..27d275644a 100644 --- a/src/cmd/go/internal/cache/hash.go +++ b/src/cmd/go/internal/cache/hash.go @@ -51,6 
+51,9 @@ func stripExperiment(version string) string { if i := strings.Index(version, " X:"); i >= 0 { return version[:i] } + if i := strings.Index(version, "-X:"); i >= 0 { + return version[:i] + } return version } diff --git a/src/cmd/go/internal/vcweb/script.go b/src/cmd/go/internal/vcweb/script.go index 0856c40677..8fa00b2775 100644 --- a/src/cmd/go/internal/vcweb/script.go +++ b/src/cmd/go/internal/vcweb/script.go @@ -44,7 +44,7 @@ func newScriptEngine() *script.Engine { return script.OnceCondition(summary, func() (bool, error) { return f(), nil }) } add("bzr", lazyBool("the 'bzr' executable exists and provides the standard CLI", hasWorkingBzr)) - add("git-min-vers", script.PrefixCondition("<suffix> indicates a minimum git version", hasAtLeastGitVersion)) + add("git-sha256", script.OnceCondition("the local 'git' version is recent enough to support sha256 object/commit hashes", gitSupportsSHA256)) interrupt := func(cmd *exec.Cmd) error { return cmd.Process.Signal(os.Interrupt) } gracePeriod := 30 * time.Second // arbitrary @@ -412,10 +412,14 @@ func gitVersion() (string, error) { return "v" + string(matches[1]), nil } -func hasAtLeastGitVersion(s *script.State, minVers string) (bool, error) { +func hasAtLeastGitVersion(minVers string) (bool, error) { gitVers, gitVersErr := gitVersion() if gitVersErr != nil { return false, gitVersErr } return semver.Compare(minVers, gitVers) <= 0, nil } + +func gitSupportsSHA256() (bool, error) { + return hasAtLeastGitVersion("v2.29") +} diff --git a/src/cmd/go/scriptconds_test.go b/src/cmd/go/scriptconds_test.go index c87c60ad33..4d7a9ac54b 100644 --- a/src/cmd/go/scriptconds_test.go +++ b/src/cmd/go/scriptconds_test.go @@ -44,7 +44,7 @@ func scriptConditions(t *testing.T) map[string]script.Cond { add("case-sensitive", script.OnceCondition("$WORK filesystem is case-sensitive", isCaseSensitive)) add("cc", script.PrefixCondition("go env CC = <suffix> (ignoring the go/env file)", ccIs)) add("git", lazyBool("the 'git' executable 
exists and provides the standard CLI", hasWorkingGit)) - add("git-min-vers", script.PrefixCondition("<suffix> indicates a minimum git version", hasAtLeastGitVersion)) + add("git-sha256", script.OnceCondition("the local 'git' version is recent enough to support sha256 object/commit hashes", gitSupportsSHA256)) add("net", script.PrefixCondition("can connect to external network host <suffix>", hasNet)) add("trimpath", script.OnceCondition("test binary was built with -trimpath", isTrimpath)) @@ -171,7 +171,7 @@ func gitVersion() (string, error) { return "v" + string(matches[1]), nil } -func hasAtLeastGitVersion(s *script.State, minVers string) (bool, error) { +func hasAtLeastGitVersion(minVers string) (bool, error) { gitVers, gitVersErr := gitVersion() if gitVersErr != nil { return false, gitVersErr @@ -179,6 +179,10 @@ func hasAtLeastGitVersion(s *script.State, minVers string) (bool, error) { return semver.Compare(minVers, gitVers) <= 0, nil } +func gitSupportsSHA256() (bool, error) { + return hasAtLeastGitVersion("v2.29") +} + func hasWorkingBzr() bool { bzr, err := exec.LookPath("bzr") if err != nil { diff --git a/src/cmd/go/testdata/script/README b/src/cmd/go/testdata/script/README index d4f4c47af7..2b5ab6948b 100644 --- a/src/cmd/go/testdata/script/README +++ b/src/cmd/go/testdata/script/README @@ -399,8 +399,8 @@ The available conditions are: GOOS/GOARCH supports -fuzz with instrumentation [git] the 'git' executable exists and provides the standard CLI -[git-min-vers:*] - <suffix> indicates a minimum git version +[git-sha256] + the local 'git' version is recent enough to support sha256 object/commit hashes [go-builder] GO_BUILDER_NAME is non-empty [link] diff --git a/src/cmd/go/testdata/script/build_git_sha256_go_get_branch.txt b/src/cmd/go/testdata/script/build_git_sha256_go_get_branch.txt index 0773e08ea5..1e71e25f11 100644 --- a/src/cmd/go/testdata/script/build_git_sha256_go_get_branch.txt +++ b/src/cmd/go/testdata/script/build_git_sha256_go_get_branch.txt @@ 
-1,6 +1,6 @@ [short] skip [!git] skip -[!git-min-vers:v2.29] skip +[!git-sha256] skip env GOPRIVATE=vcs-test.golang.org diff --git a/src/cmd/go/testdata/script/build_git_sha256_moddep.txt b/src/cmd/go/testdata/script/build_git_sha256_moddep.txt index 21a296bd3d..7048e8f2e4 100644 --- a/src/cmd/go/testdata/script/build_git_sha256_moddep.txt +++ b/src/cmd/go/testdata/script/build_git_sha256_moddep.txt @@ -1,6 +1,6 @@ [short] skip [!git] skip -[!git-min-vers:v2.29] skip +[!git-sha256] skip env GOPRIVATE=vcs-test.golang.org diff --git a/src/cmd/go/testdata/script/mod_download_git_bareRepository_sha256.txt b/src/cmd/go/testdata/script/mod_download_git_bareRepository_sha256.txt index df772f5c4b..2f391e200e 100644 --- a/src/cmd/go/testdata/script/mod_download_git_bareRepository_sha256.txt +++ b/src/cmd/go/testdata/script/mod_download_git_bareRepository_sha256.txt @@ -1,6 +1,6 @@ [short] skip [!git] skip -[!git-min-vers:v2.29] skip +[!git-sha256] skip # This is a git sha256-mode copy of mod_download_git_bareRepository diff --git a/src/cmd/go/testdata/script/mod_get_direct.txt b/src/cmd/go/testdata/script/mod_get_direct.txt index 02b10ab6fd..2c89576446 100644 --- a/src/cmd/go/testdata/script/mod_get_direct.txt +++ b/src/cmd/go/testdata/script/mod_get_direct.txt @@ -2,14 +2,14 @@ # 'GOPROXY=direct go get golang.org/x/tools/gopls@master' did not correctly # resolve the pseudo-version for its dependency on golang.org/x/tools. -[!net:cloud.google.com] skip +[short] skip [!git] skip env GO111MODULE=on env GOPROXY=direct env GOSUMDB=off -go list -m cloud.google.com/go@main +go list -m vcs-test.golang.org/git/tagtests.git@master ! 
stdout 'v0.0.0-' -- go.mod -- diff --git a/src/cmd/go/testdata/vcstest/git/gitrepo-sha256.txt b/src/cmd/go/testdata/vcstest/git/gitrepo-sha256.txt index 15068a249e..a08e2949d3 100644 --- a/src/cmd/go/testdata/vcstest/git/gitrepo-sha256.txt +++ b/src/cmd/go/testdata/vcstest/git/gitrepo-sha256.txt @@ -1,4 +1,4 @@ -[!git-min-vers:v2.29] skip +[!git-sha256] skip handle git diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 85dca33d27..c70c1d9438 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -1153,36 +1153,37 @@ type Func interface { // Link holds the context for writing object code from a compiler // to be linker input or for reading that input into the linker. type Link struct { - Headtype objabi.HeadType - Arch *LinkArch - Debugasm int - Debugvlog bool - Debugpcln string - Flag_shared bool - Flag_dynlink bool - Flag_linkshared bool - Flag_optimize bool - Flag_locationlists bool - Flag_noRefName bool // do not include referenced symbol names in object file - Retpoline bool // emit use of retpoline stubs for indirect jmp/call - Flag_maymorestack string // If not "", call this function before stack checks - Bso *bufio.Writer - Pathname string - Pkgpath string // the current package's import path - hashmu sync.Mutex // protects hash, funchash - hash map[string]*LSym // name -> sym mapping - funchash map[string]*LSym // name -> sym mapping for ABIInternal syms - statichash map[string]*LSym // name -> sym mapping for static syms - PosTable src.PosTable - InlTree InlTree // global inlining tree used by gc/inl.go - DwFixups *DwarfFixupTable - DwTextCount int - Imports []goobj.ImportedPkg - DiagFunc func(string, ...any) - DiagFlush func() - DebugInfo func(ctxt *Link, fn *LSym, info *LSym, curfn Func) ([]dwarf.Scope, dwarf.InlCalls) - GenAbstractFunc func(fn *LSym) - Errors int + Headtype objabi.HeadType + Arch *LinkArch + CompressInstructions bool // use compressed instructions where possible (if supported by 
architecture) + Debugasm int + Debugvlog bool + Debugpcln string + Flag_shared bool + Flag_dynlink bool + Flag_linkshared bool + Flag_optimize bool + Flag_locationlists bool + Flag_noRefName bool // do not include referenced symbol names in object file + Retpoline bool // emit use of retpoline stubs for indirect jmp/call + Flag_maymorestack string // If not "", call this function before stack checks + Bso *bufio.Writer + Pathname string + Pkgpath string // the current package's import path + hashmu sync.Mutex // protects hash, funchash + hash map[string]*LSym // name -> sym mapping + funchash map[string]*LSym // name -> sym mapping for ABIInternal syms + statichash map[string]*LSym // name -> sym mapping for static syms + PosTable src.PosTable + InlTree InlTree // global inlining tree used by gc/inl.go + DwFixups *DwarfFixupTable + DwTextCount int + Imports []goobj.ImportedPkg + DiagFunc func(string, ...any) + DiagFlush func() + DebugInfo func(ctxt *Link, fn *LSym, info *LSym, curfn Func) ([]dwarf.Scope, dwarf.InlCalls) + GenAbstractFunc func(fn *LSym) + Errors int InParallel bool // parallel backend phase in effect UseBASEntries bool // use Base Address Selection Entries in location lists and PC ranges diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go index 73f145df14..5b8bffc9f1 100644 --- a/src/cmd/internal/obj/loong64/a.out.go +++ b/src/cmd/internal/obj/loong64/a.out.go @@ -589,6 +589,10 @@ const ( AORN AANDN + // 2.2.1.12 + AMULWVW + AMULWVWU + // 2.2.7. 
Atomic Memory Access Instructions AAMSWAPB AAMSWAPH diff --git a/src/cmd/internal/obj/loong64/anames.go b/src/cmd/internal/obj/loong64/anames.go index ab85c52a21..1749b43bf6 100644 --- a/src/cmd/internal/obj/loong64/anames.go +++ b/src/cmd/internal/obj/loong64/anames.go @@ -131,6 +131,8 @@ var Anames = []string{ "ALSLV", "ORN", "ANDN", + "MULWVW", + "MULWVWU", "AMSWAPB", "AMSWAPH", "AMSWAPW", diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go index 38b075d77e..b35e49a1b6 100644 --- a/src/cmd/internal/obj/loong64/asm.go +++ b/src/cmd/internal/obj/loong64/asm.go @@ -1503,6 +1503,8 @@ func buildop(ctxt *obj.Link) { opset(AREMU, r0) opset(ADIV, r0) opset(ADIVU, r0) + opset(AMULWVW, r0) + opset(AMULWVWU, r0) case AMULV: opset(AMULVU, r0) @@ -3230,6 +3232,10 @@ func (c *ctxt0) oprrr(a obj.As) uint32 { return 0x3c << 15 // mulh.d case AMULHVU: return 0x3d << 15 // mulhu.d + case AMULWVW: + return 0x3e << 15 // mulw.d.w + case AMULWVWU: + return 0x3f << 15 // mulw.d.wu case ADIV: return 0x40 << 15 // div.w case ADIVU: diff --git a/src/cmd/internal/obj/riscv/asm_test.go b/src/cmd/internal/obj/riscv/asm_test.go index f40e57fa64..5b50d1533a 100644 --- a/src/cmd/internal/obj/riscv/asm_test.go +++ b/src/cmd/internal/obj/riscv/asm_test.go @@ -11,8 +11,8 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "runtime" - "strings" "testing" ) @@ -48,10 +48,10 @@ func genLargeBranch(buf *bytes.Buffer) { fmt.Fprintln(buf, "TEXT f(SB),0,$0-0") fmt.Fprintln(buf, "BEQ X0, X0, label") for i := 0; i < 1<<19; i++ { - fmt.Fprintln(buf, "ADD $0, X0, X0") + fmt.Fprintln(buf, "ADD $0, X5, X0") } fmt.Fprintln(buf, "label:") - fmt.Fprintln(buf, "ADD $0, X0, X0") + fmt.Fprintln(buf, "ADD $0, X5, X0") } // TestLargeCall generates a large function (>1MB of text) with a call to @@ -112,11 +112,11 @@ func genLargeCall(buf *bytes.Buffer) { fmt.Fprintln(buf, "TEXT ·x(SB),0,$0-0") fmt.Fprintln(buf, "CALL ·y(SB)") for i := 0; i < 1<<19; i++ { - fmt.Fprintln(buf, "ADD 
$0, X0, X0") + fmt.Fprintln(buf, "ADD $0, X5, X0") } fmt.Fprintln(buf, "RET") fmt.Fprintln(buf, "TEXT ·y(SB),0,$0-0") - fmt.Fprintln(buf, "ADD $0, X0, X0") + fmt.Fprintln(buf, "ADD $0, X5, X0") fmt.Fprintln(buf, "RET") } @@ -301,9 +301,9 @@ TEXT _stub(SB),$0-0 // FENCE // NOP // FENCE - // RET - want := "0f 00 f0 0f 13 00 00 00 0f 00 f0 0f 67 80 00 00" - if !strings.Contains(string(out), want) { + // RET (CJALR or JALR) + want := regexp.MustCompile("0x0000 0f 00 f0 0f 13 00 00 00 0f 00 f0 0f (82 80|67 80 00 00) ") + if !want.Match(out) { t.Errorf("PCALIGN test failed - got %s\nwant %s", out, want) } } diff --git a/src/cmd/internal/obj/riscv/cpu.go b/src/cmd/internal/obj/riscv/cpu.go index 60174a0b3a..a91395dd38 100644 --- a/src/cmd/internal/obj/riscv/cpu.go +++ b/src/cmd/internal/obj/riscv/cpu.go @@ -326,6 +326,9 @@ const ( NEED_GOT_PCREL_ITYPE_RELOC ) +const NEED_RELOC = NEED_JAL_RELOC | NEED_CALL_RELOC | NEED_PCREL_ITYPE_RELOC | + NEED_PCREL_STYPE_RELOC | NEED_GOT_PCREL_ITYPE_RELOC + // RISC-V mnemonics, as defined in the "opcodes" and "opcodes-pseudo" files // at https://github.com/riscv/riscv-opcodes. // diff --git a/src/cmd/internal/obj/riscv/doc.go b/src/cmd/internal/obj/riscv/doc.go new file mode 100644 index 0000000000..365bedd299 --- /dev/null +++ b/src/cmd/internal/obj/riscv/doc.go @@ -0,0 +1,297 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package riscv implements the riscv64 assembler. + +# Register naming + +The integer registers are named X0 through to X31, however X4 must be accessed +through its RISC-V ABI name, TP, and X27, which holds a pointer to the Go +routine structure, must be referred to as g. Additionally, when building in +shared mode, X3 is unavailable and must be accessed via its RISC-V ABI name, +GP. + +The floating-point registers are named F0 through to F31. + +The vector registers are named V0 through to V31. 
+ +Both integer and floating-point registers can be referred to by their RISC-V +ABI names, e.g., A0 or FT0, with the exception that X27 cannot be referred to +by its RISC-V ABI name, S11. It must be referred to as g. + +Some of the integer registers are used by the Go runtime and assembler - X26 is +the closure pointer, X27 points to the Go routine structure and X31 is a +temporary register used by the Go assembler. Use of X31 should be avoided in +hand written assembly code as its value could be altered by the instruction +sequences emitted by the assembler. + +# Instruction naming + +Many RISC-V instructions contain one or more suffixes in their names. In the +[RISC-V ISA Manual] these suffixes are separated from themselves and the +name of the instruction mnemonic with a dot ('.'). In the Go assembler, the +separators are omitted and the suffixes are written in upper case. + +Example: + + FMVWX <=> fmv.w.x + +# Rounding modes + +The Go toolchain does not set the FCSR register and requires the desired +rounding mode to be explicitly encoded within floating-point instructions. +The syntax the Go assembler uses to specify the rounding modes differs +from the syntax in the RISC-V specifications. In the [RISC-V ISA Manual] +the rounding mode is given as an extra operand at the end of an +assembly language instruction. In the Go assembler, the rounding modes are +converted to uppercase and follow the instruction mnemonic from which they +are separated with a dot ('.'). + +Example: + + FCVTLUS.RNE F0, X5 <=> fcvt.lu.s x5, f0, rne + +RTZ is assumed if the rounding mode is omitted. + +# RISC-V extensions + +By default the Go compiler targets the [rva20u64] profile. This profile mandates +all the general RISC-V instructions, allowing Go to use integer, multiplication, +division, floating-point and atomic instructions without having to +perform compile time or runtime checks to verify that their use is appropriate +for the target hardware. 
All widely available riscv64 devices support at least +[rva20u64]. The Go toolchain can be instructed to target later RISC-V profiles, +including, [rva22u64] and [rva23u64], via the GORISCV64 environment variable. +Instructions that are provided by newer profiles cannot typically be used in +handwritten assembly code without compile time guards (or runtime checks) +that ensure they are hardware supported. + +The file asm_riscv64.h defines macros for each RISC-V extension that is enabled +by setting the GORISCV64 environment variable to a value other than [rva20u64]. +For example, if GORISCV64=rva22u64 the macros hasZba, hasZbb and hasZbs will be +defined. If GORISCV64=rva23u64 hasV will be defined in addition to hasZba, +hasZbb and hasZbs. These macros can be used to determine whether it's safe +to use an instruction in hand-written assembly. + +It is not always necessary to include asm_riscv64.h and use #ifdefs in your +code to safely take advantage of instructions present in the [rva22u64] +profile. In some cases the assembler can generate [rva20u64] compatible code +even when an [rva22u64] instruction is used in an assembly source file. When +GORISCV64=rva20u64 the assembler will synthesize certain [rva22u64] +instructions, e.g., ANDN, using multiple [rva20u64] instructions. Instructions +such as ANDN can then be freely used in assembly code without checking to see +whether the instruction is supported by the target profile. When building a +source file containing the ANDN instruction with GORISCV64=rva22u64 the +assembler will emit the Zbb ANDN instruction directly. When building the same +source file with GORISCV64=rva20u64 the assembler will emit multiple [rva20u64] +instructions to synthesize ANDN. + +The assembler will also use [rva22u64] instructions to implement the zero and +sign extension instructions, e.g., MOVB and MOVHU, when GORISCV64=rva22u64 or +greater. 
+ +The instructions not implemented in the default profile ([rva20u64]) that can +be safely used in assembly code without compile time checks are: + + - ANDN + - MAX + - MAXU + - MIN + - MINU + - MOVB + - MOVH + - MOVHU + - MOVWU + - ORN + - ROL + - ROLW + - ROR + - RORI + - RORIW + - RORW + - XNOR + +# Operand ordering + +The ordering used for instruction operands in the Go assembler differs from the +ordering defined in the [RISC-V ISA Manual]. + +1. R-Type instructions + +R-Type instructions are written in the reverse order to that given in the +[RISC-V ISA Manual], with the register order being rs2, rs1, rd. + +Examples: + + ADD X10, X11, X12 <=> add x12, x11, x10 + FADDD F10, F11, F12 <=> fadd.d f12, f11, f10 + +2. I-Type arithmetic instructions + +I-Type arithmetic instructions (not loads, fences, ebreak, ecall) use the same +ordering as the R-Type instructions, typically, imm12, rs1, rd. + +Examples: + + ADDI $1, X11, X12 <=> add x12, x11, 1 + SLTI $1, X11, X12 <=> slti x12, x11, 1 + +3. Loads and Stores + +Load instructions are written with the source operand (whether it be a register +or a memory address), first followed by the destination operand. + +Examples: + + MOV 16(X2), X10 <=> ld x10, 16(x2) + MOV X10, (X2) <=> sd x10, 0(x2) + +4. Branch instructions + +The branch instructions use the same operand ordering as is given in the +[RISC-V ISA Manual], e.g., rs1, rs2, label. + +Example: + + BLT X12, X23, loop1 <=> blt x12, x23, loop1 + +BLT X12, X23, label will jump to label if X12 < X23. Note this is not the +same ordering as is used for the SLT instructions. + +5. FMA instructions + +The Go assembler uses a different ordering for the RISC-V FMA operands to +the ordering given in the [RISC-V ISA Manual]. The operands are rotated one +place to the left, so that the destination operand comes last. + +Example: + + FMADDS F1, F2, F3, F4 <=> fmadd.s f4, f1, f2, f3 + +6. 
AMO instructions + +The ordering used for the AMO operations is rs2, rs1, rd, i.e., the operands +as specified in the [RISC-V ISA Manual] are rotated one place to the left. + +Example: + + AMOSWAPW X5, (X6), X7 <=> amoswap.w x7, x5, (x6) + +7. Vector instructions + +The VSETVLI instruction uses the same symbolic names as the [RISC-V ISA Manual] +to represent the components of vtype, with the exception +that they are written in upper case. The ordering of the operands in the Go +assembler differs from the [RISC-V ISA Manual] in that the operands are +rotated one place to the left so that the destination register, the register +that holds the new vl, is the last operand. + +Example: + + VSETVLI X10, E8, M1, TU, MU, X12 <=> vsetvli x12, x10, e8, m1, tu, mu + +Vector load and store instructions follow the pattern set by scalar loads and +stores, i.e., the source is always the first operand and the destination the +last. However, the ordering of the operands of these instructions is +complicated by the optional mask register and, in some cases, the use of an +additional stride or index register. In the Go assembler the index and stride +registers appear as the second operand in indexed or strided loads and stores, +while the mask register, if present, is always the penultimate operand. 
+ +Examples: + + VLE8V (X10), V3 <=> vle8.v v3, (x10) + VSE8V V3, (X10) <=> vse8.v v3, (x10) + VLE8V (X10), V0, V3 <=> vle8.v v3, (x10), v0.t + VSE8V V3, V0, (X10) <=> vse8.v v3, (x10), v0.t + VLSE8V (X10), X11, V3 <=> vlse8.v v3, (x10), x11 + VSSE8V V3, X11, (X10) <=> vsse8.v v3, (x10), x11 + VLSE8V (X10), X11, V0, V3 <=> vlse8.v v3, (x10), x11, v0.t + VSSE8V V3, X11, V0, (X10) <=> vsse8.v v3, (x10), x11, v0.t + VLUXEI8V (X10), V2, V3 <=> vluxei8.v v3, (x10), v2 + VSUXEI8V V3, V2, (X10) <=> vsuxei8.v v3, (x10), v2 + VLUXEI8V (X10), V2, V0, V3 <=> vluxei8.v v3, (x10), v2, v0.t + VSUXEI8V V3, V2, V0, (X10) <=> vsuxei8.v v3, (x10), v2, v0.t + VL1RE8V (X10), V3 <=> vl1re8.v v3, (x10) + VS1RV V3, (X11) <=> vs1r.v v3, (x11) + +The ordering of operands for two and three argument vector arithmetic instructions is +reversed in the Go assembler. + +Examples: + + VMVVV V2, V3 <=> vmv.v.v v3, v2 + VADDVV V1, V2, V3 <=> vadd.vv v3, v2, v1 + VADDVX X10, V2, V3 <=> vadd.vx v3, v2, x10 + VMADCVI $15, V2, V3 <=> vmadc.vi v3, v2, 15 + +The mask register, when specified, is always the penultimate operand in a vector +arithmetic instruction, appearing before the destination register. + +Examples: + + VANDVV V1, V2, V0, V3 <=> vand.vv v3, v2, v1, v0.t + +# Ternary instructions + +The Go assembler allows the second operand to be omitted from most ternary +instructions if it matches the third (destination) operand. + +Examples: + + ADD X10, X12, X12 <=> ADD X10, X12 + ANDI $3, X12, X12 <=> ANDI $3, X12 + +The use of this abbreviated syntax is encouraged. + +# Ordering of atomic instructions + +It is not possible to specify the ordering bits in the FENCE, LR, SC or AMO +instructions. The FENCE instruction is always emitted as a full fence, the +acquire and release bits are always set for the AMO instructions, the acquire +bit is always set for the LR instructions while the release bit is set for +the SC instructions. 
+ +# Immediate operands + +In many cases, where an R-Type instruction has a corresponding I-Type +instruction, the R-Type mnemonic can be used in place of the I-Type mnemonic. +The assembler assumes that the immediate form of the instruction was intended +when the first operand is given as an immediate value rather than a register. + +Example: + + AND $3, X12, X13 <=> ANDI $3, X12, X13 + +# Integer constant materialization + +The MOV instruction can be used to set a register to the value of any 64 bit +constant literal. The way this is achieved by the assembler varies depending +on the value of the constant. Where possible the assembler will synthesize the +constant using one or more RISC-V arithmetic instructions. If it is unable +to easily materialize the constant it will load the 64 bit literal from memory. + +A 32 bit constant literal can be specified as an argument to ADDI, ANDI, ORI and +XORI. If the specified literal does not fit into 12 bits the assembler will +generate extra instructions to synthesize it. + +Integer constants provided as operands to all other instructions must fit into +the number of bits allowed by the instructions' encodings for immediate values. +Otherwise, an error will be generated. + +# Floating point constant materialization + +The MOVF and MOVD instructions can be used to set a register to the value +of any 32 bit or 64 bit floating point constant literal, respectively. Unless +the constant literal is 0.0, MOVF and MOVD will be encoded as FLW and FLD +instructions that load the constant from a location within the program's +binary. 
+ +[RISC-V ISA Manual]: https://github.com/riscv/riscv-isa-manual +[rva20u64]: https://github.com/riscv/riscv-profiles/blob/main/src/profiles.adoc#51-rva20u64-profile +[rva22u64]: https://github.com/riscv/riscv-profiles/blob/main/src/profiles.adoc#rva22u64-profile +[rva23u64]: https://github.com/riscv/riscv-profiles/blob/main/src/rva23-profile.adoc#rva23u64-profile +*/ +package riscv diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go index 3deab34d31..043be17c07 100644 --- a/src/cmd/internal/obj/riscv/obj.go +++ b/src/cmd/internal/obj/riscv/obj.go @@ -414,10 +414,10 @@ func containsCall(sym *obj.LSym) bool { // setPCs sets the Pc field in all instructions reachable from p. // It uses pc as the initial value and returns the next available pc. -func setPCs(p *obj.Prog, pc int64) int64 { +func setPCs(p *obj.Prog, pc int64, compress bool) int64 { for ; p != nil; p = p.Link { p.Pc = pc - for _, ins := range instructionsForProg(p) { + for _, ins := range instructionsForProg(p, compress) { pc += int64(ins.length()) } @@ -671,7 +671,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { // a fixed point will be reached). No attempt to handle functions > 2GiB. for { big, rescan := false, false - maxPC := setPCs(cursym.Func().Text, 0) + maxPC := setPCs(cursym.Func().Text, 0, ctxt.CompressInstructions) if maxPC+maxTrampSize > (1 << 20) { big = true } @@ -801,7 +801,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { // Validate all instructions - this provides nice error messages. 
for p := cursym.Func().Text; p != nil; p = p.Link { - for _, ins := range instructionsForProg(p) { + for _, ins := range instructionsForProg(p, ctxt.CompressInstructions) { ins.validate(ctxt) } } @@ -1141,6 +1141,14 @@ func wantImmU(ctxt *obj.Link, ins *instruction, imm int64, nbits uint) { } } +func isScaledImmI(imm int64, nbits uint, scale int64) bool { + return immFits(imm, nbits, true) == nil && imm%scale == 0 +} + +func isScaledImmU(imm int64, nbits uint, scale int64) bool { + return immFits(imm, nbits, false) == nil && imm%scale == 0 +} + func wantScaledImm(ctxt *obj.Link, ins *instruction, imm int64, nbits uint, scale int64, signed bool) { if err := immFits(imm, nbits, signed); err != nil { ctxt.Diag("%v: %v", ins, err) @@ -1180,6 +1188,10 @@ func wantIntReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) { wantReg(ctxt, ins, pos, "integer", r, REG_X0, REG_X31) } +func isIntPrimeReg(r uint32) bool { + return r >= REG_X8 && r <= REG_X15 +} + // wantIntPrimeReg checks that r is an integer register that can be used // in a prime register field of a compressed instruction. func wantIntPrimeReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) { @@ -1191,6 +1203,10 @@ func wantFloatReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) { wantReg(ctxt, ins, pos, "float", r, REG_F0, REG_F31) } +func isFloatPrimeReg(r uint32) bool { + return r >= REG_F8 && r <= REG_F15 +} + // wantFloatPrimeReg checks that r is an floating-point register that can // be used in a prime register field of a compressed instruction. 
func wantFloatPrimeReg(ctxt *obj.Link, ins *instruction, pos string, r uint32) { @@ -3515,6 +3531,147 @@ func (ins *instruction) usesRegTmp() bool { return ins.rd == REG_TMP || ins.rs1 == REG_TMP || ins.rs2 == REG_TMP } +func (ins *instruction) compress() { + switch ins.as { + case ALW: + if ins.rd != REG_X0 && ins.rs1 == REG_SP && isScaledImmU(ins.imm, 8, 4) { + ins.as, ins.rs1, ins.rs2 = ACLWSP, obj.REG_NONE, ins.rs1 + } else if isIntPrimeReg(ins.rd) && isIntPrimeReg(ins.rs1) && isScaledImmU(ins.imm, 7, 4) { + ins.as = ACLW + } + + case ALD: + if ins.rs1 == REG_SP && ins.rd != REG_X0 && isScaledImmU(ins.imm, 9, 8) { + ins.as, ins.rs1, ins.rs2 = ACLDSP, obj.REG_NONE, ins.rs1 + } else if isIntPrimeReg(ins.rd) && isIntPrimeReg(ins.rs1) && isScaledImmU(ins.imm, 8, 8) { + ins.as = ACLD + } + + case AFLD: + if ins.rs1 == REG_SP && isScaledImmU(ins.imm, 9, 8) { + ins.as, ins.rs1, ins.rs2 = ACFLDSP, obj.REG_NONE, ins.rs1 + } else if isFloatPrimeReg(ins.rd) && isIntPrimeReg(ins.rs1) && isScaledImmU(ins.imm, 8, 8) { + ins.as = ACFLD + } + + case ASW: + if ins.rd == REG_SP && isScaledImmU(ins.imm, 8, 4) { + ins.as, ins.rs1, ins.rs2 = ACSWSP, obj.REG_NONE, ins.rs1 + } else if isIntPrimeReg(ins.rd) && isIntPrimeReg(ins.rs1) && isScaledImmU(ins.imm, 7, 4) { + ins.as, ins.rd, ins.rs1, ins.rs2 = ACSW, obj.REG_NONE, ins.rd, ins.rs1 + } + + case ASD: + if ins.rd == REG_SP && isScaledImmU(ins.imm, 9, 8) { + ins.as, ins.rs1, ins.rs2 = ACSDSP, obj.REG_NONE, ins.rs1 + } else if isIntPrimeReg(ins.rd) && isIntPrimeReg(ins.rs1) && isScaledImmU(ins.imm, 8, 8) { + ins.as, ins.rd, ins.rs1, ins.rs2 = ACSD, obj.REG_NONE, ins.rd, ins.rs1 + } + + case AFSD: + if ins.rd == REG_SP && isScaledImmU(ins.imm, 9, 8) { + ins.as, ins.rs1, ins.rs2 = ACFSDSP, obj.REG_NONE, ins.rs1 + } else if isIntPrimeReg(ins.rd) && isFloatPrimeReg(ins.rs1) && isScaledImmU(ins.imm, 8, 8) { + ins.as, ins.rd, ins.rs1, ins.rs2 = ACFSD, obj.REG_NONE, ins.rd, ins.rs1 + } + + case AADDI: + if ins.rd == REG_SP && ins.rs1 == 
REG_SP && ins.imm != 0 && isScaledImmI(ins.imm, 10, 16) { + ins.as = ACADDI16SP + } else if ins.rd != REG_X0 && ins.rd == ins.rs1 && ins.imm != 0 && immIFits(ins.imm, 6) == nil { + ins.as = ACADDI + } else if isIntPrimeReg(ins.rd) && ins.rs1 == REG_SP && ins.imm != 0 && isScaledImmU(ins.imm, 10, 4) { + ins.as = ACADDI4SPN + } else if ins.rd != REG_X0 && ins.rs1 == REG_X0 && immIFits(ins.imm, 6) == nil { + ins.as, ins.rs1 = ACLI, obj.REG_NONE + } else if ins.rd != REG_X0 && ins.rs1 != REG_X0 && ins.imm == 0 { + ins.as, ins.rs1, ins.rs2 = ACMV, obj.REG_NONE, ins.rs1 + } else if ins.rd == REG_X0 && ins.rs1 == REG_X0 && ins.imm == 0 { + ins.as, ins.rs1 = ACNOP, ins.rd + } + + case AADDIW: + if ins.rd == ins.rs1 && immIFits(ins.imm, 6) == nil { + ins.as = ACADDIW + } + + case ALUI: + if ins.rd != REG_X0 && ins.rd != REG_SP && ins.imm != 0 && immIFits(ins.imm, 6) == nil { + ins.as = ACLUI + } + + case ASLLI: + if ins.rd != REG_X0 && ins.rd == ins.rs1 && ins.imm != 0 { + ins.as = ACSLLI + } + + case ASRLI: + if isIntPrimeReg(ins.rd) && ins.rd == ins.rs1 && ins.imm != 0 { + ins.as = ACSRLI + } + + case ASRAI: + if isIntPrimeReg(ins.rd) && ins.rd == ins.rs1 && ins.imm != 0 { + ins.as = ACSRAI + } + + case AANDI: + if isIntPrimeReg(ins.rd) && ins.rd == ins.rs1 && immIFits(ins.imm, 6) == nil { + ins.as = ACANDI + } + + case AADD: + if ins.rd != REG_X0 && ins.rd == ins.rs1 && ins.rs2 != REG_X0 { + ins.as = ACADD + } else if ins.rd != REG_X0 && ins.rd == ins.rs2 && ins.rs1 != REG_X0 { + ins.as, ins.rs1, ins.rs2 = ACADD, ins.rs2, ins.rs1 + } else if ins.rd != REG_X0 && ins.rs1 == REG_X0 && ins.rs2 != REG_X0 { + ins.as = ACMV + } + + case AADDW: + if isIntPrimeReg(ins.rd) && ins.rd == ins.rs1 && isIntPrimeReg(ins.rs2) { + ins.as = ACADDW + } else if isIntPrimeReg(ins.rd) && isIntPrimeReg(ins.rs1) && ins.rd == ins.rs2 { + ins.as, ins.rs1, ins.rs2 = ACADDW, ins.rs2, ins.rs1 + } + + case ASUB: + if isIntPrimeReg(ins.rd) && ins.rd == ins.rs1 && isIntPrimeReg(ins.rs2) { + ins.as = 
ACSUB + } + + case ASUBW: + if isIntPrimeReg(ins.rd) && ins.rd == ins.rs1 && isIntPrimeReg(ins.rs2) { + ins.as = ACSUBW + } + + case AAND: + if isIntPrimeReg(ins.rd) && ins.rd == ins.rs1 && isIntPrimeReg(ins.rs2) { + ins.as = ACAND + } else if isIntPrimeReg(ins.rd) && isIntPrimeReg(ins.rs1) && ins.rd == ins.rs2 { + ins.as, ins.rs1, ins.rs2 = ACAND, ins.rs2, ins.rs1 + } + + case AOR: + if isIntPrimeReg(ins.rd) && ins.rd == ins.rs1 && isIntPrimeReg(ins.rs2) { + ins.as = ACOR + } else if isIntPrimeReg(ins.rd) && isIntPrimeReg(ins.rs1) && ins.rd == ins.rs2 { + ins.as, ins.rs1, ins.rs2 = ACOR, ins.rs2, ins.rs1 + } + + case AXOR: + if isIntPrimeReg(ins.rd) && ins.rd == ins.rs1 && isIntPrimeReg(ins.rs2) { + ins.as = ACXOR + } else if isIntPrimeReg(ins.rd) && isIntPrimeReg(ins.rs1) && ins.rd == ins.rs2 { + ins.as, ins.rs1, ins.rs2 = ACXOR, ins.rs2, ins.rs1 + } + + case AEBREAK: + ins.as, ins.rd, ins.rs1 = ACEBREAK, obj.REG_NONE, obj.REG_NONE + } +} + // instructionForProg returns the default *obj.Prog to instruction mapping. func instructionForProg(p *obj.Prog) *instruction { ins := &instruction{ @@ -4057,7 +4214,7 @@ func instructionsForMinMax(p *obj.Prog, ins *instruction) []*instruction { } // instructionsForProg returns the machine instructions for an *obj.Prog. -func instructionsForProg(p *obj.Prog) []*instruction { +func instructionsForProg(p *obj.Prog, compress bool) []*instruction { ins := instructionForProg(p) inss := []*instruction{ins} @@ -4710,6 +4867,15 @@ func instructionsForProg(p *obj.Prog) []*instruction { ins.rs1, ins.rs2 = obj.REG_NONE, REG_V0 } + // Only compress instructions when there is no relocation, since + // relocation relies on knowledge about the exact instructions that + // are in use. 
+ if compress && p.Mark&NEED_RELOC == 0 { + for _, ins := range inss { + ins.compress() + } + } + for _, ins := range inss { ins.p = p } @@ -4799,15 +4965,22 @@ func assemble(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { v := pcAlignPadLength(p.Pc, alignedValue) offset := p.Pc for ; v >= 4; v -= 4 { - // NOP - cursym.WriteBytes(ctxt, offset, []byte{0x13, 0, 0, 0}) + // NOP (ADDI $0, X0, X0) + cursym.WriteBytes(ctxt, offset, []byte{0x13, 0x00, 0x00, 0x00}) offset += 4 } + if v == 2 { + // CNOP + cursym.WriteBytes(ctxt, offset, []byte{0x01, 0x00}) + offset += 2 + } else if v != 0 { + ctxt.Diag("bad PCALIGN pad length") + } continue } offset := p.Pc - for _, ins := range instructionsForProg(p) { + for _, ins := range instructionsForProg(p, ctxt.CompressInstructions) { if ic, err := ins.encode(); err == nil { cursym.WriteInt(ctxt, offset, ins.length(), int64(ic)) offset += int64(ins.length()) diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go index 9c8e5e96f8..ed41d81388 100644 --- a/src/cmd/internal/obj/x86/obj6.go +++ b/src/cmd/internal/obj/x86/obj6.go @@ -423,8 +423,12 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { q.From.Reg = reg } } - if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN { - ctxt.Diag("don't know how to handle %v with -dynlink", p) + from3 := p.GetFrom3() + for i := range p.RestArgs { + a := &p.RestArgs[i].Addr + if a != from3 && a.Name == obj.NAME_EXTERN && !a.Sym.Local() { + ctxt.Diag("don't know how to handle %v with -dynlink", p) + } } var source *obj.Addr // MOVx sym, Ry becomes $MOV sym@GOT, R15; MOVx (R15), Ry @@ -434,9 +438,17 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p) } + if from3 != nil && from3.Name == obj.NAME_EXTERN && !from3.Sym.Local() { + ctxt.Diag("cannot handle NAME_EXTERN on multiple 
operands in %v with -dynlink", p) + } source = &p.From } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { + if from3 != nil && from3.Name == obj.NAME_EXTERN && !from3.Sym.Local() { + ctxt.Diag("cannot handle NAME_EXTERN on multiple operands in %v with -dynlink", p) + } source = &p.To + } else if from3 != nil && from3.Name == obj.NAME_EXTERN && !from3.Sym.Local() { + source = from3 } else { return } @@ -501,9 +513,7 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { p2.As = p.As p2.From = p.From p2.To = p.To - if from3 := p.GetFrom3(); from3 != nil { - p2.AddRestSource(*from3) - } + p2.RestArgs = p.RestArgs if p.From.Name == obj.NAME_EXTERN { p2.From.Reg = reg p2.From.Name = obj.NAME_NONE @@ -512,6 +522,11 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { p2.To.Reg = reg p2.To.Name = obj.NAME_NONE p2.To.Sym = nil + } else if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN { + from3 = p2.GetFrom3() + from3.Reg = reg + from3.Name = obj.NAME_NONE + from3.Sym = nil } else { return } diff --git a/src/cmd/internal/sys/arch.go b/src/cmd/internal/sys/arch.go index 3c92a6bbf2..14b1cde22b 100644 --- a/src/cmd/internal/sys/arch.go +++ b/src/cmd/internal/sys/arch.go @@ -236,7 +236,7 @@ var ArchRISCV64 = &Arch{ ByteOrder: binary.LittleEndian, PtrSize: 8, RegSize: 8, - MinLC: 4, + MinLC: 2, Alignment: 8, // riscv unaligned loads work, but are really slow (trap + simulated by OS) CanMergeLoads: false, HasLR: true, diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index 31de34aff4..9bab73e7b7 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ b/src/cmd/link/internal/ld/dwarf.go @@ -2507,19 +2507,19 @@ func dwarfcompress(ctxt *Link) { var prevSect *sym.Section for _, si := range dwarfp { for _, s := range si.syms { - ldr.SetSymValue(s, int64(pos)) sect := ldr.SymSect(s) if sect != prevSect { + if ctxt.IsWindows() { + pos = uint64(Rnd(int64(pos), PEFILEALIGN)) + } sect.Vaddr 
= pos prevSect = sect } + ldr.SetSymValue(s, int64(pos)) if ldr.SubSym(s) != 0 { log.Fatalf("%s: unexpected sub-symbols", ldr.SymName(s)) } pos += uint64(ldr.SymSize(s)) - if ctxt.IsWindows() { - pos = uint64(Rnd(int64(pos), PEFILEALIGN)) - } } } Segdwarf.Length = pos - Segdwarf.Vaddr diff --git a/src/cmd/link/internal/ld/ld_test.go b/src/cmd/link/internal/ld/ld_test.go index 9a27ac8c76..64b86f3a0b 100644 --- a/src/cmd/link/internal/ld/ld_test.go +++ b/src/cmd/link/internal/ld/ld_test.go @@ -387,7 +387,7 @@ func TestRISCVTrampolines(t *testing.T) { buf := new(bytes.Buffer) fmt.Fprintf(buf, "TEXT a(SB),$0-0\n") for i := 0; i < 1<<17; i++ { - fmt.Fprintf(buf, "\tADD $0, X0, X0\n") + fmt.Fprintf(buf, "\tADD $0, X5, X0\n") } fmt.Fprintf(buf, "\tCALL b(SB)\n") fmt.Fprintf(buf, "\tRET\n") @@ -398,7 +398,7 @@ func TestRISCVTrampolines(t *testing.T) { fmt.Fprintf(buf, "\tRET\n") fmt.Fprintf(buf, "TEXT ·d(SB),0,$0-0\n") for i := 0; i < 1<<17; i++ { - fmt.Fprintf(buf, "\tADD $0, X0, X0\n") + fmt.Fprintf(buf, "\tADD $0, X5, X0\n") } fmt.Fprintf(buf, "\tCALL a(SB)\n") fmt.Fprintf(buf, "\tCALL c(SB)\n") diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index d913953944..a4b3a0422f 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -188,7 +188,11 @@ func Main(arch *sys.Arch, theArch Arch) { buildVersion := buildcfg.Version if goexperiment := buildcfg.Experiment.String(); goexperiment != "" { - buildVersion += " X:" + goexperiment + sep := " " + if !strings.Contains(buildVersion, "-") { // See go.dev/issue/75953. 
+ sep = "-" + } + buildVersion += sep + "X:" + goexperiment } addstrdata1(ctxt, "runtime.buildVersion="+buildVersion) diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index 2d386c0c65..9ab55643f6 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -2464,10 +2464,11 @@ var blockedLinknames = map[string][]string{ // Experimental features "runtime.goroutineLeakGC": {"runtime/pprof"}, "runtime.goroutineleakcount": {"runtime/pprof"}, + "runtime.freegc": {}, // disallow all packages // Others "net.newWindowsFile": {"net"}, // pushed from os "testing/synctest.testingSynctestTest": {"testing/synctest"}, // pushed from testing - "runtime.addmoduledata": {}, // disallow all package + "runtime.addmoduledata": {}, // disallow all packages } // check if a linkname reference to symbol s from pkg is allowed diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index 31822d21f3..6ab1246c81 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -1616,6 +1616,7 @@ func TestCheckLinkname(t *testing.T) { // pull linkname of a builtin symbol is not ok {"builtin.go", false}, {"addmoduledata.go", false}, + {"freegc.go", false}, // legacy bad linkname is ok, for now {"fastrand.go", true}, {"badlinkname.go", true}, diff --git a/src/cmd/link/testdata/linkname/freegc.go b/src/cmd/link/testdata/linkname/freegc.go new file mode 100644 index 0000000000..390063f8e9 --- /dev/null +++ b/src/cmd/link/testdata/linkname/freegc.go @@ -0,0 +1,18 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Linkname runtime.freegc is not allowed. 
+ +package main + +import ( + _ "unsafe" +) + +//go:linkname freegc runtime.freegc +func freegc() + +func main() { + freegc() +} diff --git a/src/cmd/trace/procgen.go b/src/cmd/trace/procgen.go index 060e62fe04..fc0a00e7ce 100644 --- a/src/cmd/trace/procgen.go +++ b/src/cmd/trace/procgen.go @@ -143,6 +143,13 @@ func (g *procGenerator) ProcTransition(ctx *traceContext, ev *trace.Event) { viewerEv := traceviewer.InstantEvent{ Resource: uint64(proc), Stack: ctx.Stack(viewerFrames(ev.Stack())), + + // Annotate with the thread and proc. The proc is redundant, but this is to + // stay consistent with the thread view, where it's useful information. + Arg: format.SchedCtxArg{ + ProcID: uint64(st.Resource.Proc()), + ThreadID: uint64(ev.Thread()), + }, } from, to := st.Proc() @@ -156,7 +163,6 @@ func (g *procGenerator) ProcTransition(ctx *traceContext, ev *trace.Event) { start = ctx.startTime } viewerEv.Name = "proc start" - viewerEv.Arg = format.ThreadIDArg{ThreadID: uint64(ev.Thread())} viewerEv.Ts = ctx.elapsed(start) ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, 1) } diff --git a/src/cmd/trace/threadgen.go b/src/cmd/trace/threadgen.go index c2e2c86f6c..7f9e7a72f0 100644 --- a/src/cmd/trace/threadgen.go +++ b/src/cmd/trace/threadgen.go @@ -138,14 +138,17 @@ func (g *threadGenerator) ProcTransition(ctx *traceContext, ev *trace.Event) { } } - type procArg struct { - Proc uint64 `json:"proc,omitempty"` - } st := ev.StateTransition() viewerEv := traceviewer.InstantEvent{ Resource: uint64(ev.Thread()), Stack: ctx.Stack(viewerFrames(ev.Stack())), - Arg: procArg{Proc: uint64(st.Resource.Proc())}, + + // Annotate with the thread and proc. The thread is redundant, but this is to + // stay consistent with the proc view. 
+ Arg: format.SchedCtxArg{ + ProcID: uint64(st.Resource.Proc()), + ThreadID: uint64(ev.Thread()), + }, } from, to := st.Proc() @@ -159,7 +162,6 @@ func (g *threadGenerator) ProcTransition(ctx *traceContext, ev *trace.Event) { start = ctx.startTime } viewerEv.Name = "proc start" - viewerEv.Arg = format.ThreadIDArg{ThreadID: uint64(ev.Thread())} viewerEv.Ts = ctx.elapsed(start) // TODO(mknyszek): We don't have a state machine for threads, so approximate // running threads with running Ps. diff --git a/src/cmd/vendor/golang.org/x/mod/modfile/print.go b/src/cmd/vendor/golang.org/x/mod/modfile/print.go index 2a0123d4b9..48dbd82aec 100644 --- a/src/cmd/vendor/golang.org/x/mod/modfile/print.go +++ b/src/cmd/vendor/golang.org/x/mod/modfile/print.go @@ -33,7 +33,7 @@ type printer struct { } // printf prints to the buffer. -func (p *printer) printf(format string, args ...interface{}) { +func (p *printer) printf(format string, args ...any) { fmt.Fprintf(p, format, args...) } diff --git a/src/cmd/vendor/golang.org/x/mod/modfile/read.go b/src/cmd/vendor/golang.org/x/mod/modfile/read.go index 2d7486804f..504a2f1df6 100644 --- a/src/cmd/vendor/golang.org/x/mod/modfile/read.go +++ b/src/cmd/vendor/golang.org/x/mod/modfile/read.go @@ -94,7 +94,7 @@ func (x *FileSyntax) Span() (start, end Position) { // line, the new line is added at the end of the block containing hint, // extracting hint into a new block if it is not yet in one. // -// If the hint is non-nil buts its first token does not match, +// If the hint is non-nil but its first token does not match, // the new line is added after the block containing hint // (or hint itself, if not in a block). // @@ -600,7 +600,7 @@ func (in *input) readToken() { // Checked all punctuation. Must be identifier token. if c := in.peekRune(); !isIdent(c) { - in.Error(fmt.Sprintf("unexpected input character %#q", c)) + in.Error(fmt.Sprintf("unexpected input character %#q", rune(c))) } // Scan over identifier. 
diff --git a/src/cmd/vendor/golang.org/x/mod/modfile/rule.go b/src/cmd/vendor/golang.org/x/mod/modfile/rule.go index a86ee4fd82..c5b8305de7 100644 --- a/src/cmd/vendor/golang.org/x/mod/modfile/rule.go +++ b/src/cmd/vendor/golang.org/x/mod/modfile/rule.go @@ -368,7 +368,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a Err: err, }) } - errorf := func(format string, args ...interface{}) { + errorf := func(format string, args ...any) { wrapError(fmt.Errorf(format, args...)) } @@ -574,7 +574,7 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V Err: err, } } - errorf := func(format string, args ...interface{}) *Error { + errorf := func(format string, args ...any) *Error { return wrapError(fmt.Errorf(format, args...)) } @@ -685,7 +685,7 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, Err: err, }) } - errorf := func(format string, args ...interface{}) { + errorf := func(format string, args ...any) { wrapError(fmt.Errorf(format, args...)) } @@ -1594,7 +1594,7 @@ func (f *File) AddRetract(vi VersionInterval, rationale string) error { r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]") } if rationale != "" { - for _, line := range strings.Split(rationale, "\n") { + for line := range strings.SplitSeq(rationale, "\n") { com := Comment{Token: "// " + line} r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com) } diff --git a/src/cmd/vendor/golang.org/x/mod/module/module.go b/src/cmd/vendor/golang.org/x/mod/module/module.go index 16e1aa7ab4..739c13f48f 100644 --- a/src/cmd/vendor/golang.org/x/mod/module/module.go +++ b/src/cmd/vendor/golang.org/x/mod/module/module.go @@ -261,7 +261,7 @@ func modPathOK(r rune) bool { // importPathOK reports whether r can appear in a package import path element. 
// -// Import paths are intermediate between module paths and file paths: we allow +// Import paths are intermediate between module paths and file paths: we // disallow characters that would be confusing or ambiguous as arguments to // 'go get' (such as '@' and ' ' ), but allow certain characters that are // otherwise-unambiguous on the command line and historically used for some @@ -802,8 +802,8 @@ func MatchPrefixPatterns(globs, target string) bool { for globs != "" { // Extract next non-empty glob in comma-separated list. var glob string - if i := strings.Index(globs, ","); i >= 0 { - glob, globs = globs[:i], globs[i+1:] + if before, after, ok := strings.Cut(globs, ","); ok { + glob, globs = before, after } else { glob, globs = globs, "" } diff --git a/src/cmd/vendor/golang.org/x/mod/semver/semver.go b/src/cmd/vendor/golang.org/x/mod/semver/semver.go index 628f8fd687..824b282c83 100644 --- a/src/cmd/vendor/golang.org/x/mod/semver/semver.go +++ b/src/cmd/vendor/golang.org/x/mod/semver/semver.go @@ -45,8 +45,8 @@ func IsValid(v string) bool { // Canonical returns the canonical formatting of the semantic version v. // It fills in any missing .MINOR or .PATCH and discards build metadata. -// Two semantic versions compare equal only if their canonical formattings -// are identical strings. +// Two semantic versions compare equal only if their canonical formatting +// is an identical string. // The canonical invalid semantic version is the empty string. 
func Canonical(v string) string { p, ok := parse(v) diff --git a/src/cmd/vendor/golang.org/x/mod/sumdb/cache.go b/src/cmd/vendor/golang.org/x/mod/sumdb/cache.go index 629e591f42..749a80dfa4 100644 --- a/src/cmd/vendor/golang.org/x/mod/sumdb/cache.go +++ b/src/cmd/vendor/golang.org/x/mod/sumdb/cache.go @@ -20,13 +20,13 @@ type parCache struct { type cacheEntry struct { done uint32 mu sync.Mutex - result interface{} + result any } // Do calls the function f if and only if Do is being called for the first time with this key. // No call to Do with a given key returns until the one call to f returns. // Do returns the value returned by the one call to f. -func (c *parCache) Do(key interface{}, f func() interface{}) interface{} { +func (c *parCache) Do(key any, f func() any) any { entryIface, ok := c.m.Load(key) if !ok { entryIface, _ = c.m.LoadOrStore(key, new(cacheEntry)) @@ -46,7 +46,7 @@ func (c *parCache) Do(key interface{}, f func() interface{}) interface{} { // Get returns the cached result associated with key. // It returns nil if there is no such result. // If the result for key is being computed, Get does not wait for the computation to finish. -func (c *parCache) Get(key interface{}) interface{} { +func (c *parCache) Get(key any) any { entryIface, ok := c.m.Load(key) if !ok { return nil diff --git a/src/cmd/vendor/golang.org/x/mod/sumdb/client.go b/src/cmd/vendor/golang.org/x/mod/sumdb/client.go index 04dbdfe46a..f926eda1bb 100644 --- a/src/cmd/vendor/golang.org/x/mod/sumdb/client.go +++ b/src/cmd/vendor/golang.org/x/mod/sumdb/client.go @@ -244,7 +244,7 @@ func (c *Client) Lookup(path, vers string) (lines []string, err error) { data []byte err error } - result := c.record.Do(file, func() interface{} { + result := c.record.Do(file, func() any { // Try the on-disk cache, or else get from web. 
writeCache := false data, err := c.ops.ReadCache(file) @@ -284,7 +284,7 @@ func (c *Client) Lookup(path, vers string) (lines []string, err error) { // (with or without /go.mod). prefix := path + " " + vers + " " var hashes []string - for _, line := range strings.Split(string(result.data), "\n") { + for line := range strings.SplitSeq(string(result.data), "\n") { if strings.HasPrefix(line, prefix) { hashes = append(hashes, line) } @@ -552,7 +552,7 @@ func (c *Client) readTile(tile tlog.Tile) ([]byte, error) { err error } - result := c.tileCache.Do(tile, func() interface{} { + result := c.tileCache.Do(tile, func() any { // Try the requested tile in on-disk cache. data, err := c.ops.ReadCache(c.tileCacheKey(tile)) if err == nil { diff --git a/src/cmd/vendor/golang.org/x/mod/sumdb/note/note.go b/src/cmd/vendor/golang.org/x/mod/sumdb/note/note.go index db9865c317..8b2b25278d 100644 --- a/src/cmd/vendor/golang.org/x/mod/sumdb/note/note.go +++ b/src/cmd/vendor/golang.org/x/mod/sumdb/note/note.go @@ -240,8 +240,8 @@ func isValidName(name string) bool { // NewVerifier construct a new [Verifier] from an encoded verifier key. func NewVerifier(vkey string) (Verifier, error) { - name, vkey := chop(vkey, "+") - hash16, key64 := chop(vkey, "+") + name, vkey, _ := strings.Cut(vkey, "+") + hash16, key64, _ := strings.Cut(vkey, "+") hash, err1 := strconv.ParseUint(hash16, 16, 32) key, err2 := base64.StdEncoding.DecodeString(key64) if len(hash16) != 8 || err1 != nil || err2 != nil || !isValidName(name) || len(key) == 0 { @@ -276,12 +276,8 @@ func NewVerifier(vkey string) (Verifier, error) { // chop chops s at the first instance of sep, if any, // and returns the text before and after sep. // If sep is not present, chop returns before is s and after is empty. 
-func chop(s, sep string) (before, after string) { - i := strings.Index(s, sep) - if i < 0 { - return s, "" - } - return s[:i], s[i+len(sep):] +func chop(s, sep string) (before, after string, ok bool) { + return strings.Cut(s, sep) } // verifier is a trivial Verifier implementation. @@ -297,10 +293,10 @@ func (v *verifier) Verify(msg, sig []byte) bool { return v.verify(msg, sig) } // NewSigner constructs a new [Signer] from an encoded signer key. func NewSigner(skey string) (Signer, error) { - priv1, skey := chop(skey, "+") - priv2, skey := chop(skey, "+") - name, skey := chop(skey, "+") - hash16, key64 := chop(skey, "+") + priv1, skey, _ := strings.Cut(skey, "+") + priv2, skey, _ := strings.Cut(skey, "+") + name, skey, _ := strings.Cut(skey, "+") + hash16, key64, _ := strings.Cut(skey, "+") hash, err1 := strconv.ParseUint(hash16, 16, 32) key, err2 := base64.StdEncoding.DecodeString(key64) if priv1 != "PRIVATE" || priv2 != "KEY" || len(hash16) != 8 || err1 != nil || err2 != nil || !isValidName(name) || len(key) == 0 { @@ -557,7 +553,7 @@ func Open(msg []byte, known Verifiers) (*Note, error) { return nil, errMalformedNote } line = line[len(sigPrefix):] - name, b64 := chop(string(line), " ") + name, b64, _ := chop(string(line), " ") sig, err := base64.StdEncoding.DecodeString(b64) if err != nil || !isValidName(name) || b64 == "" || len(sig) < 5 { return nil, errMalformedNote diff --git a/src/cmd/vendor/golang.org/x/mod/sumdb/server.go b/src/cmd/vendor/golang.org/x/mod/sumdb/server.go index 216a2562c2..2433c939d7 100644 --- a/src/cmd/vendor/golang.org/x/mod/sumdb/server.go +++ b/src/cmd/vendor/golang.org/x/mod/sumdb/server.go @@ -76,8 +76,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, "invalid module@version syntax", http.StatusBadRequest) return } - i := strings.Index(mod, "@") - escPath, escVers := mod[:i], mod[i+1:] + escPath, escVers, _ := strings.Cut(mod, "@") path, err := module.UnescapePath(escPath) if err != nil { 
reportError(w, err) diff --git a/src/cmd/vendor/golang.org/x/mod/sumdb/test.go b/src/cmd/vendor/golang.org/x/mod/sumdb/test.go index fb772452d9..0868bef5fc 100644 --- a/src/cmd/vendor/golang.org/x/mod/sumdb/test.go +++ b/src/cmd/vendor/golang.org/x/mod/sumdb/test.go @@ -66,7 +66,7 @@ func (s *TestServer) ReadRecords(ctx context.Context, id, n int64) ([][]byte, er defer s.mu.Unlock() var list [][]byte - for i := int64(0); i < n; i++ { + for i := range n { if id+i >= int64(len(s.records)) { return nil, fmt.Errorf("missing records") } diff --git a/src/cmd/vendor/golang.org/x/mod/sumdb/tlog/note.go b/src/cmd/vendor/golang.org/x/mod/sumdb/tlog/note.go index fc6d5fa0a3..1ea765a54f 100644 --- a/src/cmd/vendor/golang.org/x/mod/sumdb/tlog/note.go +++ b/src/cmd/vendor/golang.org/x/mod/sumdb/tlog/note.go @@ -35,7 +35,7 @@ type Tree struct { // A future backwards-incompatible encoding would use a different // first line (for example, "go.sum database tree v2"). func FormatTree(tree Tree) []byte { - return []byte(fmt.Sprintf("go.sum database tree\n%d\n%s\n", tree.N, tree.Hash)) + return fmt.Appendf(nil, "go.sum database tree\n%d\n%s\n", tree.N, tree.Hash) } var errMalformedTree = errors.New("malformed tree note") @@ -87,7 +87,7 @@ func FormatRecord(id int64, text []byte) (msg []byte, err error) { if !isValidRecordText(text) { return nil, errMalformedRecord } - msg = []byte(fmt.Sprintf("%d\n", id)) + msg = fmt.Appendf(nil, "%d\n", id) msg = append(msg, text...) msg = append(msg, '\n') return msg, nil diff --git a/src/cmd/vendor/golang.org/x/mod/sumdb/tlog/tlog.go b/src/cmd/vendor/golang.org/x/mod/sumdb/tlog/tlog.go index f7ea753832..480b5eff5a 100644 --- a/src/cmd/vendor/golang.org/x/mod/sumdb/tlog/tlog.go +++ b/src/cmd/vendor/golang.org/x/mod/sumdb/tlog/tlog.go @@ -194,7 +194,7 @@ func StoredHashesForRecordHash(n int64, h Hash, r HashReader) ([]Hash, error) { // and consumes a hash from an adjacent subtree. 
m := int(bits.TrailingZeros64(uint64(n + 1))) indexes := make([]int64, m) - for i := 0; i < m; i++ { + for i := range m { // We arrange indexes in sorted order. // Note that n>>i is always odd. indexes[m-1-i] = StoredHashIndex(i, n>>uint(i)-1) @@ -210,7 +210,7 @@ func StoredHashesForRecordHash(n int64, h Hash, r HashReader) ([]Hash, error) { } // Build new hashes. - for i := 0; i < m; i++ { + for i := range m { h = NodeHash(old[m-1-i], h) hashes = append(hashes, h) } diff --git a/src/cmd/vendor/golang.org/x/mod/zip/zip.go b/src/cmd/vendor/golang.org/x/mod/zip/zip.go index 3673db4997..48363ceb72 100644 --- a/src/cmd/vendor/golang.org/x/mod/zip/zip.go +++ b/src/cmd/vendor/golang.org/x/mod/zip/zip.go @@ -780,7 +780,7 @@ func (fi dataFileInfo) Size() int64 { return int64(len(fi.f.data)) } func (fi dataFileInfo) Mode() os.FileMode { return 0644 } func (fi dataFileInfo) ModTime() time.Time { return time.Time{} } func (fi dataFileInfo) IsDir() bool { return false } -func (fi dataFileInfo) Sys() interface{} { return nil } +func (fi dataFileInfo) Sys() any { return nil } // isVendoredPackage attempts to report whether the given filename is contained // in a package whose import path contains (but does not end with) the component diff --git a/src/cmd/vendor/golang.org/x/sync/errgroup/errgroup.go b/src/cmd/vendor/golang.org/x/sync/errgroup/errgroup.go index 1d8cffae8c..2f45dbc86e 100644 --- a/src/cmd/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/src/cmd/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. +// cancellation for groups of goroutines working on subtasks of a common task. // // [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks // returning errors. 
diff --git a/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh b/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh index d1c8b2640e..42517077c4 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/src/cmd/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -226,6 +226,7 @@ struct ltchars { #include <linux/cryptouser.h> #include <linux/devlink.h> #include <linux/dm-ioctl.h> +#include <linux/elf.h> #include <linux/errqueue.h> #include <linux/ethtool_netlink.h> #include <linux/falloc.h> @@ -529,6 +530,7 @@ ccflags="$@" $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || + $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ || $2 ~ /^O?XTABS$/ || $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || diff --git a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go index 9439af961d..06c0eea6fb 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -2643,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) //sys Mseal(b []byte, flags uint) (err error) + +//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY + +func SetMemPolicy(mode int, mask *CPUSet) error { + return setMemPolicy(mode, mask, _CPU_SETSIZE) +} diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go index b6db27d937..d0a75da572 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -853,20 +853,86 @@ const ( DM_VERSION_MAJOR = 0x4 DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 + DT_ADDRRNGHI = 0x6ffffeff + DT_ADDRRNGLO = 0x6ffffe00 DT_BLK = 0x6 DT_CHR = 0x2 + DT_DEBUG = 0x15 DT_DIR = 0x4 + DT_ENCODING = 0x20 DT_FIFO = 0x1 + DT_FINI = 
0xd + DT_FLAGS_1 = 0x6ffffffb + DT_GNU_HASH = 0x6ffffef5 + DT_HASH = 0x4 + DT_HIOS = 0x6ffff000 + DT_HIPROC = 0x7fffffff + DT_INIT = 0xc + DT_JMPREL = 0x17 DT_LNK = 0xa + DT_LOOS = 0x6000000d + DT_LOPROC = 0x70000000 + DT_NEEDED = 0x1 + DT_NULL = 0x0 + DT_PLTGOT = 0x3 + DT_PLTREL = 0x14 + DT_PLTRELSZ = 0x2 DT_REG = 0x8 + DT_REL = 0x11 + DT_RELA = 0x7 + DT_RELACOUNT = 0x6ffffff9 + DT_RELAENT = 0x9 + DT_RELASZ = 0x8 + DT_RELCOUNT = 0x6ffffffa + DT_RELENT = 0x13 + DT_RELSZ = 0x12 + DT_RPATH = 0xf DT_SOCK = 0xc + DT_SONAME = 0xe + DT_STRSZ = 0xa + DT_STRTAB = 0x5 + DT_SYMBOLIC = 0x10 + DT_SYMENT = 0xb + DT_SYMTAB = 0x6 + DT_TEXTREL = 0x16 DT_UNKNOWN = 0x0 + DT_VALRNGHI = 0x6ffffdff + DT_VALRNGLO = 0x6ffffd00 + DT_VERDEF = 0x6ffffffc + DT_VERDEFNUM = 0x6ffffffd + DT_VERNEED = 0x6ffffffe + DT_VERNEEDNUM = 0x6fffffff + DT_VERSYM = 0x6ffffff0 DT_WHT = 0xe ECHO = 0x8 ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EI_CLASS = 0x4 + EI_DATA = 0x5 + EI_MAG0 = 0x0 + EI_MAG1 = 0x1 + EI_MAG2 = 0x2 + EI_MAG3 = 0x3 + EI_NIDENT = 0x10 + EI_OSABI = 0x7 + EI_PAD = 0x8 + EI_VERSION = 0x6 + ELFCLASS32 = 0x1 + ELFCLASS64 = 0x2 + ELFCLASSNONE = 0x0 + ELFCLASSNUM = 0x3 + ELFDATA2LSB = 0x1 + ELFDATA2MSB = 0x2 + ELFDATANONE = 0x0 + ELFMAG = "\177ELF" + ELFMAG0 = 0x7f + ELFMAG1 = 'E' + ELFMAG2 = 'L' + ELFMAG3 = 'F' + ELFOSABI_LINUX = 0x3 + ELFOSABI_NONE = 0x0 EM_386 = 0x3 EM_486 = 0x6 EM_68K = 0x4 @@ -1152,14 +1218,24 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + ET_CORE = 0x4 + ET_DYN = 0x3 + ET_EXEC = 0x2 + ET_HIPROC = 0xffff + ET_LOPROC = 0xff00 + ET_NONE = 0x0 + ET_REL = 0x1 EV_ABS = 0x3 EV_CNT = 0x20 + EV_CURRENT = 0x1 EV_FF = 0x15 EV_FF_STATUS = 0x17 EV_KEY = 0x1 EV_LED = 0x11 EV_MAX = 0x1f EV_MSC = 0x4 + EV_NONE = 0x0 + EV_NUM = 0x2 EV_PWR = 0x16 EV_REL = 0x2 EV_REP = 0x14 @@ -2276,7 +2352,167 @@ const ( NLM_F_REPLACE = 0x100 NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 + NN_386_IOPERM = "LINUX" + NN_386_TLS = 
"LINUX" + NN_ARC_V2 = "LINUX" + NN_ARM_FPMR = "LINUX" + NN_ARM_GCS = "LINUX" + NN_ARM_HW_BREAK = "LINUX" + NN_ARM_HW_WATCH = "LINUX" + NN_ARM_PACA_KEYS = "LINUX" + NN_ARM_PACG_KEYS = "LINUX" + NN_ARM_PAC_ENABLED_KEYS = "LINUX" + NN_ARM_PAC_MASK = "LINUX" + NN_ARM_POE = "LINUX" + NN_ARM_SSVE = "LINUX" + NN_ARM_SVE = "LINUX" + NN_ARM_SYSTEM_CALL = "LINUX" + NN_ARM_TAGGED_ADDR_CTRL = "LINUX" + NN_ARM_TLS = "LINUX" + NN_ARM_VFP = "LINUX" + NN_ARM_ZA = "LINUX" + NN_ARM_ZT = "LINUX" + NN_AUXV = "CORE" + NN_FILE = "CORE" + NN_GNU_PROPERTY_TYPE_0 = "GNU" + NN_LOONGARCH_CPUCFG = "LINUX" + NN_LOONGARCH_CSR = "LINUX" + NN_LOONGARCH_HW_BREAK = "LINUX" + NN_LOONGARCH_HW_WATCH = "LINUX" + NN_LOONGARCH_LASX = "LINUX" + NN_LOONGARCH_LBT = "LINUX" + NN_LOONGARCH_LSX = "LINUX" + NN_MIPS_DSP = "LINUX" + NN_MIPS_FP_MODE = "LINUX" + NN_MIPS_MSA = "LINUX" + NN_PPC_DEXCR = "LINUX" + NN_PPC_DSCR = "LINUX" + NN_PPC_EBB = "LINUX" + NN_PPC_HASHKEYR = "LINUX" + NN_PPC_PKEY = "LINUX" + NN_PPC_PMU = "LINUX" + NN_PPC_PPR = "LINUX" + NN_PPC_SPE = "LINUX" + NN_PPC_TAR = "LINUX" + NN_PPC_TM_CDSCR = "LINUX" + NN_PPC_TM_CFPR = "LINUX" + NN_PPC_TM_CGPR = "LINUX" + NN_PPC_TM_CPPR = "LINUX" + NN_PPC_TM_CTAR = "LINUX" + NN_PPC_TM_CVMX = "LINUX" + NN_PPC_TM_CVSX = "LINUX" + NN_PPC_TM_SPR = "LINUX" + NN_PPC_VMX = "LINUX" + NN_PPC_VSX = "LINUX" + NN_PRFPREG = "CORE" + NN_PRPSINFO = "CORE" + NN_PRSTATUS = "CORE" + NN_PRXFPREG = "LINUX" + NN_RISCV_CSR = "LINUX" + NN_RISCV_TAGGED_ADDR_CTRL = "LINUX" + NN_RISCV_VECTOR = "LINUX" + NN_S390_CTRS = "LINUX" + NN_S390_GS_BC = "LINUX" + NN_S390_GS_CB = "LINUX" + NN_S390_HIGH_GPRS = "LINUX" + NN_S390_LAST_BREAK = "LINUX" + NN_S390_PREFIX = "LINUX" + NN_S390_PV_CPU_DATA = "LINUX" + NN_S390_RI_CB = "LINUX" + NN_S390_SYSTEM_CALL = "LINUX" + NN_S390_TDB = "LINUX" + NN_S390_TIMER = "LINUX" + NN_S390_TODCMP = "LINUX" + NN_S390_TODPREG = "LINUX" + NN_S390_VXRS_HIGH = "LINUX" + NN_S390_VXRS_LOW = "LINUX" + NN_SIGINFO = "CORE" + NN_TASKSTRUCT = "CORE" + NN_VMCOREDD = "LINUX" + 
NN_X86_SHSTK = "LINUX" + NN_X86_XSAVE_LAYOUT = "LINUX" + NN_X86_XSTATE = "LINUX" NSFS_MAGIC = 0x6e736673 + NT_386_IOPERM = 0x201 + NT_386_TLS = 0x200 + NT_ARC_V2 = 0x600 + NT_ARM_FPMR = 0x40e + NT_ARM_GCS = 0x410 + NT_ARM_HW_BREAK = 0x402 + NT_ARM_HW_WATCH = 0x403 + NT_ARM_PACA_KEYS = 0x407 + NT_ARM_PACG_KEYS = 0x408 + NT_ARM_PAC_ENABLED_KEYS = 0x40a + NT_ARM_PAC_MASK = 0x406 + NT_ARM_POE = 0x40f + NT_ARM_SSVE = 0x40b + NT_ARM_SVE = 0x405 + NT_ARM_SYSTEM_CALL = 0x404 + NT_ARM_TAGGED_ADDR_CTRL = 0x409 + NT_ARM_TLS = 0x401 + NT_ARM_VFP = 0x400 + NT_ARM_ZA = 0x40c + NT_ARM_ZT = 0x40d + NT_AUXV = 0x6 + NT_FILE = 0x46494c45 + NT_GNU_PROPERTY_TYPE_0 = 0x5 + NT_LOONGARCH_CPUCFG = 0xa00 + NT_LOONGARCH_CSR = 0xa01 + NT_LOONGARCH_HW_BREAK = 0xa05 + NT_LOONGARCH_HW_WATCH = 0xa06 + NT_LOONGARCH_LASX = 0xa03 + NT_LOONGARCH_LBT = 0xa04 + NT_LOONGARCH_LSX = 0xa02 + NT_MIPS_DSP = 0x800 + NT_MIPS_FP_MODE = 0x801 + NT_MIPS_MSA = 0x802 + NT_PPC_DEXCR = 0x111 + NT_PPC_DSCR = 0x105 + NT_PPC_EBB = 0x106 + NT_PPC_HASHKEYR = 0x112 + NT_PPC_PKEY = 0x110 + NT_PPC_PMU = 0x107 + NT_PPC_PPR = 0x104 + NT_PPC_SPE = 0x101 + NT_PPC_TAR = 0x103 + NT_PPC_TM_CDSCR = 0x10f + NT_PPC_TM_CFPR = 0x109 + NT_PPC_TM_CGPR = 0x108 + NT_PPC_TM_CPPR = 0x10e + NT_PPC_TM_CTAR = 0x10d + NT_PPC_TM_CVMX = 0x10a + NT_PPC_TM_CVSX = 0x10b + NT_PPC_TM_SPR = 0x10c + NT_PPC_VMX = 0x100 + NT_PPC_VSX = 0x102 + NT_PRFPREG = 0x2 + NT_PRPSINFO = 0x3 + NT_PRSTATUS = 0x1 + NT_PRXFPREG = 0x46e62b7f + NT_RISCV_CSR = 0x900 + NT_RISCV_TAGGED_ADDR_CTRL = 0x902 + NT_RISCV_VECTOR = 0x901 + NT_S390_CTRS = 0x304 + NT_S390_GS_BC = 0x30c + NT_S390_GS_CB = 0x30b + NT_S390_HIGH_GPRS = 0x300 + NT_S390_LAST_BREAK = 0x306 + NT_S390_PREFIX = 0x305 + NT_S390_PV_CPU_DATA = 0x30e + NT_S390_RI_CB = 0x30d + NT_S390_SYSTEM_CALL = 0x307 + NT_S390_TDB = 0x308 + NT_S390_TIMER = 0x301 + NT_S390_TODCMP = 0x302 + NT_S390_TODPREG = 0x303 + NT_S390_VXRS_HIGH = 0x30a + NT_S390_VXRS_LOW = 0x309 + NT_SIGINFO = 0x53494749 + NT_TASKSTRUCT = 0x4 + NT_VMCOREDD = 
0x700 + NT_X86_SHSTK = 0x204 + NT_X86_XSAVE_LAYOUT = 0x205 + NT_X86_XSTATE = 0x202 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2463,6 +2699,59 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PF_ALG = 0x26 + PF_APPLETALK = 0x5 + PF_ASH = 0x12 + PF_ATMPVC = 0x8 + PF_ATMSVC = 0x14 + PF_AX25 = 0x3 + PF_BLUETOOTH = 0x1f + PF_BRIDGE = 0x7 + PF_CAIF = 0x25 + PF_CAN = 0x1d + PF_DECnet = 0xc + PF_ECONET = 0x13 + PF_FILE = 0x1 + PF_IB = 0x1b + PF_IEEE802154 = 0x24 + PF_INET = 0x2 + PF_INET6 = 0xa + PF_IPX = 0x4 + PF_IRDA = 0x17 + PF_ISDN = 0x22 + PF_IUCV = 0x20 + PF_KCM = 0x29 + PF_KEY = 0xf + PF_LLC = 0x1a + PF_LOCAL = 0x1 + PF_MAX = 0x2e + PF_MCTP = 0x2d + PF_MPLS = 0x1c + PF_NETBEUI = 0xd + PF_NETLINK = 0x10 + PF_NETROM = 0x6 + PF_NFC = 0x27 + PF_PACKET = 0x11 + PF_PHONET = 0x23 + PF_PPPOX = 0x18 + PF_QIPCRTR = 0x2a + PF_R = 0x4 + PF_RDS = 0x15 + PF_ROSE = 0xb + PF_ROUTE = 0x10 + PF_RXRPC = 0x21 + PF_SECURITY = 0xe + PF_SMC = 0x2b + PF_SNA = 0x16 + PF_TIPC = 0x1e + PF_UNIX = 0x1 + PF_UNSPEC = 0x0 + PF_VSOCK = 0x28 + PF_W = 0x2 + PF_WANPIPE = 0x19 + PF_X = 0x1 + PF_X25 = 0x9 + PF_XDP = 0x2c PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c @@ -2758,6 +3047,23 @@ const ( PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 + PT_AARCH64_MEMTAG_MTE = 0x70000002 + PT_DYNAMIC = 0x2 + PT_GNU_EH_FRAME = 0x6474e550 + PT_GNU_PROPERTY = 0x6474e553 + PT_GNU_RELRO = 0x6474e552 + PT_GNU_STACK = 0x6474e551 + PT_HIOS = 0x6fffffff + PT_HIPROC = 0x7fffffff + PT_INTERP = 0x3 + PT_LOAD = 0x1 + PT_LOOS = 0x60000000 + PT_LOPROC = 0x70000000 + PT_NOTE = 0x4 + PT_NULL = 0x0 + PT_PHDR = 0x6 + PT_SHLIB = 0x5 + PT_TLS = 0x7 P_ALL = 0x0 P_PGID = 0x2 P_PID = 0x1 @@ -3091,6 +3397,47 @@ const ( SEEK_MAX = 0x4 SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c + SHF_ALLOC = 0x2 + SHF_EXCLUDE = 0x8000000 + SHF_EXECINSTR = 0x4 + SHF_GROUP = 0x200 + SHF_INFO_LINK = 0x40 + 
SHF_LINK_ORDER = 0x80 + SHF_MASKOS = 0xff00000 + SHF_MASKPROC = 0xf0000000 + SHF_MERGE = 0x10 + SHF_ORDERED = 0x4000000 + SHF_OS_NONCONFORMING = 0x100 + SHF_RELA_LIVEPATCH = 0x100000 + SHF_RO_AFTER_INIT = 0x200000 + SHF_STRINGS = 0x20 + SHF_TLS = 0x400 + SHF_WRITE = 0x1 + SHN_ABS = 0xfff1 + SHN_COMMON = 0xfff2 + SHN_HIPROC = 0xff1f + SHN_HIRESERVE = 0xffff + SHN_LIVEPATCH = 0xff20 + SHN_LOPROC = 0xff00 + SHN_LORESERVE = 0xff00 + SHN_UNDEF = 0x0 + SHT_DYNAMIC = 0x6 + SHT_DYNSYM = 0xb + SHT_HASH = 0x5 + SHT_HIPROC = 0x7fffffff + SHT_HIUSER = 0xffffffff + SHT_LOPROC = 0x70000000 + SHT_LOUSER = 0x80000000 + SHT_NOBITS = 0x8 + SHT_NOTE = 0x7 + SHT_NULL = 0x0 + SHT_NUM = 0xc + SHT_PROGBITS = 0x1 + SHT_REL = 0x9 + SHT_RELA = 0x4 + SHT_SHLIB = 0xa + SHT_STRTAB = 0x3 + SHT_SYMTAB = 0x2 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -3317,6 +3664,16 @@ const ( STATX_UID = 0x8 STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 + STB_GLOBAL = 0x1 + STB_LOCAL = 0x0 + STB_WEAK = 0x2 + STT_COMMON = 0x5 + STT_FILE = 0x4 + STT_FUNC = 0x2 + STT_NOTYPE = 0x0 + STT_OBJECT = 0x1 + STT_SECTION = 0x3 + STT_TLS = 0x6 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 @@ -3553,6 +3910,8 @@ const ( UTIME_OMIT = 0x3ffffffe V9FS_MAGIC = 0x1021997 VERASE = 0x2 + VER_FLG_BASE = 0x1 + VER_FLG_WEAK = 0x2 VINTR = 0x0 VKILL = 0x3 VLNEXT = 0xf diff --git a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 5cc1e8eb2f..8935d10a31 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setMemPolicy(mode int, mask *CPUSet, size int) (err error) { + _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size)) + if e1 != 0 { 
+ err = errnoErr(e1) + } + return +} diff --git a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go index 944e75a11c..c1a4670171 100644 --- a/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/src/cmd/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -3590,6 +3590,8 @@ type Nhmsg struct { Flags uint32 } +const SizeofNhmsg = 0x8 + type NexthopGrp struct { Id uint32 Weight uint8 @@ -3597,6 +3599,8 @@ type NexthopGrp struct { Resvd2 uint16 } +const SizeofNexthopGrp = 0x8 + const ( NHA_UNSPEC = 0x0 NHA_ID = 0x1 @@ -6332,3 +6336,30 @@ type SockDiagReq struct { } const RTM_NEWNVLAN = 0x70 + +const ( + MPOL_BIND = 0x2 + MPOL_DEFAULT = 0x0 + MPOL_F_ADDR = 0x2 + MPOL_F_MEMS_ALLOWED = 0x4 + MPOL_F_MOF = 0x8 + MPOL_F_MORON = 0x10 + MPOL_F_NODE = 0x1 + MPOL_F_NUMA_BALANCING = 0x2000 + MPOL_F_RELATIVE_NODES = 0x4000 + MPOL_F_SHARED = 0x1 + MPOL_F_STATIC_NODES = 0x8000 + MPOL_INTERLEAVE = 0x3 + MPOL_LOCAL = 0x4 + MPOL_MAX = 0x7 + MPOL_MF_INTERNAL = 0x10 + MPOL_MF_LAZY = 0x8 + MPOL_MF_MOVE_ALL = 0x4 + MPOL_MF_MOVE = 0x2 + MPOL_MF_STRICT = 0x1 + MPOL_MF_VALID = 0x7 + MPOL_MODE_FLAGS = 0xe000 + MPOL_PREFERRED = 0x1 + MPOL_PREFERRED_MANY = 0x5 + MPOL_WEIGHTED_INTERLEAVE = 0x6 +) diff --git a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go index bd51337306..69439df2a4 100644 --- a/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/src/cmd/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -892,8 +892,12 @@ const socket_error = uintptr(^uint32(0)) //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx //sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys 
GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2 +//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2 //sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable //sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2 //sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange //sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 @@ -916,6 +920,17 @@ type RawSockaddrInet6 struct { Scope_id uint32 } +// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See +// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet. +// +// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using +// unsafe, depending on the address family. 
+type RawSockaddrInet struct { + Family uint16 + Port uint16 + Data [6]uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 diff --git a/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go index 358be3c7f5..6e4f50eb48 100644 --- a/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go +++ b/src/cmd/vendor/golang.org/x/sys/windows/types_windows.go @@ -2320,6 +2320,82 @@ type MibIfRow2 struct { OutQLen uint64 } +// IP_ADDRESS_PREFIX stores an IP address prefix. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix. +type IpAddressPrefix struct { + Prefix RawSockaddrInet + PrefixLength uint8 +} + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin. +const ( + NlroManual = 0 + NlroWellKnown = 1 + NlroDHCP = 2 + NlroRouterAdvertisement = 3 + Nlro6to4 = 4 +) + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol. +const ( + MIB_IPPROTO_OTHER = 1 + MIB_IPPROTO_LOCAL = 2 + MIB_IPPROTO_NETMGMT = 3 + MIB_IPPROTO_ICMP = 4 + MIB_IPPROTO_EGP = 5 + MIB_IPPROTO_GGP = 6 + MIB_IPPROTO_HELLO = 7 + MIB_IPPROTO_RIP = 8 + MIB_IPPROTO_IS_IS = 9 + MIB_IPPROTO_ES_IS = 10 + MIB_IPPROTO_CISCO = 11 + MIB_IPPROTO_BBN = 12 + MIB_IPPROTO_OSPF = 13 + MIB_IPPROTO_BGP = 14 + MIB_IPPROTO_IDPR = 15 + MIB_IPPROTO_EIGRP = 16 + MIB_IPPROTO_DVMRP = 17 + MIB_IPPROTO_RPL = 18 + MIB_IPPROTO_DHCP = 19 + MIB_IPPROTO_NT_AUTOSTATIC = 10002 + MIB_IPPROTO_NT_STATIC = 10006 + MIB_IPPROTO_NT_STATIC_NON_DOD = 10007 +) + +// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2. 
+type MibIpForwardRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + DestinationPrefix IpAddressPrefix + NextHop RawSockaddrInet + SitePrefixLength uint8 + ValidLifetime uint32 + PreferredLifetime uint32 + Metric uint32 + Protocol uint32 + Loopback uint8 + AutoconfigureAddress uint8 + Publish uint8 + Immortal uint8 + Age uint32 + Origin uint32 +} + +// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2. +type MibIpForwardTable2 struct { + NumEntries uint32 + Table [1]MibIpForwardRow2 +} + +// Rows returns the IP route entries in the table. +func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 { + return unsafe.Slice(&t.Table[0], t.NumEntries) +} + // MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See // https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. type MibUnicastIpAddressRow struct { diff --git a/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 426151a019..f25b7308a1 100644 --- a/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/src/cmd/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -182,13 +182,17 @@ var ( procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") + procFreeMibTable = modiphlpapi.NewProc("FreeMibTable") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2") + 
procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2") procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2") procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") @@ -1624,6 +1628,11 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { return } +func FreeMibTable(memory unsafe.Pointer) { + syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory)) + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { @@ -1664,6 +1673,22 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { return } +func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) if r0 != 0 { @@ -1684,6 +1709,18 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa return } +func 
NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { var _p0 uint32 if initialNotification { diff --git a/src/cmd/vendor/golang.org/x/telemetry/codereview.cfg b/src/cmd/vendor/golang.org/x/telemetry/codereview.cfg new file mode 100644 index 0000000000..3f8b14b64e --- /dev/null +++ b/src/cmd/vendor/golang.org/x/telemetry/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/diagnostic.go index f6118bec64..527540c62c 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/diagnostic.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/diagnostic.go @@ -33,8 +33,9 @@ type Diagnostic struct { URL string // SuggestedFixes is an optional list of fixes to address the - // problem described by the diagnostic. Each one represents - // an alternative strategy; at most one may be applied. + // problem described by the diagnostic. Each one represents an + // alternative strategy, and should have a distinct and + // descriptive message; at most one may be applied. 
// // Fixes for different diagnostics should be treated as // independent changes to the same baseline file state, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go index ffc4169083..c7637df00a 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go @@ -13,22 +13,20 @@ import ( "encoding/json" "flag" "fmt" - "go/token" "io" "log" "os" "strconv" - "strings" "golang.org/x/tools/go/analysis" ) // flags common to all {single,multi,unit}checkers. var ( - JSON = false // -json - Context = -1 // -c=N: if N>0, display offending line plus N lines of context - Fix bool // -fix - diffFlag bool // -diff (changes [ApplyFixes] behavior) + JSON = false // -json + Context = -1 // -c=N: if N>0, display offending line plus N lines of context + Fix bool // -fix + Diff bool // -diff ) // Parse creates a flag for each of the analyzer's flags, @@ -78,7 +76,7 @@ func Parse(analyzers []*analysis.Analyzer, multi bool) []*analysis.Analyzer { flag.BoolVar(&JSON, "json", JSON, "emit JSON output") flag.IntVar(&Context, "c", Context, `display offending line with this many lines of context`) flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes") - flag.BoolVar(&diffFlag, "diff", false, "with -fix, don't update the files, but print a unified diff") + flag.BoolVar(&Diff, "diff", false, "with -fix, don't update the files, but print a unified diff") // Add shims for legacy vet flags to enable existing // scripts that run vet to continue to work. @@ -310,150 +308,3 @@ var vetLegacyFlags = map[string]string{ "unusedfuncs": "unusedresult.funcs", "unusedstringmethods": "unusedresult.stringmethods", } - -// ---- output helpers common to all drivers ---- -// -// These functions should not depend on global state (flags)! 
-// Really they belong in a different package. - -// TODO(adonovan): don't accept an io.Writer if we don't report errors. -// Either accept a bytes.Buffer (infallible), or return a []byte. - -// PrintPlain prints a diagnostic in plain text form. -// If contextLines is nonnegative, it also prints the -// offending line plus this many lines of context. -func PrintPlain(out io.Writer, fset *token.FileSet, contextLines int, diag analysis.Diagnostic) { - print := func(pos, end token.Pos, message string) { - posn := fset.Position(pos) - fmt.Fprintf(out, "%s: %s\n", posn, message) - - // show offending line plus N lines of context. - if contextLines >= 0 { - end := fset.Position(end) - if !end.IsValid() { - end = posn - } - // TODO(adonovan): highlight the portion of the line indicated - // by pos...end using ASCII art, terminal colors, etc? - data, _ := os.ReadFile(posn.Filename) - lines := strings.Split(string(data), "\n") - for i := posn.Line - contextLines; i <= end.Line+contextLines; i++ { - if 1 <= i && i <= len(lines) { - fmt.Fprintf(out, "%d\t%s\n", i, lines[i-1]) - } - } - } - } - - print(diag.Pos, diag.End, diag.Message) - for _, rel := range diag.Related { - print(rel.Pos, rel.End, "\t"+rel.Message) - } -} - -// A JSONTree is a mapping from package ID to analysis name to result. -// Each result is either a jsonError or a list of JSONDiagnostic. -type JSONTree map[string]map[string]any - -// A TextEdit describes the replacement of a portion of a file. -// Start and End are zero-based half-open indices into the original byte -// sequence of the file, and New is the new text. -type JSONTextEdit struct { - Filename string `json:"filename"` - Start int `json:"start"` - End int `json:"end"` - New string `json:"new"` -} - -// A JSONSuggestedFix describes an edit that should be applied as a whole or not -// at all. It might contain multiple TextEdits/text_edits if the SuggestedFix -// consists of multiple non-contiguous edits. 
-type JSONSuggestedFix struct { - Message string `json:"message"` - Edits []JSONTextEdit `json:"edits"` -} - -// A JSONDiagnostic describes the JSON schema of an analysis.Diagnostic. -// -// TODO(matloob): include End position if present. -type JSONDiagnostic struct { - Category string `json:"category,omitempty"` - Posn string `json:"posn"` // e.g. "file.go:line:column" - Message string `json:"message"` - SuggestedFixes []JSONSuggestedFix `json:"suggested_fixes,omitempty"` - Related []JSONRelatedInformation `json:"related,omitempty"` -} - -// A JSONRelated describes a secondary position and message related to -// a primary diagnostic. -// -// TODO(adonovan): include End position if present. -type JSONRelatedInformation struct { - Posn string `json:"posn"` // e.g. "file.go:line:column" - Message string `json:"message"` -} - -// Add adds the result of analysis 'name' on package 'id'. -// The result is either a list of diagnostics or an error. -func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.Diagnostic, err error) { - var v any - if err != nil { - type jsonError struct { - Err string `json:"error"` - } - v = jsonError{err.Error()} - } else if len(diags) > 0 { - diagnostics := make([]JSONDiagnostic, 0, len(diags)) - for _, f := range diags { - var fixes []JSONSuggestedFix - for _, fix := range f.SuggestedFixes { - var edits []JSONTextEdit - for _, edit := range fix.TextEdits { - edits = append(edits, JSONTextEdit{ - Filename: fset.Position(edit.Pos).Filename, - Start: fset.Position(edit.Pos).Offset, - End: fset.Position(edit.End).Offset, - New: string(edit.NewText), - }) - } - fixes = append(fixes, JSONSuggestedFix{ - Message: fix.Message, - Edits: edits, - }) - } - var related []JSONRelatedInformation - for _, r := range f.Related { - related = append(related, JSONRelatedInformation{ - Posn: fset.Position(r.Pos).String(), - Message: r.Message, - }) - } - jdiag := JSONDiagnostic{ - Category: f.Category, - Posn: 
fset.Position(f.Pos).String(), - Message: f.Message, - SuggestedFixes: fixes, - Related: related, - } - diagnostics = append(diagnostics, jdiag) - } - v = diagnostics - } - if v != nil { - m, ok := tree[id] - if !ok { - m = make(map[string]any) - tree[id] = m - } - m[name] = v - } -} - -func (tree JSONTree) Print(out io.Writer) error { - data, err := json.MarshalIndent(tree, "", "\t") - if err != nil { - log.Panicf("internal error: JSON marshaling failed: %v", err) - } - _, err = fmt.Fprintf(out, "%s\n", data) - return err -} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go index b4e91edce3..8ccf982d23 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go @@ -15,7 +15,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" ) //go:embed doc.go @@ -23,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "appends", - Doc: analysisinternal.MustExtractDoc(doc, "appends"), + Doc: analyzerutil.MustExtractDoc(doc, "appends"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/appends", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go index e9c0879844..ba9ca38a81 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go @@ -19,7 +19,7 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" + 
"golang.org/x/tools/internal/analysis/analyzerutil" ) const Doc = "report mismatches between assembly files and Go declarations" @@ -175,7 +175,7 @@ func run(pass *analysis.Pass) (any, error) { Files: for _, fname := range sfiles { - content, tf, err := analysisinternal.ReadFile(pass, fname) + content, tf, err := analyzerutil.ReadFile(pass, fname) if err != nil { return nil, err } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go index 8080aed020..69734df825 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go @@ -18,7 +18,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal" @@ -29,7 +29,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "assign", - Doc: analysisinternal.MustExtractDoc(doc, "assign"), + Doc: analyzerutil.MustExtractDoc(doc, "assign"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/assign", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go index 9faa3f67c1..c6ab7ff7a2 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go @@ -13,7 +13,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + 
"golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" ) @@ -23,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "atomic", - Doc: analysisinternal.MustExtractDoc(doc, "atomic"), + Doc: analyzerutil.MustExtractDoc(doc, "atomic"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/atomic", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go index 7dd4f249e2..d0b28e5b84 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go @@ -14,7 +14,7 @@ import ( "unicode" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" ) const Doc = "check //go:build and // +build directives" @@ -86,7 +86,7 @@ func checkOtherFile(pass *analysis.Pass, filename string) error { // We cannot use the Go parser, since this may not be a Go source file. // Read the raw bytes instead. 
- content, tf, err := analysisinternal.ReadFile(pass, filename) + content, tf, err := analyzerutil.ReadFile(pass, filename) if err != nil { return err } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go index bf1202b92b..54b8062cc0 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go @@ -350,8 +350,8 @@ func typeOKForCgoCall(t types.Type, m map[types.Type]bool) bool { case *types.Array: return typeOKForCgoCall(t.Elem(), m) case *types.Struct: - for i := 0; i < t.NumFields(); i++ { - if !typeOKForCgoCall(t.Field(i).Type(), m) { + for field := range t.Fields() { + if !typeOKForCgoCall(field.Type(), m) { return false } } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go index 4190cc5900..208602f486 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go @@ -328,8 +328,8 @@ func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typ ttyp, ok := typ.Underlying().(*types.Tuple) if ok { - for i := 0; i < ttyp.Len(); i++ { - subpath := lockPath(tpkg, ttyp.At(i).Type(), seen) + for v := range ttyp.Variables() { + subpath := lockPath(tpkg, v.Type(), seen) if subpath != nil { return append(subpath, typ.String()) } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go index 951aaed00f..d6c2586e73 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go @@ -16,9 +16,12 @@ import ( 
"golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/ctrlflowinternal" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/cfg" "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/cfginternal" + "golang.org/x/tools/internal/typesinternal" ) var Analyzer = &analysis.Analyzer{ @@ -26,7 +29,7 @@ var Analyzer = &analysis.Analyzer{ Doc: "build a control-flow graph", URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ctrlflow", Run: run, - ResultType: reflect.TypeOf(new(CFGs)), + ResultType: reflect.TypeFor[*CFGs](), FactTypes: []analysis.Fact{new(noReturn)}, Requires: []*analysis.Analyzer{inspect.Analyzer}, } @@ -44,7 +47,20 @@ type CFGs struct { defs map[*ast.Ident]types.Object // from Pass.TypesInfo.Defs funcDecls map[*types.Func]*declInfo funcLits map[*ast.FuncLit]*litInfo - pass *analysis.Pass // transient; nil after construction + noReturn map[*types.Func]bool // functions lacking a reachable return statement + pass *analysis.Pass // transient; nil after construction +} + +// TODO(adonovan): add (*CFGs).NoReturn to public API. +func (c *CFGs) isNoReturn(fn *types.Func) bool { + return c.noReturn[fn] +} + +func init() { + // Expose the hidden method to callers in x/tools. + ctrlflowinternal.NoReturn = func(c any, fn *types.Func) bool { + return c.(*CFGs).isNoReturn(fn) + } } // CFGs has two maps: funcDecls for named functions and funcLits for @@ -54,15 +70,14 @@ type CFGs struct { // *types.Func but not the other way. type declInfo struct { - decl *ast.FuncDecl - cfg *cfg.CFG // iff decl.Body != nil - started bool // to break cycles - noReturn bool + decl *ast.FuncDecl + cfg *cfg.CFG // iff decl.Body != nil + started bool // to break cycles } type litInfo struct { cfg *cfg.CFG - noReturn bool + noReturn bool // (currently unused) } // FuncDecl returns the control-flow graph for a named function. 
@@ -118,6 +133,7 @@ func run(pass *analysis.Pass) (any, error) { defs: pass.TypesInfo.Defs, funcDecls: funcDecls, funcLits: funcLits, + noReturn: make(map[*types.Func]bool), pass: pass, } @@ -138,7 +154,7 @@ func run(pass *analysis.Pass) (any, error) { li := funcLits[lit] if li.cfg == nil { li.cfg = cfg.New(lit.Body, c.callMayReturn) - if !hasReachableReturn(li.cfg) { + if cfginternal.IsNoReturn(li.cfg) { li.noReturn = true } } @@ -158,27 +174,28 @@ func (c *CFGs) buildDecl(fn *types.Func, di *declInfo) { // The buildDecl call tree thus resembles the static call graph. // We mark each node when we start working on it to break cycles. - if !di.started { // break cycle - di.started = true + if di.started { + return // break cycle + } + di.started = true - if isIntrinsicNoReturn(fn) { - di.noReturn = true - } - if di.decl.Body != nil { - di.cfg = cfg.New(di.decl.Body, c.callMayReturn) - if !hasReachableReturn(di.cfg) { - di.noReturn = true - } - } - if di.noReturn { - c.pass.ExportObjectFact(fn, new(noReturn)) - } + noreturn := isIntrinsicNoReturn(fn) - // debugging - if false { - log.Printf("CFG for %s:\n%s (noreturn=%t)\n", fn, di.cfg.Format(c.pass.Fset), di.noReturn) + if di.decl.Body != nil { + di.cfg = cfg.New(di.decl.Body, c.callMayReturn) + if cfginternal.IsNoReturn(di.cfg) { + noreturn = true } } + if noreturn { + c.pass.ExportObjectFact(fn, new(noReturn)) + c.noReturn[fn] = true + } + + // debugging + if false { + log.Printf("CFG for %s:\n%s (noreturn=%t)\n", fn, di.cfg.Format(c.pass.Fset), noreturn) + } } // callMayReturn reports whether the called function may return. @@ -201,31 +218,26 @@ func (c *CFGs) callMayReturn(call *ast.CallExpr) (r bool) { // Function or method declared in this package? if di, ok := c.funcDecls[fn]; ok { c.buildDecl(fn, di) - return !di.noReturn + return !c.noReturn[fn] } // Not declared in this package. // Is there a fact from another package? 
- return !c.pass.ImportObjectFact(fn, new(noReturn)) + if c.pass.ImportObjectFact(fn, new(noReturn)) { + c.noReturn[fn] = true + return false + } + + return true } var panicBuiltin = types.Universe.Lookup("panic").(*types.Builtin) -func hasReachableReturn(g *cfg.CFG) bool { - for _, b := range g.Blocks { - if b.Live && b.Return() != nil { - return true - } - } - return false -} - // isIntrinsicNoReturn reports whether a function intrinsically never // returns because it stops execution of the calling thread. // It is the base case in the recursion. func isIntrinsicNoReturn(fn *types.Func) bool { // Add functions here as the need arises, but don't allocate memory. - path, name := fn.Pkg().Path(), fn.Name() - return path == "syscall" && (name == "Exit" || name == "ExitProcess" || name == "ExitThread") || - path == "runtime" && name == "Goexit" + return typesinternal.IsFunctionNamed(fn, "syscall", "Exit", "ExitProcess", "ExitThread") || + typesinternal.IsFunctionNamed(fn, "runtime", "Goexit") } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go index 3069ee9fec..af93407cae 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go @@ -12,7 +12,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -23,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "defers", Requires: []*analysis.Analyzer{inspect.Analyzer}, - Doc: analysisinternal.MustExtractDoc(doc, "defers"), + Doc: analyzerutil.MustExtractDoc(doc, "defers"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/defers", Run: run, } diff --git 
a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go index c84d25842e..5fa28861e5 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go @@ -14,7 +14,7 @@ import ( "unicode/utf8" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" ) const Doc = `check Go toolchain directives such as //go:debug @@ -86,7 +86,7 @@ func checkGoFile(pass *analysis.Pass, f *ast.File) { func checkOtherFile(pass *analysis.Pass, filename string) error { // We cannot use the Go parser, since is not a Go source file. // Read the raw bytes instead. - content, tf, err := analysisinternal.ReadFile(pass, filename) + content, tf, err := analyzerutil.ReadFile(pass, filename) if err != nil { return err } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go index b3df99929d..f1465f7343 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go @@ -12,7 +12,7 @@ import ( "go/types" "golang.org/x/tools/go/analysis" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/typesinternal/typeindex" ) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go index 809095d40a..a7d558103a 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go +++ 
b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go @@ -13,7 +13,7 @@ import ( "unicode" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" ) const Doc = "report assembly that clobbers the frame pointer before saving it" @@ -98,7 +98,7 @@ func run(pass *analysis.Pass) (any, error) { } for _, fname := range sfiles { - content, tf, err := analysisinternal.ReadFile(pass, fname) + content, tf, err := analyzerutil.ReadFile(pass, fname) if err != nil { return nil, err } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/hostport/hostport.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/hostport/hostport.go index 07f154963e..d41a0e4cbf 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/hostport/hostport.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/hostport/hostport.go @@ -17,7 +17,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/types/typeutil" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/typesinternal/typeindex" ) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go index a6dcf1cf8e..da0acbd8e2 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go @@ -12,7 +12,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typeparams" ) @@ -21,7 +21,7 @@ var doc 
string var Analyzer = &analysis.Analyzer{ Name: "ifaceassert", - Doc: analysisinternal.MustExtractDoc(doc, "ifaceassert"), + Doc: analyzerutil.MustExtractDoc(doc, "ifaceassert"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ifaceassert", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inline/gofix.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inline/inline.go index 629d5d8526..1b3cb108c6 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inline/gofix.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inline/inline.go @@ -20,9 +20,10 @@ import ( "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/moreiters" "golang.org/x/tools/internal/packagepath" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/refactor/inline" @@ -34,7 +35,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "inline", - Doc: analysisinternal.MustExtractDoc(doc, "inline"), + Doc: analyzerutil.MustExtractDoc(doc, "inline"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/inline", Run: run, FactTypes: []analysis.Fact{ @@ -132,8 +133,6 @@ func (a *analyzer) HandleConst(nameIdent, rhsIdent *ast.Ident) { // inline inlines each static call to an inlinable function // and each reference to an inlinable constant or type alias. -// -// TODO(adonovan): handle multiple diffs that each add the same import. 
func (a *analyzer) inline() { for cur := range a.root.Preorder((*ast.CallExpr)(nil), (*ast.Ident)(nil)) { switch n := cur.Node().(type) { @@ -167,6 +166,10 @@ func (a *analyzer) inlineCall(call *ast.CallExpr, cur inspector.Cursor) { return // nope } + if a.withinTestOf(cur, fn) { + return // don't inline a function from within its own test + } + // Inline the call. content, err := a.readFile(call) if err != nil { @@ -229,6 +232,44 @@ func (a *analyzer) inlineCall(call *ast.CallExpr, cur inspector.Cursor) { } } +// withinTestOf reports whether cur is within a dedicated test +// function for the inlinable target function. +// A call within its dedicated test should not be inlined. +func (a *analyzer) withinTestOf(cur inspector.Cursor, target *types.Func) bool { + curFuncDecl, ok := moreiters.First(cur.Enclosing((*ast.FuncDecl)(nil))) + if !ok { + return false // not in a function + } + funcDecl := curFuncDecl.Node().(*ast.FuncDecl) + if funcDecl.Recv != nil { + return false // not a test func + } + if strings.TrimSuffix(a.pass.Pkg.Path(), "_test") != target.Pkg().Path() { + return false // different package + } + if !strings.HasSuffix(a.pass.Fset.File(funcDecl.Pos()).Name(), "_test.go") { + return false // not a test file + } + + // Computed expected SYMBOL portion of "TestSYMBOL_comment" + // for the target symbol. + symbol := target.Name() + if recv := target.Signature().Recv(); recv != nil { + _, named := typesinternal.ReceiverNamed(recv) + symbol = named.Obj().Name() + "_" + symbol + } + + // TODO(adonovan): use a proper Test function parser. + fname := funcDecl.Name.Name + for _, pre := range []string{"Test", "Example", "Bench"} { + if fname == pre+symbol || strings.HasPrefix(fname, pre+symbol+"_") { + return true + } + } + + return false +} + // If tn is the TypeName of an inlinable alias, suggest inlining its use at cur. 
func (a *analyzer) inlineAlias(tn *types.TypeName, curId inspector.Cursor) { inalias, ok := a.inlinableAliases[tn] @@ -375,8 +416,8 @@ func typenames(t types.Type) []*types.TypeName { visit(t.Key()) visit(t.Elem()) case *types.Struct: - for i := range t.NumFields() { - visit(t.Field(i).Type()) + for field := range t.Fields() { + visit(field.Type()) } case *types.Signature: // Ignore the receiver: although it may be present, it has no meaning @@ -389,11 +430,11 @@ func typenames(t types.Type) []*types.TypeName { visit(t.Params()) visit(t.Results()) case *types.Interface: - for i := range t.NumEmbeddeds() { - visit(t.EmbeddedType(i)) + for etyp := range t.EmbeddedTypes() { + visit(etyp) } - for i := range t.NumExplicitMethods() { - visit(t.ExplicitMethod(i).Type()) + for method := range t.ExplicitMethods() { + visit(method.Type()) } case *types.Tuple: for v := range t.Variables() { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go index ee1972f56d..aae5d255f9 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go @@ -41,7 +41,7 @@ var Analyzer = &analysis.Analyzer{ URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/inspect", Run: run, RunDespiteErrors: true, - ResultType: reflect.TypeOf(new(inspector.Inspector)), + ResultType: reflect.TypeFor[*inspector.Inspector](), } func run(pass *analysis.Pass) (any, error) { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/ctrlflowinternal/ctrlflowinternal.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/ctrlflowinternal/ctrlflowinternal.go new file mode 100644 index 0000000000..ee7a37228e --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/internal/ctrlflowinternal/ctrlflowinternal.go @@ -0,0 +1,17 @@ +// Copyright 2025 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctrlflowinternal exposes internals of ctrlflow. +// It cannot actually depend on symbols from ctrlflow. +package ctrlflowinternal + +import "go/types" + +// NoReturn exposes the (*ctrlflow.CFGs).NoReturn method to the buildssa analyzer. +// +// You must link [golang.org/x/tools/go/analysis/passes/ctrlflow] into your +// application for it to be non-nil. +var NoReturn = func(cfgs any, fn *types.Func) bool { + panic("x/tools/go/analysis/passes/ctrlflow is not linked into this application") +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go index 868226328f..41b19d7933 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go @@ -13,7 +13,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/versions" ) @@ -23,7 +23,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "loopclosure", - Doc: analysisinternal.MustExtractDoc(doc, "loopclosure"), + Doc: analyzerutil.MustExtractDoc(doc, "loopclosure"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/loopclosure", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -55,8 +55,8 @@ func run(pass *analysis.Pass) (any, error) { switch n := n.(type) { case *ast.File: // Only traverse the file if its goversion is strictly before go1.22. 
- goversion := versions.FileVersion(pass.TypesInfo, n) - return versions.Before(goversion, versions.Go1_22) + return !analyzerutil.FileUsesGoVersion(pass, n, versions.Go1_22) + case *ast.RangeStmt: body = n.Body addVar(n.Key) @@ -308,12 +308,11 @@ func parallelSubtest(info *types.Info, call *ast.CallExpr) []ast.Stmt { if !ok { continue } - expr := exprStmt.X - if isMethodCall(info, expr, "testing", "T", "Parallel") { - call, _ := expr.(*ast.CallExpr) - if call == nil { - continue - } + call, ok := exprStmt.X.(*ast.CallExpr) + if !ok { + continue + } + if isMethodCall(info, call, "testing", "T", "Parallel") { x, _ := call.Fun.(*ast.SelectorExpr) if x == nil { continue @@ -347,26 +346,6 @@ func unlabel(stmt ast.Stmt) (ast.Stmt, bool) { } } -// isMethodCall reports whether expr is a method call of -// <pkgPath>.<typeName>.<method>. -func isMethodCall(info *types.Info, expr ast.Expr, pkgPath, typeName, method string) bool { - call, ok := expr.(*ast.CallExpr) - if !ok { - return false - } - - // Check that we are calling a method <method> - f := typeutil.StaticCallee(info, call) - if f == nil || f.Name() != method { - return false - } - recv := f.Type().(*types.Signature).Recv() - if recv == nil { - return false - } - - // Check that the receiver is a <pkgPath>.<typeName> or - // *<pkgPath>.<typeName>. 
- _, named := typesinternal.ReceiverNamed(recv) - return typesinternal.IsTypeNamed(named, pkgPath, typeName) +func isMethodCall(info *types.Info, call *ast.CallExpr, pkgPath, typeName, method string) bool { + return typesinternal.IsMethodNamed(typeutil.Callee(info, call), pkgPath, typeName, method) } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go index dfaecf51e2..28a5f6cd93 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go @@ -15,7 +15,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/cfg" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" ) @@ -25,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "lostcancel", - Doc: analysisinternal.MustExtractDoc(doc, "lostcancel"), + Doc: analyzerutil.MustExtractDoc(doc, "lostcancel"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/lostcancel", Run: run, Requires: []*analysis.Analyzer{ @@ -316,8 +316,8 @@ outer: } func tupleContains(tuple *types.Tuple, v *types.Var) bool { - for i := 0; i < tuple.Len(); i++ { - if tuple.At(i) == v { + for v0 := range tuple.Variables() { + if v0 == v { return true } } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/any.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/any.go index 05999f8f2b..579ab865da 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/any.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/any.go @@ -9,29 +9,21 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - 
"golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/versions" ) var AnyAnalyzer = &analysis.Analyzer{ - Name: "any", - Doc: analysisinternal.MustExtractDoc(doc, "any"), - Requires: []*analysis.Analyzer{ - generated.Analyzer, - inspect.Analyzer, - }, - Run: runAny, - URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#any", + Name: "any", + Doc: analyzerutil.MustExtractDoc(doc, "any"), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: runAny, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#any", } // The any pass replaces interface{} with go1.18's 'any'. func runAny(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - - for curFile := range filesUsing(inspect, pass.TypesInfo, "go1.18") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_18) { for curIface := range curFile.Preorder((*ast.InterfaceType)(nil)) { iface := curIface.Node().(*ast.InterfaceType) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/bloop.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/bloop.go index eb1ac170c6..ad45d74478 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/bloop.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/bloop.go @@ -15,20 +15,19 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer 
"golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/moreiters" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var BLoopAnalyzer = &analysis.Analyzer{ Name: "bloop", - Doc: analysisinternal.MustExtractDoc(doc, "bloop"), + Doc: analyzerutil.MustExtractDoc(doc, "bloop"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -45,16 +44,13 @@ var BLoopAnalyzer = &analysis.Analyzer{ // for i := 0; i < b.N; i++ {} => for b.Loop() {} // for range b.N {} func bloop(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - if !typesinternal.Imports(pass.Pkg, "testing") { return nil, nil } var ( - inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) - info = pass.TypesInfo + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo ) // edits computes the text edits for a matched for/range loop @@ -102,7 +98,7 @@ func bloop(pass *analysis.Pass) (any, error) { (*ast.ForStmt)(nil), (*ast.RangeStmt)(nil), } - for curFile := range filesUsing(inspect, info, "go1.24") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_24) { for curLoop := range curFile.Preorder(loops...) { switch n := curLoop.Node().(type) { case *ast.ForStmt: @@ -189,6 +185,7 @@ func enclosingFunc(c inspector.Cursor) (inspector.Cursor, bool) { // 2. 
The only b.N loop in that benchmark function // - b.Loop() can only be called once per benchmark execution // - Multiple calls result in "B.Loop called with timer stopped" error +// - Multiple loops may have complex interdependencies that are hard to analyze func usesBenchmarkNOnce(c inspector.Cursor, info *types.Info) bool { // Find the enclosing benchmark function curFunc, ok := enclosingFunc(c) @@ -205,17 +202,14 @@ func usesBenchmarkNOnce(c inspector.Cursor, info *types.Info) bool { return false } - // Count b.N references in this benchmark function + // Count all b.N references in this benchmark function (including nested functions) bnRefCount := 0 - filter := []ast.Node{(*ast.SelectorExpr)(nil), (*ast.FuncLit)(nil)} + filter := []ast.Node{(*ast.SelectorExpr)(nil)} curFunc.Inspect(filter, func(cur inspector.Cursor) bool { - switch n := cur.Node().(type) { - case *ast.FuncLit: - return false // don't descend into nested function literals - case *ast.SelectorExpr: - if n.Sel.Name == "N" && typesinternal.IsPointerToNamed(info.TypeOf(n.X), "testing", "B") { - bnRefCount++ - } + sel := cur.Node().(*ast.SelectorExpr) + if sel.Sel.Name == "N" && + typesinternal.IsPointerToNamed(info.TypeOf(sel.X), "testing", "B") { + bnRefCount++ } return true }) @@ -240,7 +234,7 @@ func isIncrementLoop(info *types.Info, loop *ast.ForStmt) *types.Var { if assign, ok := loop.Init.(*ast.AssignStmt); ok && assign.Tok == token.DEFINE && len(assign.Rhs) == 1 && - isZeroIntLiteral(info, assign.Rhs[0]) && + isZeroIntConst(info, assign.Rhs[0]) && is[*ast.IncDecStmt](loop.Post) && loop.Post.(*ast.IncDecStmt).Tok == token.INC && astutil.EqualSyntax(loop.Post.(*ast.IncDecStmt).X, assign.Lhs[0]) { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/doc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/doc.go index bc143d7a6d..7469002f56 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/doc.go +++ 
b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/doc.go @@ -176,8 +176,12 @@ This analyzer finds declarations of functions of this form: and suggests a fix to turn them into inlinable wrappers around go1.26's built-in new(expr) function: + //go:fix inline func varOf(x int) *int { return new(x) } +(The directive comment causes the 'inline' analyzer to suggest +that calls to such functions are inlined.) + In addition, this analyzer suggests a fix for each call to one of the functions before it is transformed, so that @@ -187,9 +191,9 @@ is replaced by: use(new(123)) -(Wrapper functions such as varOf are common when working with Go +Wrapper functions such as varOf are common when working with Go serialization packages such as for JSON or protobuf, where pointers -are often used to express optionality.) +are often used to express optionality. # Analyzer omitzero @@ -327,6 +331,44 @@ iterator offered by the same data type: where x is one of various well-known types in the standard library. +# Analyzer stringscut + +stringscut: replace strings.Index etc. with strings.Cut + +This analyzer replaces certain patterns of use of [strings.Index] and string slicing by [strings.Cut], added in go1.18. + +For example: + + idx := strings.Index(s, substr) + if idx >= 0 { + return s[:idx] + } + +is replaced by: + + before, _, ok := strings.Cut(s, substr) + if ok { + return before + } + +And: + + idx := strings.Index(s, substr) + if idx >= 0 { + return + } + +is replaced by: + + found := strings.Contains(s, substr) + if found { + return + } + +It also handles variants using [strings.IndexByte] instead of Index, or the bytes package instead of strings. + +Fixes are offered only in cases in which there are no potential modifications of the idx, s, or substr expressions between their definition and use. 
+ # Analyzer stringscutprefix stringscutprefix: replace HasPrefix/TrimPrefix with CutPrefix diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/errorsastype.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/errorsastype.go index b6387ad840..d9a922f846 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/errorsastype.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/errorsastype.go @@ -14,21 +14,21 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/goplsexport" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var errorsastypeAnalyzer = &analysis.Analyzer{ Name: "errorsastype", - Doc: analysisinternal.MustExtractDoc(doc, "errorsastype"), + Doc: analyzerutil.MustExtractDoc(doc, "errorsastype"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#errorsastype", - Requires: []*analysis.Analyzer{generated.Analyzer, typeindexanalyzer.Analyzer}, + Requires: []*analysis.Analyzer{typeindexanalyzer.Analyzer}, Run: errorsastype, } @@ -78,8 +78,6 @@ func init() { // // - if errors.As(err, myerr) && othercond { ... 
} func errorsastype(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) info = pass.TypesInfo @@ -97,7 +95,7 @@ func errorsastype(pass *analysis.Pass) (any, error) { } file := astutil.EnclosingFile(curDeclStmt) - if !fileUses(info, file, "go1.26") { + if !analyzerutil.FileUsesGoVersion(pass, file, versions.Go1_26) { continue // errors.AsType is too new } @@ -127,6 +125,8 @@ func errorsastype(pass *analysis.Pass) (any, error) { errtype := types.TypeString(v.Type(), qual) // Choose a name for the "ok" variable. + // TODO(adonovan): this pattern also appears in stditerators, + // and is wanted elsewhere; factor. okName := "ok" if okVar := lookup(info, curCall, "ok"); okVar != nil { // The name 'ok' is already declared, but diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/fmtappendf.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/fmtappendf.go index f2e5360542..389f703466 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/fmtappendf.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/fmtappendf.go @@ -13,18 +13,17 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var FmtAppendfAnalyzer = &analysis.Analyzer{ Name: "fmtappendf", - Doc: analysisinternal.MustExtractDoc(doc, "fmtappendf"), + Doc: analyzerutil.MustExtractDoc(doc, "fmtappendf"), Requires: []*analysis.Analyzer{ - 
generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -35,8 +34,6 @@ var FmtAppendfAnalyzer = &analysis.Analyzer{ // The fmtappend function replaces []byte(fmt.Sprintf(...)) by // fmt.Appendf(nil, ...), and similarly for Sprint, Sprintln. func fmtappendf(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - index := pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) for _, fn := range []types.Object{ index.Object("fmt", "Sprintf"), @@ -50,7 +47,7 @@ func fmtappendf(pass *analysis.Pass) (any, error) { conv := curCall.Parent().Node().(*ast.CallExpr) tv := pass.TypesInfo.Types[conv.Fun] if tv.IsType() && types.Identical(tv.Type, byteSliceType) && - fileUses(pass.TypesInfo, astutil.EnclosingFile(curCall), "go1.19") { + analyzerutil.FileUsesGoVersion(pass, astutil.EnclosingFile(curCall), versions.Go1_19) { // Have: []byte(fmt.SprintX(...)) // Find "Sprint" identifier. diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/forvar.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/forvar.go index 76e3a8a73c..67f60acaaf 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/forvar.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/forvar.go @@ -10,22 +10,18 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/versions" ) var ForVarAnalyzer = &analysis.Analyzer{ - Name: "forvar", - Doc: analysisinternal.MustExtractDoc(doc, "forvar"), - Requires: []*analysis.Analyzer{ - generated.Analyzer, - inspect.Analyzer, - }, - Run: forvar, - URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#forvar", + Name: 
"forvar", + Doc: analyzerutil.MustExtractDoc(doc, "forvar"), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: forvar, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#forvar", } // forvar offers to fix unnecessary copying of a for variable @@ -39,54 +35,77 @@ var ForVarAnalyzer = &analysis.Analyzer{ // where the two idents are the same, // and the ident is defined (:=) as a variable in the for statement. // (Note that this 'fix' does not work for three clause loops -// because the Go specification says "The variable used by each subsequent iteration +// because the Go specfilesUsingGoVersionsays "The variable used by each subsequent iteration // is declared implicitly before executing the post statement and initialized to the // value of the previous iteration's variable at that moment.") +// +// Variant: same thing in an IfStmt.Init, when the IfStmt is the sole +// loop body statement: +// +// for _, x := range foo { +// if x := x; cond { ... } +// } +// +// (The restriction is necessary to avoid potential problems arising +// from merging two distinct variables.) +// +// This analyzer is synergistic with stditerators, +// which may create redundant "x := x" statements. 
func forvar(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - for curFile := range filesUsing(inspect, pass.TypesInfo, "go1.22") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_22) { for curLoop := range curFile.Preorder((*ast.RangeStmt)(nil)) { loop := curLoop.Node().(*ast.RangeStmt) if loop.Tok != token.DEFINE { continue } - isLoopVarRedecl := func(assign *ast.AssignStmt) bool { - for i, lhs := range assign.Lhs { - if !(astutil.EqualSyntax(lhs, assign.Rhs[i]) && - (astutil.EqualSyntax(lhs, loop.Key) || astutil.EqualSyntax(lhs, loop.Value))) { - return false + isLoopVarRedecl := func(stmt ast.Stmt) bool { + if assign, ok := stmt.(*ast.AssignStmt); ok && + assign.Tok == token.DEFINE && + len(assign.Lhs) == len(assign.Rhs) { + + for i, lhs := range assign.Lhs { + if !(astutil.EqualSyntax(lhs, assign.Rhs[i]) && + (astutil.EqualSyntax(lhs, loop.Key) || + astutil.EqualSyntax(lhs, loop.Value))) { + return false + } } + return true } - return true + return false } // Have: for k, v := range x { stmts } // // Delete the prefix of stmts that are // of the form k := k; v := v; k, v := k, v; v, k := v, k. for _, stmt := range loop.Body.List { - if assign, ok := stmt.(*ast.AssignStmt); ok && - assign.Tok == token.DEFINE && - len(assign.Lhs) == len(assign.Rhs) && - isLoopVarRedecl(assign) { - - curStmt, _ := curLoop.FindNode(stmt) - edits := refactor.DeleteStmt(pass.Fset.File(stmt.Pos()), curStmt) - if len(edits) > 0 { - pass.Report(analysis.Diagnostic{ - Pos: stmt.Pos(), - End: stmt.End(), - Message: "copying variable is unneeded", - SuggestedFixes: []analysis.SuggestedFix{{ - Message: "Remove unneeded redeclaration", - TextEdits: edits, - }}, - }) - } + if isLoopVarRedecl(stmt) { + // { x := x; ... 
} + // ------ + } else if ifstmt, ok := stmt.(*ast.IfStmt); ok && + ifstmt.Init != nil && + len(loop.Body.List) == 1 && // must be sole statement in loop body + isLoopVarRedecl(ifstmt.Init) { + // if x := x; cond { + // ------ + stmt = ifstmt.Init } else { break // stop at first other statement } + + curStmt, _ := curLoop.FindNode(stmt) + edits := refactor.DeleteStmt(pass.Fset.File(stmt.Pos()), curStmt) + if len(edits) > 0 { + pass.Report(analysis.Diagnostic{ + Pos: stmt.Pos(), + End: stmt.End(), + Message: "copying variable is unneeded", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Remove unneeded redeclaration", + TextEdits: edits, + }}, + }) + } } } } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/maps.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/maps.go index 3072cf6f51..2352c8b608 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/maps.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/maps.go @@ -15,23 +15,20 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) var MapsLoopAnalyzer = &analysis.Analyzer{ - Name: "mapsloop", - Doc: analysisinternal.MustExtractDoc(doc, "mapsloop"), - Requires: []*analysis.Analyzer{ - generated.Analyzer, - inspect.Analyzer, - }, - Run: mapsloop, - URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#mapsloop", + Name: "mapsloop", + Doc: analyzerutil.MustExtractDoc(doc, "mapsloop"), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: mapsloop, + URL: 
"https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#mapsloop", } // The mapsloop pass offers to simplify a loop of map insertions: @@ -55,8 +52,6 @@ var MapsLoopAnalyzer = &analysis.Analyzer{ // m = make(M) // m = M{} func mapsloop(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Skip the analyzer in packages where its // fixes would create an import cycle. if within(pass, "maps", "bytes", "runtime") { @@ -223,8 +218,7 @@ func mapsloop(pass *analysis.Pass) (any, error) { } // Find all range loops around m[k] = v. - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - for curFile := range filesUsing(inspect, pass.TypesInfo, "go1.23") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_23) { file := curFile.Node().(*ast.File) for curRange := range curFile.Preorder((*ast.RangeStmt)(nil)) { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/minmax.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/minmax.go index 7ebf837375..23a0977f21 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/minmax.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/minmax.go @@ -15,19 +15,18 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var MinMaxAnalyzer = &analysis.Analyzer{ Name: "minmax", - Doc: analysisinternal.MustExtractDoc(doc, "minmax"), + Doc: analyzerutil.MustExtractDoc(doc, 
"minmax"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -56,8 +55,6 @@ var MinMaxAnalyzer = &analysis.Analyzer{ // - "x := a" or "x = a" or "var x = a" in pattern 2 // - "x < b" or "a < b" in pattern 2 func minmax(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Check for user-defined min/max functions that can be removed checkUserDefinedMinMax(pass) @@ -201,8 +198,7 @@ func minmax(pass *analysis.Pass) (any, error) { // Find all "if a < b { lhs = rhs }" statements. info := pass.TypesInfo - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - for curFile := range filesUsing(inspect, info, "go1.21") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_21) { astFile := curFile.Node().(*ast.File) for curIfStmt := range curFile.Preorder((*ast.IfStmt)(nil)) { ifStmt := curIfStmt.Node().(*ast.IfStmt) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/modernize.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/modernize.go index df23fc23c8..013ce79d6c 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/modernize.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/modernize.go @@ -16,15 +16,15 @@ import ( "strings" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/moreiters" "golang.org/x/tools/internal/packagepath" "golang.org/x/tools/internal/stdlib" "golang.org/x/tools/internal/typesinternal" - "golang.org/x/tools/internal/versions" ) //go:embed doc.go @@ -48,6 +48,7 @@ var Suite = []*analysis.Analyzer{ // SlicesDeleteAnalyzer, // not nil-preserving! 
SlicesSortAnalyzer, stditeratorsAnalyzer, + stringscutAnalyzer, StringsCutPrefixAnalyzer, StringsSeqAnalyzer, StringsBuilderAnalyzer, @@ -57,18 +58,6 @@ var Suite = []*analysis.Analyzer{ // -- helpers -- -// skipGenerated decorates pass.Report to suppress diagnostics in generated files. -func skipGenerated(pass *analysis.Pass) { - report := pass.Report - pass.Report = func(diag analysis.Diagnostic) { - generated := pass.ResultOf[generated.Analyzer].(*generated.Result) - if generated.IsGenerated(diag.Pos) { - return // skip - } - report(diag) - } -} - // formatExprs formats a comma-separated list of expressions. func formatExprs(fset *token.FileSet, exprs []ast.Expr) string { var buf strings.Builder @@ -81,8 +70,8 @@ func formatExprs(fset *token.FileSet, exprs []ast.Expr) string { return buf.String() } -// isZeroIntLiteral reports whether e is an integer whose value is 0. -func isZeroIntLiteral(info *types.Info, e ast.Expr) bool { +// isZeroIntConst reports whether e is an integer whose value is 0. +func isZeroIntConst(info *types.Info, e ast.Expr) bool { return isIntLiteral(info, e, 0) } @@ -91,36 +80,28 @@ func isIntLiteral(info *types.Info, e ast.Expr, n int64) bool { return info.Types[e].Value == constant.MakeInt64(n) } -// filesUsing returns a cursor for each *ast.File in the inspector +// filesUsingGoVersion returns a cursor for each *ast.File in the inspector // that uses at least the specified version of Go (e.g. "go1.24"). // +// The pass's analyzer must require [inspect.Analyzer]. +// // TODO(adonovan): opt: eliminate this function, instead following the -// approach of [fmtappendf], which uses typeindex and [fileUses]. -// See "Tip" at [fileUses] for motivation. -func filesUsing(inspect *inspector.Inspector, info *types.Info, version string) iter.Seq[inspector.Cursor] { +// approach of [fmtappendf], which uses typeindex and +// [analyzerutil.FileUsesGoVersion]; see "Tip" documented at the +// latter function for motivation. 
+func filesUsingGoVersion(pass *analysis.Pass, version string) iter.Seq[inspector.Cursor] { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + return func(yield func(inspector.Cursor) bool) { for curFile := range inspect.Root().Children() { file := curFile.Node().(*ast.File) - if !versions.Before(info.FileVersions[file], version) && !yield(curFile) { + if analyzerutil.FileUsesGoVersion(pass, file, version) && !yield(curFile) { break } } } } -// fileUses reports whether the specified file uses at least the -// specified version of Go (e.g. "go1.24"). -// -// Tip: we recommend using this check "late", just before calling -// pass.Report, rather than "early" (when entering each ast.File, or -// each candidate node of interest, during the traversal), because the -// operation is not free, yet is not a highly selective filter: the -// fraction of files that pass most version checks is high and -// increases over time. -func fileUses(info *types.Info, file *ast.File, version string) bool { - return !versions.Before(info.FileVersions[file], version) -} - // within reports whether the current pass is analyzing one of the // specified standard packages or their dependencies. 
func within(pass *analysis.Pass, pkgs ...string) bool { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/newexpr.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/newexpr.go index b8893244d5..6cb75f247c 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/newexpr.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/newexpr.go @@ -17,13 +17,14 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/versions" ) var NewExprAnalyzer = &analysis.Analyzer{ Name: "newexpr", - Doc: analysisinternal.MustExtractDoc(doc, "newexpr"), + Doc: analyzerutil.MustExtractDoc(doc, "newexpr"), URL: "https://pkg.go.dev/golang.org/x/tools/gopls/internal/analysis/modernize#newexpr", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -60,7 +61,7 @@ func run(pass *analysis.Pass) (any, error) { // Check file version. file := astutil.EnclosingFile(curFuncDecl) - if !fileUses(info, file, "go1.26") { + if !analyzerutil.FileUsesGoVersion(pass, file, versions.Go1_26) { continue // new(expr) not available in this file } @@ -87,25 +88,18 @@ func run(pass *analysis.Pass) (any, error) { } } - // Disabled until we resolve https://go.dev/issue/75726 - // (Go version skew between caller and callee in inliner.) - // TODO(adonovan): fix and reenable. + // Add a //go:fix inline annotation, if not already present. // - // Also, restore these lines to our section of doc.go: - // //go:fix inline - // ... - // (The directive comment causes the inline analyzer to suggest - // that calls to such functions are inlined.) - if false { - // Add a //go:fix inline annotation, if not already present. - // TODO(adonovan): use ast.ParseDirective when go1.26 is assured. 
- if !strings.Contains(decl.Doc.Text(), "go:fix inline") { - edits = append(edits, analysis.TextEdit{ - Pos: decl.Pos(), - End: decl.Pos(), - NewText: []byte("//go:fix inline\n"), - }) - } + // The inliner will not inline a newer callee body into an + // older Go file; see https://go.dev/issue/75726. + // + // TODO(adonovan): use ast.ParseDirective when go1.26 is assured. + if !strings.Contains(decl.Doc.Text(), "go:fix inline") { + edits = append(edits, analysis.TextEdit{ + Pos: decl.Pos(), + End: decl.Pos(), + NewText: []byte("//go:fix inline\n"), + }) } if len(edits) > 0 { @@ -140,7 +134,7 @@ func run(pass *analysis.Pass) (any, error) { // Check file version. file := astutil.EnclosingFile(curCall) - if !fileUses(info, file, "go1.26") { + if !analyzerutil.FileUsesGoVersion(pass, file, versions.Go1_26) { continue // new(expr) not available in this file } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/omitzero.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/omitzero.go index bd309cf9d5..4a05d64f42 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/omitzero.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/omitzero.go @@ -12,21 +12,17 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/versions" ) var OmitZeroAnalyzer = &analysis.Analyzer{ - Name: "omitzero", - Doc: analysisinternal.MustExtractDoc(doc, "omitzero"), - Requires: []*analysis.Analyzer{ - generated.Analyzer, - inspect.Analyzer, - }, - Run: omitzero, - URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#omitzero", + Name: "omitzero", + Doc: analyzerutil.MustExtractDoc(doc, "omitzero"), 
+ Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: omitzero, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#omitzero", } func checkOmitEmptyField(pass *analysis.Pass, info *types.Info, curField *ast.Field) { @@ -48,25 +44,20 @@ func checkOmitEmptyField(pass *analysis.Pass, info *types.Info, curField *ast.Fi // No omitempty in json tag return } - omitEmptyPos, omitEmptyEnd, err := astutil.RangeInStringLiteral(curField.Tag, match[2], match[3]) + omitEmpty, err := astutil.RangeInStringLiteral(curField.Tag, match[2], match[3]) if err != nil { return } - removePos, removeEnd := omitEmptyPos, omitEmptyEnd + var remove analysis.Range = omitEmpty jsonTag := reflect.StructTag(tagconv).Get("json") if jsonTag == ",omitempty" { // Remove the entire struct tag if json is the only package used if match[1]-match[0] == len(tagconv) { - removePos = curField.Tag.Pos() - removeEnd = curField.Tag.End() + remove = curField.Tag } else { // Remove the json tag if omitempty is the only field - removePos, err = astutil.PosInStringLiteral(curField.Tag, match[0]) - if err != nil { - return - } - removeEnd, err = astutil.PosInStringLiteral(curField.Tag, match[1]) + remove, err = astutil.RangeInStringLiteral(curField.Tag, match[0], match[1]) if err != nil { return } @@ -81,8 +72,8 @@ func checkOmitEmptyField(pass *analysis.Pass, info *types.Info, curField *ast.Fi Message: "Remove redundant omitempty tag", TextEdits: []analysis.TextEdit{ { - Pos: removePos, - End: removeEnd, + Pos: remove.Pos(), + End: remove.End(), }, }, }, @@ -90,8 +81,8 @@ func checkOmitEmptyField(pass *analysis.Pass, info *types.Info, curField *ast.Fi Message: "Replace omitempty with omitzero (behavior change)", TextEdits: []analysis.TextEdit{ { - Pos: omitEmptyPos, - End: omitEmptyEnd, + Pos: omitEmpty.Pos(), + End: omitEmpty.End(), NewText: []byte(",omitzero"), }, }, @@ -100,18 +91,14 @@ func checkOmitEmptyField(pass *analysis.Pass, info *types.Info, curField *ast.Fi } // The omitzero 
pass searches for instances of "omitempty" in a json field tag on a -// struct. Since "omitempty" does not have any effect when applied to a struct field, +// struct. Since "omitfilesUsingGoVersions not have any effect when applied to a struct field, // it suggests either deleting "omitempty" or replacing it with "omitzero", which // correctly excludes structs from a json encoding. func omitzero(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - info := pass.TypesInfo - for curFile := range filesUsing(inspect, info, "go1.24") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_24) { for curStruct := range curFile.Preorder((*ast.StructType)(nil)) { for _, curField := range curStruct.Node().(*ast.StructType).Fields.List { - checkOmitEmptyField(pass, info, curField) + checkOmitEmptyField(pass, pass.TypesInfo, curField) } } } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/plusbuild.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/plusbuild.go index e8af8074ff..57b502ab80 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/plusbuild.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/plusbuild.go @@ -10,13 +10,14 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/goplsexport" + "golang.org/x/tools/internal/versions" ) var plusBuildAnalyzer = &analysis.Analyzer{ Name: "plusbuild", - Doc: analysisinternal.MustExtractDoc(doc, "plusbuild"), + Doc: analyzerutil.MustExtractDoc(doc, "plusbuild"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#plusbuild", Run: plusbuild, } @@ -28,7 +29,7 @@ func init() { func plusbuild(pass *analysis.Pass) (any, error) { check := func(f *ast.File) { - if !fileUses(pass.TypesInfo, f, "go1.18") { + if 
!analyzerutil.FileUsesGoVersion(pass, f, versions.Go1_18) { return } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/rangeint.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/rangeint.go index adc840f11d..6b1edf38b3 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/rangeint.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/rangeint.go @@ -15,19 +15,18 @@ import ( "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var RangeIntAnalyzer = &analysis.Analyzer{ Name: "rangeint", - Doc: analysisinternal.MustExtractDoc(doc, "rangeint"), + Doc: analyzerutil.MustExtractDoc(doc, "rangeint"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -66,21 +65,19 @@ var RangeIntAnalyzer = &analysis.Analyzer{ // - a constant; or // - len(s), where s has the above properties. 
func rangeint(pass *analysis.Pass) (any, error) { - skipGenerated(pass) + var ( + info = pass.TypesInfo + typeindex = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + ) - info := pass.TypesInfo - - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - typeindex := pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) - - for curFile := range filesUsing(inspect, info, "go1.22") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_22) { nextLoop: for curLoop := range curFile.Preorder((*ast.ForStmt)(nil)) { loop := curLoop.Node().(*ast.ForStmt) if init, ok := loop.Init.(*ast.AssignStmt); ok && isSimpleAssign(init) && is[*ast.Ident](init.Lhs[0]) && - isZeroIntLiteral(info, init.Rhs[0]) { + isZeroIntConst(info, init.Rhs[0]) { // Have: for i = 0; ... (or i := 0) index := init.Lhs[0].(*ast.Ident) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/reflect.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/reflect.go index c9b0fa42ee..0fc781813f 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/reflect.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/reflect.go @@ -14,9 +14,8 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal" @@ -26,9 +25,8 @@ import ( var ReflectTypeForAnalyzer = &analysis.Analyzer{ Name: "reflecttypefor", - Doc: analysisinternal.MustExtractDoc(doc, "reflecttypefor"), + Doc: analyzerutil.MustExtractDoc(doc, 
"reflecttypefor"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -37,8 +35,6 @@ var ReflectTypeForAnalyzer = &analysis.Analyzer{ } func reflecttypefor(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) info = pass.TypesInfo @@ -89,7 +85,7 @@ func reflecttypefor(pass *analysis.Pass) (any, error) { } file := astutil.EnclosingFile(curCall) - if versions.Before(info.FileVersions[file], "go1.22") { + if !analyzerutil.FileUsesGoVersion(pass, file, versions.Go1_22) { continue // TypeFor requires go1.22 } tokFile := pass.Fset.File(file.Pos()) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slices.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slices.go index 032f874df1..960a46644b 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slices.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slices.go @@ -13,25 +13,21 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // Warning: this analyzer is not safe to enable by default. 
var AppendClippedAnalyzer = &analysis.Analyzer{ - Name: "appendclipped", - Doc: analysisinternal.MustExtractDoc(doc, "appendclipped"), - Requires: []*analysis.Analyzer{ - generated.Analyzer, - inspect.Analyzer, - }, - Run: appendclipped, - URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#appendclipped", + Name: "appendclipped", + Doc: analyzerutil.MustExtractDoc(doc, "appendclipped"), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: appendclipped, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#appendclipped", } // The appendclipped pass offers to simplify a tower of append calls: @@ -59,8 +55,6 @@ var AppendClippedAnalyzer = &analysis.Analyzer{ // The fix does not always preserve nilness the of base slice when the // addends (a, b, c) are all empty (see #73557). func appendclipped(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Skip the analyzer in packages where its // fixes would create an import cycle. if within(pass, "slices", "bytes", "runtime") { @@ -205,8 +199,7 @@ func appendclipped(pass *analysis.Pass) (any, error) { skip := make(map[*ast.CallExpr]bool) // Visit calls of form append(x, y...). 
- inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - for curFile := range filesUsing(inspect, info, "go1.21") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_21) { file := curFile.Node().(*ast.File) for curCall := range curFile.Preorder((*ast.CallExpr)(nil)) { @@ -266,7 +259,7 @@ func clippedSlice(info *types.Info, e ast.Expr) (res ast.Expr, empty bool) { // x[:0:0], x[:len(x):len(x)], x[:k:k] if e.Slice3 && e.High != nil && e.Max != nil && astutil.EqualSyntax(e.High, e.Max) { // x[:k:k] res = e - empty = isZeroIntLiteral(info, e.High) // x[:0:0] + empty = isZeroIntConst(info, e.High) // x[:0:0] if call, ok := e.High.(*ast.CallExpr); ok && typeutil.Callee(info, call) == builtinLen && astutil.EqualSyntax(call.Args[0], e.X) { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicescontains.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicescontains.go index b3c2e562c9..3b32685266 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicescontains.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicescontains.go @@ -14,20 +14,19 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var SlicesContainsAnalyzer = &analysis.Analyzer{ Name: "slicescontains", - Doc: analysisinternal.MustExtractDoc(doc, "slicescontains"), + Doc: 
analyzerutil.MustExtractDoc(doc, "slicescontains"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -66,8 +65,6 @@ var SlicesContainsAnalyzer = &analysis.Analyzer{ // TODO(adonovan): Add a check that needle/predicate expression from // if-statement has no effects. Now the program behavior may change. func slicescontains(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Skip the analyzer in packages where its // fixes would create an import cycle. if within(pass, "slices", "runtime") { @@ -75,9 +72,8 @@ func slicescontains(pass *analysis.Pass) (any, error) { } var ( - inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) - info = pass.TypesInfo + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo ) // check is called for each RangeStmt of this form: @@ -312,7 +308,7 @@ func slicescontains(pass *analysis.Pass) (any, error) { // Special case: // prev="lhs = false" body={ lhs = true; break } - // => lhs = slices.Contains(...) (or negation) + // => lhs = slices.Contains(...) (or its negation) if assign, ok := body.List[0].(*ast.AssignStmt); ok && len(body.List) == 2 && assign.Tok == token.ASSIGN && @@ -320,13 +316,12 @@ func slicescontains(pass *analysis.Pass) (any, error) { len(assign.Rhs) == 1 { // Have: body={ lhs = rhs; break } - if prevAssign, ok := prevStmt.(*ast.AssignStmt); ok && len(prevAssign.Lhs) == 1 && len(prevAssign.Rhs) == 1 && astutil.EqualSyntax(prevAssign.Lhs[0], assign.Lhs[0]) && - is[*ast.Ident](assign.Rhs[0]) && - info.Uses[assign.Rhs[0].(*ast.Ident)] == builtinTrue { + isTrueOrFalse(info, assign.Rhs[0]) == + -isTrueOrFalse(info, prevAssign.Rhs[0]) { // Have: // lhs = false @@ -336,15 +331,14 @@ func slicescontains(pass *analysis.Pass) (any, error) { // // TODO(adonovan): // - support "var lhs bool = false" and variants. - // - support negation. 
- // Both these variants seem quite significant. // - allow the break to be omitted. + neg := cond(isTrueOrFalse(info, assign.Rhs[0]) < 0, "!", "") report([]analysis.TextEdit{ - // Replace "rhs" of previous assignment by slices.Contains(...) + // Replace "rhs" of previous assignment by [!]slices.Contains(...) { Pos: prevAssign.Rhs[0].Pos(), End: prevAssign.Rhs[0].End(), - NewText: []byte(contains), + NewText: []byte(neg + contains), }, // Delete the loop and preceding space. { @@ -388,7 +382,7 @@ func slicescontains(pass *analysis.Pass) (any, error) { } } - for curFile := range filesUsing(inspect, info, "go1.21") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_21) { file := curFile.Node().(*ast.File) for curRange := range curFile.Preorder((*ast.RangeStmt)(nil)) { @@ -420,13 +414,19 @@ func slicescontains(pass *analysis.Pass) (any, error) { // isReturnTrueOrFalse returns nonzero if stmt returns true (+1) or false (-1). func isReturnTrueOrFalse(info *types.Info, stmt ast.Stmt) int { if ret, ok := stmt.(*ast.ReturnStmt); ok && len(ret.Results) == 1 { - if id, ok := ret.Results[0].(*ast.Ident); ok { - switch info.Uses[id] { - case builtinTrue: - return +1 - case builtinFalse: - return -1 - } + return isTrueOrFalse(info, ret.Results[0]) + } + return 0 +} + +// isTrueOrFalse returns nonzero if expr is literally true (+1) or false (-1). 
+func isTrueOrFalse(info *types.Info, expr ast.Expr) int { + if id, ok := expr.(*ast.Ident); ok { + switch info.Uses[id] { + case builtinTrue: + return +1 + case builtinFalse: + return -1 } } return 0 diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicesdelete.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicesdelete.go index b3e063db0f..7b3aa875c0 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicesdelete.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/slicesdelete.go @@ -12,24 +12,20 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // Warning: this analyzer is not safe to enable by default (not nil-preserving). var SlicesDeleteAnalyzer = &analysis.Analyzer{ - Name: "slicesdelete", - Doc: analysisinternal.MustExtractDoc(doc, "slicesdelete"), - Requires: []*analysis.Analyzer{ - generated.Analyzer, - inspect.Analyzer, - }, - Run: slicesdelete, - URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#slicesdelete", + Name: "slicesdelete", + Doc: analyzerutil.MustExtractDoc(doc, "slicesdelete"), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: slicesdelete, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/modernize#slicesdelete", } // The slicesdelete pass attempts to replace instances of append(s[:i], s[i+k:]...) @@ -37,15 +33,12 @@ var SlicesDeleteAnalyzer = &analysis.Analyzer{ // Other variations that will also have suggested replacements include: // append(s[:i-1], s[i:]...) 
and append(s[:i+k1], s[i+k2:]) where k2 > k1. func slicesdelete(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Skip the analyzer in packages where its // fixes would create an import cycle. if within(pass, "slices", "runtime") { return nil, nil } - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) info := pass.TypesInfo report := func(file *ast.File, call *ast.CallExpr, slice1, slice2 *ast.SliceExpr) { insert := func(pos token.Pos, text string) analysis.TextEdit { @@ -55,7 +48,7 @@ func slicesdelete(pass *analysis.Pass) (any, error) { return types.Identical(types.Default(info.TypeOf(e)), builtinInt.Type()) } isIntShadowed := func() bool { - scope := pass.TypesInfo.Scopes[file].Innermost(call.Lparen) + scope := info.Scopes[file].Innermost(call.Lparen) if _, obj := scope.LookupParent("int", call.Lparen); obj != builtinInt { return true // int type is shadowed } @@ -130,7 +123,7 @@ func slicesdelete(pass *analysis.Pass) (any, error) { }}, }) } - for curFile := range filesUsing(inspect, info, "go1.21") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_21) { file := curFile.Node().(*ast.File) for curCall := range curFile.Preorder((*ast.CallExpr)(nil)) { call := curCall.Node().(*ast.CallExpr) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/sortslice.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/sortslice.go index 66af16d1f6..e22b8c55f5 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/sortslice.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/sortslice.go @@ -11,20 +11,19 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer 
"golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) // (Not to be confused with go/analysis/passes/sortslice.) var SlicesSortAnalyzer = &analysis.Analyzer{ Name: "slicessort", - Doc: analysisinternal.MustExtractDoc(doc, "slicessort"), + Doc: analyzerutil.MustExtractDoc(doc, "slicessort"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -52,8 +51,6 @@ var SlicesSortAnalyzer = &analysis.Analyzer{ // - sort.Sort(x) where x has a named slice type whose Less method is the natural order. // -> sort.Slice(x) func slicessort(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Skip the analyzer in packages where its // fixes would create an import cycle. if within(pass, "slices", "sort", "runtime") { @@ -87,7 +84,7 @@ func slicessort(pass *analysis.Pass) (any, error) { } file := astutil.EnclosingFile(curCall) if isIndex(compare.X, i) && isIndex(compare.Y, j) && - fileUses(info, file, "go1.21") { + analyzerutil.FileUsesGoVersion(pass, file, versions.Go1_21) { // Have: sort.Slice(s, func(i, j int) bool { return s[i] < s[j] }) prefix, importEdits := refactor.AddImport( diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stditerators.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stditerators.go index 20817520e1..cc59580671 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stditerators.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stditerators.go @@ -14,9 +14,8 @@ import ( "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer 
"golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/goplsexport" "golang.org/x/tools/internal/refactor" @@ -26,9 +25,8 @@ import ( var stditeratorsAnalyzer = &analysis.Analyzer{ Name: "stditerators", - Doc: analysisinternal.MustExtractDoc(doc, "stditerators"), + Doc: analyzerutil.MustExtractDoc(doc, "stditerators"), Requires: []*analysis.Analyzer{ - generated.Analyzer, typeindexanalyzer.Analyzer, }, Run: stditerators, @@ -89,8 +87,6 @@ var stditeratorsTable = [...]struct { // iterator for that reason? We don't want to go fix to // undo optimizations. Do we need a suppression mechanism? func stditerators(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) info = pass.TypesInfo @@ -116,6 +112,10 @@ func stditerators(pass *analysis.Pass) (any, error) { // // for ... { e := x.At(i); use(e) } // + // or + // + // for ... { if e := x.At(i); cond { use(e) } } + // // then chooseName prefers the name e and additionally // returns the var's symbol. We'll transform this to: // @@ -124,10 +124,11 @@ func stditerators(pass *analysis.Pass) (any, error) { // which leaves a redundant assignment that a // subsequent 'forvar' pass will eliminate. chooseName := func(curBody inspector.Cursor, x ast.Expr, i *types.Var) (string, *types.Var) { - // Is body { elem := x.At(i); ... } ? - body := curBody.Node().(*ast.BlockStmt) - if len(body.List) > 0 { - if assign, ok := body.List[0].(*ast.AssignStmt); ok && + + // isVarAssign reports whether stmt has the form v := x.At(i) + // and returns the variable if so. 
+ isVarAssign := func(stmt ast.Stmt) *types.Var { + if assign, ok := stmt.(*ast.AssignStmt); ok && assign.Tok == token.DEFINE && len(assign.Lhs) == 1 && len(assign.Rhs) == 1 && @@ -138,15 +139,47 @@ func stditerators(pass *analysis.Pass) (any, error) { astutil.EqualSyntax(ast.Unparen(call.Fun).(*ast.SelectorExpr).X, x) && is[*ast.Ident](call.Args[0]) && info.Uses[call.Args[0].(*ast.Ident)] == i { - // Have: { elem := x.At(i); ... } + // Have: elem := x.At(i) id := assign.Lhs[0].(*ast.Ident) - return id.Name, info.Defs[id].(*types.Var) + return info.Defs[id].(*types.Var) + } + } + return nil + } + + body := curBody.Node().(*ast.BlockStmt) + if len(body.List) > 0 { + // Is body { elem := x.At(i); ... } ? + if v := isVarAssign(body.List[0]); v != nil { + return v.Name(), v + } + + // Or { if elem := x.At(i); cond { ... } } ? + if ifstmt, ok := body.List[0].(*ast.IfStmt); ok && ifstmt.Init != nil { + if v := isVarAssign(ifstmt.Init); v != nil { + return v.Name(), v } } } loop := curBody.Parent().Node() - return refactor.FreshName(info.Scopes[loop], loop.Pos(), row.elemname), nil + + // Choose a fresh name only if + // (a) the preferred name is already declared here, and + // (b) there are references to it from the loop body. + // TODO(adonovan): this pattern also appears in errorsastype, + // and is wanted elsewhere; factor. + name := row.elemname + if v := lookup(info, curBody, name); v != nil { + // is it free in body? + for curUse := range index.Uses(v) { + if curBody.Contains(curUse) { + name = refactor.FreshName(info.Scopes[loop], loop.Pos(), name) + break + } + } + } + return name, nil } // Process each call of x.Len(). @@ -191,7 +224,7 @@ func stditerators(pass *analysis.Pass) (any, error) { } // Have: for i := 0; i < x.Len(); i++ { ... }. 
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - rng = analysisinternal.Range(loop.For, loop.Post.End()) + rng = astutil.RangeOf(loop.For, loop.Post.End()) indexVar = v curBody = curFor.ChildAt(edge.ForStmt_Body, -1) elem, elemVar = chooseName(curBody, lenSel.X, indexVar) @@ -234,7 +267,7 @@ func stditerators(pass *analysis.Pass) (any, error) { // Have: for i := range x.Len() { ... } // ~~~~~~~~~~~~~ - rng = analysisinternal.Range(loop.Range, loop.X.End()) + rng = astutil.RangeOf(loop.Range, loop.X.End()) indexVar = info.Defs[id].(*types.Var) curBody = curRange.ChildAt(edge.RangeStmt_Body, -1) elem, elemVar = chooseName(curBody, lenSel.X, indexVar) @@ -313,7 +346,7 @@ func stditerators(pass *analysis.Pass) (any, error) { // may be somewhat expensive.) if v, ok := methodGoVersion(row.pkgpath, row.typename, row.itermethod); !ok { panic("no version found") - } else if file := astutil.EnclosingFile(curLenCall); !fileUses(info, file, v.String()) { + } else if !analyzerutil.FileUsesGoVersion(pass, astutil.EnclosingFile(curLenCall), v.String()) { continue nextCall } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsbuilder.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsbuilder.go index 56d0ba73cc..56c5d0e3b3 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsbuilder.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsbuilder.go @@ -15,9 +15,8 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" 
"golang.org/x/tools/internal/typesinternal" @@ -26,9 +25,8 @@ import ( var StringsBuilderAnalyzer = &analysis.Analyzer{ Name: "stringsbuilder", - Doc: analysisinternal.MustExtractDoc(doc, "stringsbuilder"), + Doc: analyzerutil.MustExtractDoc(doc, "stringsbuilder"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -38,8 +36,6 @@ var StringsBuilderAnalyzer = &analysis.Analyzer{ // stringsbuilder replaces string += string in a loop by strings.Builder. func stringsbuilder(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - // Skip the analyzer in packages where its // fixes would create an import cycle. if within(pass, "strings", "runtime") { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscut.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscut.go new file mode 100644 index 0000000000..521c264c51 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscut.go @@ -0,0 +1,580 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modernize + +import ( + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "iter" + "strconv" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/edge" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" + "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/goplsexport" + "golang.org/x/tools/internal/refactor" + "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" +) + +var stringscutAnalyzer = &analysis.Analyzer{ + Name: "stringscut", + Doc: analyzerutil.MustExtractDoc(doc, "stringscut"), + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + typeindexanalyzer.Analyzer, + }, + Run: stringscut, + URL: "https://pkg.go.dev/golang.org/x/tools/gopls/internal/analysis/modernize#stringscut", +} + +func init() { + // Export to gopls until this is a published modernizer. + goplsexport.StringsCutModernizer = stringscutAnalyzer +} + +// stringscut offers a fix to replace an occurrence of strings.Index{,Byte} with +// strings.{Cut,Contains}, and similar fixes for functions in the bytes package. +// Consider some candidate for replacement i := strings.Index(s, substr). +// The following must hold for a replacement to occur: +// +// 1. All instances of i and s must be in one of these forms. +// Binary expressions: +// (a): establishing that i < 0: e.g.: i < 0, 0 > i, i == -1, -1 == i +// (b): establishing that i > -1: e.g.: i >= 0, 0 <= i, i == 0, 0 == i +// +// Slice expressions: +// a: s[:i], s[0:i] +// b: s[i+len(substr):], s[len(substr) + i:], s[i + const], s[k + i] (where k = len(substr)) +// +// 2. There can be no uses of s, substr, or i where they are +// potentially modified (i.e. 
in assignments, or function calls with unknown side +// effects). +// +// Then, the replacement involves the following substitutions: +// +// 1. Replace "i := strings.Index(s, substr)" with "before, after, ok := strings.Cut(s, substr)" +// +// 2. Replace instances of binary expressions (a) with !ok and binary expressions (b) with ok. +// +// 3. Replace slice expressions (a) with "before" and slice expressions (b) with after. +// +// 4. The assignments to before, after, and ok may use the blank identifier "_" if they are unused. +// +// For example: +// +// i := strings.Index(s, substr) +// if i >= 0 { +// use(s[:i], s[i+len(substr):]) +// } +// +// Would become: +// +// before, after, ok := strings.Cut(s, substr) +// if ok { +// use(before, after) +// } +// +// If the condition involving `i` establishes that i > -1, then we replace it with +// `if ok“. Variants listed above include i >= 0, i > 0, and i == 0. +// If the condition is negated (e.g. establishes `i < 0`), we use `if !ok` instead. +// If the slices of `s` match `s[:i]` or `s[i+len(substr):]` or their variants listed above, +// then we replace them with before and after. +// +// When the index `i` is used only to check for the presence of the substring or byte slice, +// the suggested fix uses Contains() instead of Cut. 
+// +// For example: +// +// i := strings.Index(s, substr) +// if i >= 0 { +// return +// } +// +// Would become: +// +// found := strings.Contains(s, substr) +// if found { +// return +// } +func stringscut(pass *analysis.Pass) (any, error) { + var ( + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo + + stringsIndex = index.Object("strings", "Index") + stringsIndexByte = index.Object("strings", "IndexByte") + bytesIndex = index.Object("bytes", "Index") + bytesIndexByte = index.Object("bytes", "IndexByte") + ) + + for _, obj := range []types.Object{ + stringsIndex, + stringsIndexByte, + bytesIndex, + bytesIndexByte, + } { + // (obj may be nil) + nextcall: + for curCall := range index.Calls(obj) { + // Check file version. + if !analyzerutil.FileUsesGoVersion(pass, astutil.EnclosingFile(curCall), versions.Go1_18) { + continue // strings.Index not available in this file + } + indexCall := curCall.Node().(*ast.CallExpr) // the call to strings.Index, etc. + obj := typeutil.Callee(info, indexCall) + if obj == nil { + continue + } + + var iIdent *ast.Ident // defining identifier of i var + switch ek, idx := curCall.ParentEdge(); ek { + case edge.ValueSpec_Values: + // Have: var i = strings.Index(...) + curName := curCall.Parent().ChildAt(edge.ValueSpec_Names, idx) + iIdent = curName.Node().(*ast.Ident) + case edge.AssignStmt_Rhs: + // Have: i := strings.Index(...) + // (Must be i's definition.) + curLhs := curCall.Parent().ChildAt(edge.AssignStmt_Lhs, idx) + iIdent, _ = curLhs.Node().(*ast.Ident) // may be nil + } + + if iIdent == nil { + continue + } + // Inv: iIdent is i's definition. The following would be skipped: 'var i int; i = strings.Index(...)' + // Get uses of i. + iObj := info.ObjectOf(iIdent) + if iObj == nil { + continue + } + + var ( + s = indexCall.Args[0] + substr = indexCall.Args[1] + ) + + // Check that there are no statements that alter the value of s + // or substr after the call to Index(). 
+ if !indexArgValid(info, index, s, indexCall.Pos()) || + !indexArgValid(info, index, substr, indexCall.Pos()) { + continue nextcall + } + + // Next, examine all uses of i. If the only uses are of the + // forms mentioned above (e.g. i < 0, i >= 0, s[:i] and s[i + + // len(substr)]), then we can replace the call to Index() + // with a call to Cut() and use the returned ok, before, + // and after variables accordingly. + lessZero, greaterNegOne, beforeSlice, afterSlice := checkIdxUses(pass.TypesInfo, index.Uses(iObj), s, substr) + + // Either there are no uses of before, after, or ok, or some use + // of i does not match our criteria - don't suggest a fix. + if lessZero == nil && greaterNegOne == nil && beforeSlice == nil && afterSlice == nil { + continue + } + + // If the only uses are ok and !ok, don't suggest a Cut() fix - these should be using Contains() + isContains := (len(lessZero) > 0 || len(greaterNegOne) > 0) && len(beforeSlice) == 0 && len(afterSlice) == 0 + + scope := iObj.Parent() + var ( + // TODO(adonovan): avoid FreshName when not needed; see errorsastype. + okVarName = refactor.FreshName(scope, iIdent.Pos(), "ok") + beforeVarName = refactor.FreshName(scope, iIdent.Pos(), "before") + afterVarName = refactor.FreshName(scope, iIdent.Pos(), "after") + foundVarName = refactor.FreshName(scope, iIdent.Pos(), "found") // for Contains() + ) + + // If there will be no uses of ok, before, or after, use the + // blank identifier instead. 
+ if len(lessZero) == 0 && len(greaterNegOne) == 0 { + okVarName = "_" + } + if len(beforeSlice) == 0 { + beforeVarName = "_" + } + if len(afterSlice) == 0 { + afterVarName = "_" + } + + var edits []analysis.TextEdit + replace := func(exprs []ast.Expr, new string) { + for _, expr := range exprs { + edits = append(edits, analysis.TextEdit{ + Pos: expr.Pos(), + End: expr.End(), + NewText: []byte(new), + }) + } + } + // Get the ident for the call to strings.Index, which could just be + // "Index" if the strings package is dot imported. + indexCallId := typesinternal.UsedIdent(info, indexCall.Fun) + replacedFunc := "Cut" + if isContains { + replacedFunc = "Contains" + replace(lessZero, "!"+foundVarName) // idx < 0 -> !found + replace(greaterNegOne, foundVarName) // idx > -1 -> found + + // Replace the assignment with found, and replace the call to + // Index or IndexByte with a call to Contains. + // i := strings.Index (...) + // ----- -------- + // found := strings.Contains(...) + edits = append(edits, analysis.TextEdit{ + Pos: iIdent.Pos(), + End: iIdent.End(), + NewText: []byte(foundVarName), + }, analysis.TextEdit{ + Pos: indexCallId.Pos(), + End: indexCallId.End(), + NewText: []byte("Contains"), + }) + } else { + replace(lessZero, "!"+okVarName) // idx < 0 -> !ok + replace(greaterNegOne, okVarName) // idx > -1 -> ok + replace(beforeSlice, beforeVarName) // s[:idx] -> before + replace(afterSlice, afterVarName) // s[idx+k:] -> after + + // Replace the assignment with before, after, ok, and replace + // the call to Index or IndexByte with a call to Cut. + // i := strings.Index(...) + // ----------------- ----- + // before, after, ok := strings.Cut (...) 
+ edits = append(edits, analysis.TextEdit{ + Pos: iIdent.Pos(), + End: iIdent.End(), + NewText: fmt.Appendf(nil, "%s, %s, %s", beforeVarName, afterVarName, okVarName), + }, analysis.TextEdit{ + Pos: indexCallId.Pos(), + End: indexCallId.End(), + NewText: []byte("Cut"), + }) + } + + // Calls to IndexByte have a byte as their second arg, which + // must be converted to a string or []byte to be a valid arg for Cut/Contains. + if obj.Name() == "IndexByte" { + switch obj.Pkg().Name() { + case "strings": + searchByteVal := info.Types[substr].Value + if searchByteVal == nil { + // substr is a variable, e.g. substr := byte('b') + // use string(substr) + edits = append(edits, []analysis.TextEdit{ + { + Pos: substr.Pos(), + NewText: []byte("string("), + }, + { + Pos: substr.End(), + NewText: []byte(")"), + }, + }...) + } else { + // substr is a byte constant + val, _ := constant.Int64Val(searchByteVal) // inv: must be a valid byte + // strings.Cut/Contains requires a string, so convert byte literal to string literal; e.g. 'a' -> "a", 55 -> "7" + edits = append(edits, analysis.TextEdit{ + Pos: substr.Pos(), + End: substr.End(), + NewText: strconv.AppendQuote(nil, string(byte(val))), + }) + } + case "bytes": + // bytes.Cut/Contains requires a []byte, so wrap substr in a []byte{} + edits = append(edits, []analysis.TextEdit{ + { + Pos: substr.Pos(), + NewText: []byte("[]byte{"), + }, + { + Pos: substr.End(), + NewText: []byte("}"), + }, + }...) 
+ } + } + pass.Report(analysis.Diagnostic{ + Pos: indexCall.Fun.Pos(), + End: indexCall.Fun.End(), + Message: fmt.Sprintf("%s.%s can be simplified using %s.%s", + obj.Pkg().Name(), obj.Name(), obj.Pkg().Name(), replacedFunc), + Category: "stringscut", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Simplify %s.%s call using %s.%s", obj.Pkg().Name(), obj.Name(), obj.Pkg().Name(), replacedFunc), + TextEdits: edits, + }}, + }) + } + } + + return nil, nil +} + +// indexArgValid reports whether expr is a valid strings.Index(_, _) arg +// for the transformation. An arg is valid iff it is: +// - constant; +// - a local variable with no modifying uses after the Index() call; or +// - []byte(x) where x is also valid by this definition. +// All other expressions are assumed not referentially transparent, +// so we cannot be sure that all uses are safe to replace. +func indexArgValid(info *types.Info, index *typeindex.Index, expr ast.Expr, afterPos token.Pos) bool { + tv := info.Types[expr] + if tv.Value != nil { + return true // constant + } + switch expr := expr.(type) { + case *ast.CallExpr: + return types.Identical(tv.Type, byteSliceType) && + indexArgValid(info, index, expr.Args[0], afterPos) // check s in []byte(s) + case *ast.Ident: + sObj := info.Uses[expr] + sUses := index.Uses(sObj) + return !hasModifyingUses(info, sUses, afterPos) + default: + // For now, skip instances where s or substr are not + // identifers, basic lits, or call expressions of the form + // []byte(s). + // TODO(mkalil): Handle s and substr being expressions like ptr.field[i]. + // From adonovan: We'd need to analyze s and substr to see + // whether they are referentially transparent, and if not, + // analyze all code between declaration and use and see if + // there are statements or expressions with potential side + // effects. 
+ return false + } +} + +// checkIdxUses inspects the uses of i to make sure they match certain criteria that +// allows us to suggest a modernization. If all uses of i, s and substr match +// one of the following four valid formats, it returns a list of occurrences for +// each format. If any of the uses do not match one of the formats, return nil +// for all values, since we should not offer a replacement. +// 1. lessZero - a condition involving i establishing that i is negative (e.g. i < 0, 0 > i, i == -1, -1 == i) +// 2. greaterNegOne - a condition involving i establishing that i is non-negative (e.g. i >= 0, 0 <= i, i == 0, 0 == i) +// 3. beforeSlice - a slice of `s` that matches either s[:i], s[0:i] +// 4. afterSlice - a slice of `s` that matches one of: s[i+len(substr):], s[len(substr) + i:], s[i + const], s[k + i] (where k = len(substr)) +func checkIdxUses(info *types.Info, uses iter.Seq[inspector.Cursor], s, substr ast.Expr) (lessZero, greaterNegOne, beforeSlice, afterSlice []ast.Expr) { + use := func(cur inspector.Cursor) bool { + ek, _ := cur.ParentEdge() + n := cur.Parent().Node() + switch ek { + case edge.BinaryExpr_X, edge.BinaryExpr_Y: + check := n.(*ast.BinaryExpr) + switch checkIdxComparison(info, check) { + case -1: + lessZero = append(lessZero, check) + return true + case 1: + greaterNegOne = append(greaterNegOne, check) + return true + } + // Check does not establish that i < 0 or i > -1. + // Might be part of an outer slice expression like s[i + k] + // which requires a different check. + // Check that the thing being sliced is s and that the slice + // doesn't have a max index. 
+ if slice, ok := cur.Parent().Parent().Node().(*ast.SliceExpr); ok && + sameObject(info, s, slice.X) && + slice.Max == nil { + if isBeforeSlice(info, ek, slice) { + beforeSlice = append(beforeSlice, slice) + return true + } else if isAfterSlice(info, ek, slice, substr) { + afterSlice = append(afterSlice, slice) + return true + } + } + case edge.SliceExpr_Low, edge.SliceExpr_High: + slice := n.(*ast.SliceExpr) + // Check that the thing being sliced is s and that the slice doesn't + // have a max index. + if sameObject(info, s, slice.X) && slice.Max == nil { + if isBeforeSlice(info, ek, slice) { + beforeSlice = append(beforeSlice, slice) + return true + } else if isAfterSlice(info, ek, slice, substr) { + afterSlice = append(afterSlice, slice) + return true + } + } + } + return false + } + + for curIdent := range uses { + if !use(curIdent) { + return nil, nil, nil, nil + } + } + return lessZero, greaterNegOne, beforeSlice, afterSlice +} + +// hasModifyingUses reports whether any of the uses involve potential +// modifications. Uses involving assignments before the "afterPos" won't be +// considered. +func hasModifyingUses(info *types.Info, uses iter.Seq[inspector.Cursor], afterPos token.Pos) bool { + for curUse := range uses { + ek, _ := curUse.ParentEdge() + if ek == edge.AssignStmt_Lhs { + if curUse.Node().Pos() <= afterPos { + continue + } + assign := curUse.Parent().Node().(*ast.AssignStmt) + if sameObject(info, assign.Lhs[0], curUse.Node().(*ast.Ident)) { + // Modifying use because we are reassigning the value of the object. + return true + } + } else if ek == edge.UnaryExpr_X && + curUse.Parent().Node().(*ast.UnaryExpr).Op == token.AND { + // Modifying use because we might be passing the object by reference (an explicit &). + // We can ignore the case where we have a method call on the expression (which + // has an implicit &) because we know the type of s and substr are strings + // which cannot have methods on them. 
+ return true + } + } + return false +} + +// checkIdxComparison reports whether the check establishes that i is negative +// or non-negative. It returns -1 in the first case, 1 in the second, and 0 if +// we can confirm neither condition. We assume that a check passed to +// checkIdxComparison has i as one of its operands. +func checkIdxComparison(info *types.Info, check *ast.BinaryExpr) int { + // Check establishes that i is negative. + // e.g.: i < 0, 0 > i, i == -1, -1 == i + if check.Op == token.LSS && (isNegativeConst(info, check.Y) || isZeroIntConst(info, check.Y)) || //i < (0 or neg) + check.Op == token.GTR && (isNegativeConst(info, check.X) || isZeroIntConst(info, check.X)) || // (0 or neg) > i + check.Op == token.LEQ && (isNegativeConst(info, check.Y)) || //i <= (neg) + check.Op == token.GEQ && (isNegativeConst(info, check.X)) || // (neg) >= i + check.Op == token.EQL && + (isNegativeConst(info, check.X) || isNegativeConst(info, check.Y)) { // i == neg; neg == i + return -1 + } + // Check establishes that i is non-negative. + // e.g.: i >= 0, 0 <= i, i == 0, 0 == i + if check.Op == token.GTR && (isNonNegativeConst(info, check.Y) || isIntLiteral(info, check.Y, -1)) || // i > (non-neg or -1) + check.Op == token.LSS && (isNonNegativeConst(info, check.X) || isIntLiteral(info, check.X, -1)) || // (non-neg or -1) < i + check.Op == token.GEQ && isNonNegativeConst(info, check.Y) || // i >= (non-neg) + check.Op == token.LEQ && isNonNegativeConst(info, check.X) || // (non-neg) <= i + check.Op == token.EQL && + (isNonNegativeConst(info, check.X) || isNonNegativeConst(info, check.Y)) { // i == non-neg; non-neg == i + return 1 + } + return 0 +} + +// isNegativeConst returns true if the expr is a const int with value < zero. 
+func isNegativeConst(info *types.Info, expr ast.Expr) bool { + if tv, ok := info.Types[expr]; ok && tv.Value != nil && tv.Value.Kind() == constant.Int { + if v, ok := constant.Int64Val(tv.Value); ok { + return v < 0 + } + } + return false +} + +// isNoneNegativeConst returns true if the expr is a const int with value >= zero. +func isNonNegativeConst(info *types.Info, expr ast.Expr) bool { + if tv, ok := info.Types[expr]; ok && tv.Value != nil && tv.Value.Kind() == constant.Int { + if v, ok := constant.Int64Val(tv.Value); ok { + return v >= 0 + } + } + return false +} + +// isBeforeSlice reports whether the SliceExpr is of the form s[:i] or s[0:i]. +func isBeforeSlice(info *types.Info, ek edge.Kind, slice *ast.SliceExpr) bool { + return ek == edge.SliceExpr_High && (slice.Low == nil || isZeroIntConst(info, slice.Low)) +} + +// isAfterSlice reports whether the SliceExpr is of the form s[i+len(substr):], +// or s[i + k:] where k is a const is equal to len(substr). +func isAfterSlice(info *types.Info, ek edge.Kind, slice *ast.SliceExpr, substr ast.Expr) bool { + lowExpr, ok := slice.Low.(*ast.BinaryExpr) + if !ok || slice.High != nil { + return false + } + // Returns true if the expression is a call to len(substr). + isLenCall := func(expr ast.Expr) bool { + call, ok := expr.(*ast.CallExpr) + if !ok || len(call.Args) != 1 { + return false + } + return sameObject(info, substr, call.Args[0]) && typeutil.Callee(info, call) == builtinLen + } + + // Handle len([]byte(substr)) + if is[*ast.CallExpr](substr) { + call := substr.(*ast.CallExpr) + tv := info.Types[call.Fun] + if tv.IsType() && types.Identical(tv.Type, byteSliceType) { + // Only one arg in []byte conversion. + substr = call.Args[0] + } + } + substrLen := -1 + substrVal := info.Types[substr].Value + if substrVal != nil { + switch substrVal.Kind() { + case constant.String: + substrLen = len(constant.StringVal(substrVal)) + case constant.Int: + // constant.Value is a byte literal, e.g. 
bytes.IndexByte(_, 'a') + // or a numeric byte literal, e.g. bytes.IndexByte(_, 65) + substrLen = 1 + } + } + + switch ek { + case edge.BinaryExpr_X: + kVal := info.Types[lowExpr.Y].Value + if kVal == nil { + // i + len(substr) + return lowExpr.Op == token.ADD && isLenCall(lowExpr.Y) + } else { + // i + k + kInt, ok := constant.Int64Val(kVal) + return ok && substrLen == int(kInt) + } + case edge.BinaryExpr_Y: + kVal := info.Types[lowExpr.X].Value + if kVal == nil { + // len(substr) + i + return lowExpr.Op == token.ADD && isLenCall(lowExpr.X) + } else { + // k + i + kInt, ok := constant.Int64Val(kVal) + return ok && substrLen == int(kInt) + } + } + return false +} + +// sameObject reports whether we know that the expressions resolve to the same object. +func sameObject(info *types.Info, expr1, expr2 ast.Expr) bool { + if ident1, ok := expr1.(*ast.Ident); ok { + if ident2, ok := expr2.(*ast.Ident); ok { + uses1, ok1 := info.Uses[ident1] + uses2, ok2 := info.Uses[ident2] + return ok1 && ok2 && uses1 == uses2 + } + } + return false +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscutprefix.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscutprefix.go index 9e76f953ed..7dc11308dd 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscutprefix.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringscutprefix.go @@ -12,22 +12,20 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" 
"golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var StringsCutPrefixAnalyzer = &analysis.Analyzer{ Name: "stringscutprefix", - Doc: analysisinternal.MustExtractDoc(doc, "stringscutprefix"), + Doc: analyzerutil.MustExtractDoc(doc, "stringscutprefix"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -56,12 +54,9 @@ var StringsCutPrefixAnalyzer = &analysis.Analyzer{ // Variants: // - bytes.HasPrefix/HasSuffix usage as pattern 1. func stringscutprefix(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( - inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) - info = pass.TypesInfo + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo stringsTrimPrefix = index.Object("strings", "TrimPrefix") bytesTrimPrefix = index.Object("bytes", "TrimPrefix") @@ -72,7 +67,7 @@ func stringscutprefix(pass *analysis.Pass) (any, error) { return nil, nil } - for curFile := range filesUsing(inspect, pass.TypesInfo, "go1.20") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_20) { for curIfStmt := range curFile.Preorder((*ast.IfStmt)(nil)) { ifStmt := curIfStmt.Node().(*ast.IfStmt) @@ -206,6 +201,7 @@ func stringscutprefix(pass *analysis.Pass) (any, error) { if astutil.EqualSyntax(lhs, bin.X) && astutil.EqualSyntax(call.Args[0], bin.Y) || (astutil.EqualSyntax(lhs, bin.Y) && astutil.EqualSyntax(call.Args[0], bin.X)) { + // TODO(adonovan): avoid FreshName when not needed; see errorsastype. 
okVarName := refactor.FreshName(info.Scopes[ifStmt], ifStmt.Pos(), "ok") // Have one of: // if rest := TrimPrefix(s, prefix); rest != s { (ditto Suffix) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsseq.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsseq.go index ef2b546364..d02a53230f 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsseq.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/stringsseq.go @@ -13,19 +13,17 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" - "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var StringsSeqAnalyzer = &analysis.Analyzer{ Name: "stringsseq", - Doc: analysisinternal.MustExtractDoc(doc, "stringsseq"), + Doc: analyzerutil.MustExtractDoc(doc, "stringsseq"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -48,12 +46,9 @@ var StringsSeqAnalyzer = &analysis.Analyzer{ // - bytes.SplitSeq // - bytes.FieldsSeq func stringsseq(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( - inspect = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) - info = pass.TypesInfo + index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) + info = pass.TypesInfo stringsSplit = index.Object("strings", "Split") stringsFields = index.Object("strings", "Fields") @@ -64,7 +59,7 @@ func 
stringsseq(pass *analysis.Pass) (any, error) { return nil, nil } - for curFile := range filesUsing(inspect, info, "go1.24") { + for curFile := range filesUsingGoVersion(pass, versions.Go1_24) { for curRange := range curFile.Preorder((*ast.RangeStmt)(nil)) { rng := curRange.Node().(*ast.RangeStmt) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/testingcontext.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/testingcontext.go index 558cf142dd..939330521c 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/testingcontext.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/testingcontext.go @@ -17,19 +17,18 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var TestingContextAnalyzer = &analysis.Analyzer{ Name: "testingcontext", - Doc: analysisinternal.MustExtractDoc(doc, "testingcontext"), + Doc: analyzerutil.MustExtractDoc(doc, "testingcontext"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -56,8 +55,6 @@ var TestingContextAnalyzer = &analysis.Analyzer{ // - the call is within a test or subtest function // - the relevant testing.{T,B,F} is named and not shadowed at the call func testingContext(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) info = pass.TypesInfo @@ -137,7 
+134,7 @@ calls: testObj = isTestFn(info, n) } } - if testObj != nil && fileUses(info, astutil.EnclosingFile(cur), "go1.24") { + if testObj != nil && analyzerutil.FileUsesGoVersion(pass, astutil.EnclosingFile(cur), versions.Go1_24) { // Have a test function. Check that we can resolve the relevant // testing.{T,B,F} at the current position. if _, obj := lhs[0].Parent().LookupParent(testObj.Name(), lhs[0].Pos()); obj == testObj { diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/waitgroup.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/waitgroup.go index b890f334ba..19564c69b6 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/waitgroup.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/modernize/waitgroup.go @@ -14,19 +14,18 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" - "golang.org/x/tools/internal/analysisinternal/generated" - typeindexanalyzer "golang.org/x/tools/internal/analysisinternal/typeindex" + "golang.org/x/tools/internal/analysis/analyzerutil" + typeindexanalyzer "golang.org/x/tools/internal/analysis/typeindex" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typesinternal/typeindex" + "golang.org/x/tools/internal/versions" ) var WaitGroupAnalyzer = &analysis.Analyzer{ Name: "waitgroup", - Doc: analysisinternal.MustExtractDoc(doc, "waitgroup"), + Doc: analyzerutil.MustExtractDoc(doc, "waitgroup"), Requires: []*analysis.Analyzer{ - generated.Analyzer, inspect.Analyzer, typeindexanalyzer.Analyzer, }, @@ -61,8 +60,6 @@ var WaitGroupAnalyzer = &analysis.Analyzer{ // other effects, or blocked, or if WaitGroup.Go propagated panics // from child to parent goroutine, the argument would be different.) 
func waitgroup(pass *analysis.Pass) (any, error) { - skipGenerated(pass) - var ( index = pass.ResultOf[typeindexanalyzer.Analyzer].(*typeindex.Index) info = pass.TypesInfo @@ -128,7 +125,7 @@ func waitgroup(pass *analysis.Pass) (any, error) { } file := astutil.EnclosingFile(curAddCall) - if !fileUses(info, file, "go1.25") { + if !analyzerutil.FileUsesGoVersion(pass, file, versions.Go1_25) { continue } tokFile := pass.Fset.File(file.Pos()) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go index 2b5a7c8037..6b37295187 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go @@ -15,7 +15,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "nilfunc", - Doc: analysisinternal.MustExtractDoc(doc, "nilfunc"), + Doc: analyzerutil.MustExtractDoc(doc, "nilfunc"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/nilfunc", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go index 910ffe70d7..1eac2589bf 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -21,7 +21,7 @@ import ( "golang.org/x/tools/go/ast/edge" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" 
"golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/fmtstr" "golang.org/x/tools/internal/typeparams" @@ -38,7 +38,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "printf", - Doc: analysisinternal.MustExtractDoc(doc, "printf"), + Doc: analyzerutil.MustExtractDoc(doc, "printf"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/printf", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -612,7 +612,7 @@ func checkPrintf(pass *analysis.Pass, fileVersion string, kind Kind, call *ast.C // breaking existing tests and CI scripts. if idx == len(call.Args)-1 && fileVersion != "" && // fail open - versions.AtLeast(fileVersion, "go1.24") { + versions.AtLeast(fileVersion, versions.Go1_24) { pass.Report(analysis.Diagnostic{ Pos: formatArg.Pos(), @@ -662,7 +662,7 @@ func checkPrintf(pass *analysis.Pass, fileVersion string, kind Kind, call *ast.C anyIndex = true } rng := opRange(formatArg, op) - if !okPrintfArg(pass, call, rng, &maxArgIndex, firstArg, name, op) { + if !okPrintfArg(pass, fileVersion, call, rng, &maxArgIndex, firstArg, name, op) { // One error per format is enough. return } @@ -694,9 +694,9 @@ func checkPrintf(pass *analysis.Pass, fileVersion string, kind Kind, call *ast.C // such as the position of the %v substring of "...%v...". 
func opRange(formatArg ast.Expr, op *fmtstr.Operation) analysis.Range { if lit, ok := formatArg.(*ast.BasicLit); ok { - start, end, err := astutil.RangeInStringLiteral(lit, op.Range.Start, op.Range.End) + rng, err := astutil.RangeInStringLiteral(lit, op.Range.Start, op.Range.End) if err == nil { - return analysisinternal.Range(start, end) // position of "%v" + return rng // position of "%v" } } return formatArg // entire format string @@ -707,6 +707,7 @@ type printfArgType int const ( argBool printfArgType = 1 << iota + argByte argInt argRune argString @@ -751,7 +752,7 @@ var printVerbs = []printVerb{ {'o', sharpNumFlag, argInt | argPointer}, {'O', sharpNumFlag, argInt | argPointer}, {'p', "-#", argPointer}, - {'q', " -+.0#", argRune | argInt | argString}, + {'q', " -+.0#", argRune | argInt | argString}, // note: when analyzing go1.26 code, argInt => argByte {'s', " -+.0", argString}, {'t', "-", argBool}, {'T', "-", anyType}, @@ -765,7 +766,7 @@ var printVerbs = []printVerb{ // okPrintfArg compares the operation to the arguments actually present, // reporting any discrepancies it can discern, maxArgIndex was the index of the highest used index. // If the final argument is ellipsissed, there's little it can do for that. -func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, maxArgIndex *int, firstArg int, name string, operation *fmtstr.Operation) (ok bool) { +func okPrintfArg(pass *analysis.Pass, fileVersion string, call *ast.CallExpr, rng analysis.Range, maxArgIndex *int, firstArg int, name string, operation *fmtstr.Operation) (ok bool) { verb := operation.Verb.Verb var v printVerb found := false @@ -777,6 +778,13 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, rng analysis.Range, ma } } + // When analyzing go1.26 code, rune and byte are the only %q integers (#72850). 
+ if verb == 'q' && + fileVersion != "" && // fail open + versions.AtLeast(fileVersion, versions.Go1_26) { + v.typ = argRune | argByte | argString + } + // Could verb's arg implement fmt.Formatter? // Skip check for the %w verb, which requires an error. formatter := false diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go index f7e50f98a9..2cc5c23f12 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go @@ -204,8 +204,7 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool { case *types.Struct: // report whether all the elements of the struct match the expected type. For // instance, with "%d" all the elements must be printable with the "%d" format. - for i := 0; i < typ.NumFields(); i++ { - typf := typ.Field(i) + for typf := range typ.Fields() { if !m.match(typf.Type(), false) { return false } @@ -228,14 +227,20 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool { types.Bool: return m.t&argBool != 0 + case types.Byte: + return m.t&(argInt|argByte) != 0 + + case types.Rune, types.UntypedRune: + return m.t&(argInt|argRune) != 0 + case types.UntypedInt, types.Int, types.Int8, types.Int16, - types.Int32, + // see case Rune for int32 types.Int64, types.Uint, - types.Uint8, + // see case Byte for uint8 types.Uint16, types.Uint32, types.Uint64, @@ -259,9 +264,6 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool { case types.UnsafePointer: return m.t&(argPointer|argInt) != 0 - case types.UntypedRune: - return m.t&(argInt|argRune) != 0 - case types.UntypedNil: return false diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go index 934f3913c2..174c27109e 100644 --- 
a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/sigchanyzer/sigchanyzer.go @@ -19,7 +19,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -29,7 +29,7 @@ var doc string // Analyzer describes sigchanyzer analysis function detector. var Analyzer = &analysis.Analyzer{ Name: "sigchanyzer", - Doc: analysisinternal.MustExtractDoc(doc, "sigchanyzer"), + Doc: analyzerutil.MustExtractDoc(doc, "sigchanyzer"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/sigchanyzer", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go index 2cb91c7329..4afbe04684 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go @@ -19,7 +19,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" ) @@ -29,7 +29,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "slog", - Doc: analysisinternal.MustExtractDoc(doc, "slog"), + Doc: analyzerutil.MustExtractDoc(doc, "slog"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/slog", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -168,7 +168,7 @@ func isAttr(t types.Type) bool { // "slog.Logger.With" (instead of "(*log/slog.Logger).With") func shortName(fn *types.Func) string 
{ var r string - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + if recv := fn.Signature().Recv(); recv != nil { if _, named := typesinternal.ReceiverNamed(recv); named != nil { r = named.Obj().Name() } else { @@ -188,7 +188,7 @@ func kvFuncSkipArgs(fn *types.Func) (int, bool) { return 0, false } var recvName string // by default a slog package function - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + if recv := fn.Signature().Recv(); recv != nil { _, named := typesinternal.ReceiverNamed(recv) if named == nil { return 0, false // anon struct/interface diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go index ca303ae5c1..b68385b242 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go @@ -13,7 +13,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" ) //go:embed doc.go @@ -21,7 +21,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "stdmethods", - Doc: analysisinternal.MustExtractDoc(doc, "stdmethods"), + Doc: analyzerutil.MustExtractDoc(doc, "stdmethods"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stdmethods", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -131,12 +131,12 @@ func canonicalMethod(pass *analysis.Pass, id *ast.Ident) { } // Do the =s (if any) all match? - if !matchParams(pass, expect.args, args, "=") || !matchParams(pass, expect.results, results, "=") { + if !matchParams(expect.args, args, "=") || !matchParams(expect.results, results, "=") { return } // Everything must match. 
- if !matchParams(pass, expect.args, args, "") || !matchParams(pass, expect.results, results, "") { + if !matchParams(expect.args, args, "") || !matchParams(expect.results, results, "") { expectFmt := id.Name + "(" + argjoin(expect.args) + ")" if len(expect.results) == 1 { expectFmt += " " + argjoin(expect.results) @@ -168,7 +168,7 @@ func argjoin(x []string) string { } // Does each type in expect with the given prefix match the corresponding type in actual? -func matchParams(pass *analysis.Pass, expect []string, actual *types.Tuple, prefix string) bool { +func matchParams(expect []string, actual *types.Tuple, prefix string) bool { for i, x := range expect { if !strings.HasPrefix(x, prefix) { continue diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go index 19c72d2cf9..0cbae68898 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go @@ -14,7 +14,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/refactor" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" @@ -25,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "stringintconv", - Doc: analysisinternal.MustExtractDoc(doc, "stringintconv"), + Doc: analyzerutil.MustExtractDoc(doc, "stringintconv"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stringintconv", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go 
index eba4e56bb0..e38c266afe 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go @@ -15,7 +15,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -30,7 +30,7 @@ func init() { var Analyzer = &analysis.Analyzer{ Name: "testinggoroutine", - Doc: analysisinternal.MustExtractDoc(doc, "testinggoroutine"), + Doc: analyzerutil.MustExtractDoc(doc, "testinggoroutine"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/testinggoroutine", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go index db2e5f76d1..4b68a789cf 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go @@ -36,7 +36,7 @@ func localFunctionDecls(info *types.Info, files []*ast.File) func(*types.Func) * // isMethodNamed returns true if f is a method defined // in package with the path pkgPath with a name in names. // -// (Unlike [analysisinternal.IsMethodNamed], it ignores the receiver type name.) +// (Unlike [analysis.IsMethodNamed], it ignores the receiver type name.) 
func isMethodNamed(f *types.Func, pkgPath string, names ...string) bool { if f == nil { return false @@ -44,7 +44,7 @@ func isMethodNamed(f *types.Func, pkgPath string, names ...string) bool { if f.Pkg() == nil || f.Pkg().Path() != pkgPath { return false } - if f.Type().(*types.Signature).Recv() == nil { + if f.Signature().Recv() == nil { return false } return slices.Contains(names, f.Name()) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go index a0ed5ab14e..1f33df8403 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go @@ -15,7 +15,8 @@ import ( "unicode/utf8" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/astutil" "golang.org/x/tools/internal/typesinternal" ) @@ -24,7 +25,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "tests", - Doc: analysisinternal.MustExtractDoc(doc, "tests"), + Doc: analyzerutil.MustExtractDoc(doc, "tests"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/tests", Run: run, } @@ -464,7 +465,7 @@ func checkTest(pass *analysis.Pass, fn *ast.FuncDecl, prefix string) { if tparams := fn.Type.TypeParams; tparams != nil && len(tparams.List) > 0 { // Note: cmd/go/internal/load also errors about TestXXX and BenchmarkXXX functions with type parameters. // We have currently decided to also warn before compilation/package loading. This can help users in IDEs. 
- pass.ReportRangef(analysisinternal.Range(tparams.Opening, tparams.Closing), + pass.ReportRangef(astutil.RangeOf(tparams.Opening, tparams.Closing), "%s has type parameters: it will not be run by go test as a %sXXX function", fn.Name.Name, prefix) } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go index 45b6822c17..8353c1efa9 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go @@ -18,7 +18,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -30,7 +30,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "timeformat", - Doc: analysisinternal.MustExtractDoc(doc, "timeformat"), + Doc: analyzerutil.MustExtractDoc(doc, "timeformat"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/timeformat", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -39,7 +39,7 @@ var Analyzer = &analysis.Analyzer{ func run(pass *analysis.Pass) (any, error) { // Note: (time.Time).Format is a method and can be a typeutil.Callee // without directly importing "time". So we cannot just skip this package - // when !analysisinternal.Imports(pass.Pkg, "time"). + // when !analysis.Imports(pass.Pkg, "time"). // TODO(taking): Consider using a prepass to collect typeutil.Callees. 
inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go index 4de48c8393..38eb0b1063 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go @@ -13,7 +13,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -22,7 +22,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unmarshal", - Doc: analysisinternal.MustExtractDoc(doc, "unmarshal"), + Doc: analyzerutil.MustExtractDoc(doc, "unmarshal"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unmarshal", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -39,7 +39,7 @@ func run(pass *analysis.Pass) (any, error) { // Note: (*"encoding/json".Decoder).Decode, (* "encoding/gob".Decoder).Decode // and (* "encoding/xml".Decoder).Decode are methods and can be a typeutil.Callee // without directly importing their packages. So we cannot just skip this package - // when !analysisinternal.Imports(pass.Pkg, "encoding/..."). + // when !analysis.Imports(pass.Pkg, "encoding/..."). // TODO(taking): Consider using a prepass to collect typeutil.Callees. inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) @@ -57,7 +57,7 @@ func run(pass *analysis.Pass) (any, error) { // Classify the callee (without allocating memory). 
argidx := -1 - recv := fn.Type().(*types.Signature).Recv() + recv := fn.Signature().Recv() if fn.Name() == "Unmarshal" && recv == nil { // "encoding/json".Unmarshal // "encoding/xml".Unmarshal diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go index 668a335299..532f38fe91 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go @@ -15,7 +15,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/refactor" ) @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unreachable", - Doc: analysisinternal.MustExtractDoc(doc, "unreachable"), + Doc: analyzerutil.MustExtractDoc(doc, "unreachable"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unreachable", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go index 24ff723390..ce785725e3 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go @@ -15,7 +15,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unsafeptr", - Doc: 
analysisinternal.MustExtractDoc(doc, "unsafeptr"), + Doc: analyzerutil.MustExtractDoc(doc, "unsafeptr"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unsafeptr", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go index 57ad4f0769..bd32d58690 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go @@ -25,7 +25,8 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" + "golang.org/x/tools/internal/astutil" ) //go:embed doc.go @@ -33,7 +34,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "unusedresult", - Doc: analysisinternal.MustExtractDoc(doc, "unusedresult"), + Doc: analyzerutil.MustExtractDoc(doc, "unusedresult"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unusedresult", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, @@ -149,11 +150,11 @@ func run(pass *analysis.Pass) (any, error) { if !ok { return // e.g. var or builtin } - if sig := fn.Type().(*types.Signature); sig.Recv() != nil { + if sig := fn.Signature(); sig.Recv() != nil { // method (e.g. foo.String()) if types.Identical(sig, sigNoArgsStringResult) { if stringMethods[fn.Name()] { - pass.ReportRangef(analysisinternal.Range(call.Pos(), call.Lparen), + pass.ReportRangef(astutil.RangeOf(call.Pos(), call.Lparen), "result of (%s).%s call not used", sig.Recv().Type(), fn.Name()) } @@ -161,7 +162,7 @@ func run(pass *analysis.Pass) (any, error) { } else { // package-level function (e.g. 
fmt.Errorf) if pkgFuncs[[2]string{fn.Pkg().Path(), fn.Name()}] { - pass.ReportRangef(analysisinternal.Range(call.Pos(), call.Lparen), + pass.ReportRangef(astutil.RangeOf(call.Pos(), call.Lparen), "result of %s.%s call not used", fn.Pkg().Path(), fn.Name()) } diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go index 88e4cc8677..c2e20521e9 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go @@ -15,7 +15,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/analyzerutil" "golang.org/x/tools/internal/typesinternal" ) @@ -24,7 +24,7 @@ var doc string var Analyzer = &analysis.Analyzer{ Name: "waitgroup", - Doc: analysisinternal.MustExtractDoc(doc, "waitgroup"), + Doc: analyzerutil.MustExtractDoc(doc, "waitgroup"), URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/waitgroup", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go index b407bc7791..0180a341e5 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go +++ b/src/cmd/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go @@ -49,7 +49,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/internal/analysisflags" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/analysis/driverutil" "golang.org/x/tools/internal/facts" ) @@ -183,16 +183,18 @@ func processResults(fset *token.FileSet, id string, results []result) (exit int) // but apply all fixes 
from the root actions. // Convert results to form needed by ApplyFixes. - fixActions := make([]analysisflags.FixAction, len(results)) + fixActions := make([]driverutil.FixAction, len(results)) for i, res := range results { - fixActions[i] = analysisflags.FixAction{ + fixActions[i] = driverutil.FixAction{ Name: res.a.Name, + Pkg: res.pkg, + Files: res.files, FileSet: fset, - ReadFileFunc: os.ReadFile, + ReadFileFunc: os.ReadFile, // TODO(adonovan): respect overlays Diagnostics: res.diagnostics, } } - if err := analysisflags.ApplyFixes(fixActions, false); err != nil { + if err := driverutil.ApplyFixes(fixActions, analysisflags.Diff, false); err != nil { // Fail when applying fixes failed. log.Print(err) exit = 1 @@ -209,7 +211,7 @@ func processResults(fset *token.FileSet, id string, results []result) (exit int) if analysisflags.JSON { // JSON output - tree := make(analysisflags.JSONTree) + tree := make(driverutil.JSONTree) for _, res := range results { tree.Add(fset, id, res.a.Name, res.diagnostics, res.err) } @@ -225,7 +227,7 @@ func processResults(fset *token.FileSet, id string, results []result) (exit int) } for _, res := range results { for _, diag := range res.diagnostics { - analysisflags.PrintPlain(os.Stderr, fset, analysisflags.Context, diag) + driverutil.PrintPlain(os.Stderr, fset, analysisflags.Context, diag) exit = 1 } } @@ -428,7 +430,7 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re ResultOf: inputs, Report: func(d analysis.Diagnostic) { // Unitchecker doesn't apply fixes, but it does report them in the JSON output. - if err := analysisinternal.ValidateFixes(fset, a, d.SuggestedFixes); err != nil { + if err := driverutil.ValidateFixes(fset, a, d.SuggestedFixes); err != nil { // Since we have diagnostics, the exit code will be nonzero, // so logging these errors is sufficient. 
log.Println(err) @@ -444,14 +446,14 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re AllPackageFacts: func() []analysis.PackageFact { return facts.AllPackageFacts(factFilter) }, Module: module, } - pass.ReadFile = analysisinternal.CheckedReadFile(pass, os.ReadFile) + pass.ReadFile = driverutil.CheckedReadFile(pass, os.ReadFile) t0 := time.Now() act.result, act.err = a.Run(pass) if act.err == nil { // resolve URLs on diagnostics. for i := range act.diagnostics { - if url, uerr := analysisflags.ResolveURL(a, act.diagnostics[i]); uerr == nil { + if url, uerr := driverutil.ResolveURL(a, act.diagnostics[i]); uerr == nil { act.diagnostics[i].URL = url } else { act.err = uerr // keep the last error @@ -482,9 +484,7 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re results := make([]result, len(analyzers)) for i, a := range analyzers { act := actions[a] - results[i].a = a - results[i].err = act.err - results[i].diagnostics = act.diagnostics + results[i] = result{pkg, files, a, act.diagnostics, act.err} } data := facts.Encode() @@ -499,6 +499,8 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re } type result struct { + pkg *types.Package + files []*ast.File a *analysis.Analyzer diagnostics []analysis.Diagnostic err error diff --git a/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/cursor.go index 7e72d3c284..fc9bbc714c 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/cursor.go +++ b/src/cmd/vendor/golang.org/x/tools/go/ast/inspector/cursor.go @@ -467,7 +467,9 @@ func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { // This algorithm could be implemented using c.Inspect, // but it is about 2.5x slower. - best := int32(-1) // push index of latest (=innermost) node containing range + // best is the push-index of the latest (=innermost) node containing range. 
+ // (Beware: latest is not always innermost because FuncDecl.{Name,Type} overlap.) + best := int32(-1) for i, limit := c.indices(); i < limit; i++ { ev := events[i] if ev.index > i { // push? @@ -481,6 +483,19 @@ func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { continue } } else { + // Edge case: FuncDecl.Name and .Type overlap: + // Don't update best from Name to FuncDecl.Type. + // + // The condition can be read as: + // - n is FuncType + // - n.parent is FuncDecl + // - best is strictly beneath the FuncDecl + if ev.typ == 1<<nFuncType && + events[ev.parent].typ == 1<<nFuncDecl && + best > ev.parent { + continue + } + nodeEnd = n.End() if n.Pos() > start { break // disjoint, after; stop diff --git a/src/cmd/vendor/golang.org/x/tools/go/cfg/builder.go b/src/cmd/vendor/golang.org/x/tools/go/cfg/builder.go index ac4d63c400..f16cd42309 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/cfg/builder.go +++ b/src/cmd/vendor/golang.org/x/tools/go/cfg/builder.go @@ -13,7 +13,7 @@ import ( ) type builder struct { - cfg *CFG + blocks []*Block mayReturn func(*ast.CallExpr) bool current *Block lblocks map[string]*lblock // labeled blocks @@ -32,12 +32,18 @@ start: *ast.SendStmt, *ast.IncDecStmt, *ast.GoStmt, - *ast.DeferStmt, *ast.EmptyStmt, *ast.AssignStmt: // No effect on control flow. b.add(s) + case *ast.DeferStmt: + b.add(s) + // Assume conservatively that this behaves like: + // defer func() { recover() } + // so any subsequent panic may act like a return. + b.current.returns = true + case *ast.ExprStmt: b.add(s) if call, ok := s.X.(*ast.CallExpr); ok && !b.mayReturn(call) { @@ -64,6 +70,7 @@ start: goto start // effectively: tailcall stmt(g, s.Stmt, label) case *ast.ReturnStmt: + b.current.returns = true b.add(s) b.current = b.newBlock(KindUnreachable, s) @@ -483,14 +490,13 @@ func (b *builder) labeledBlock(label *ast.Ident, stmt *ast.LabeledStmt) *lblock // It does not automatically become the current block. 
// comment is an optional string for more readable debugging output. func (b *builder) newBlock(kind BlockKind, stmt ast.Stmt) *Block { - g := b.cfg block := &Block{ - Index: int32(len(g.Blocks)), + Index: int32(len(b.blocks)), Kind: kind, Stmt: stmt, } block.Succs = block.succs2[:0] - g.Blocks = append(g.Blocks, block) + b.blocks = append(b.blocks, block) return block } diff --git a/src/cmd/vendor/golang.org/x/tools/go/cfg/cfg.go b/src/cmd/vendor/golang.org/x/tools/go/cfg/cfg.go index 29a39f698c..38aba77c29 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/cfg/cfg.go +++ b/src/cmd/vendor/golang.org/x/tools/go/cfg/cfg.go @@ -47,13 +47,16 @@ import ( "go/ast" "go/format" "go/token" + + "golang.org/x/tools/internal/cfginternal" ) // A CFG represents the control-flow graph of a single function. // // The entry point is Blocks[0]; there may be multiple return blocks. type CFG struct { - Blocks []*Block // block[0] is entry; order otherwise undefined + Blocks []*Block // block[0] is entry; order otherwise undefined + noreturn bool // function body lacks a reachable return statement } // A Block represents a basic block: a list of statements and @@ -67,12 +70,13 @@ type CFG struct { // an [ast.Expr], Succs[0] is the successor if the condition is true, and // Succs[1] is the successor if the condition is false. 
type Block struct { - Nodes []ast.Node // statements, expressions, and ValueSpecs - Succs []*Block // successor nodes in the graph - Index int32 // index within CFG.Blocks - Live bool // block is reachable from entry - Kind BlockKind // block kind - Stmt ast.Stmt // statement that gave rise to this block (see BlockKind for details) + Nodes []ast.Node // statements, expressions, and ValueSpecs + Succs []*Block // successor nodes in the graph + Index int32 // index within CFG.Blocks + Live bool // block is reachable from entry + returns bool // block contains return or defer (which may recover and return) + Kind BlockKind // block kind + Stmt ast.Stmt // statement that gave rise to this block (see BlockKind for details) succs2 [2]*Block // underlying array for Succs } @@ -141,14 +145,14 @@ func (kind BlockKind) String() string { func New(body *ast.BlockStmt, mayReturn func(*ast.CallExpr) bool) *CFG { b := builder{ mayReturn: mayReturn, - cfg: new(CFG), } b.current = b.newBlock(KindBody, body) b.stmt(body) - // Compute liveness (reachability from entry point), breadth-first. - q := make([]*Block, 0, len(b.cfg.Blocks)) - q = append(q, b.cfg.Blocks[0]) // entry point + // Compute liveness (reachability from entry point), + // breadth-first, marking Block.Live flags. + q := make([]*Block, 0, len(b.blocks)) + q = append(q, b.blocks[0]) // entry point for len(q) > 0 { b := q[len(q)-1] q = q[:len(q)-1] @@ -162,12 +166,30 @@ func New(body *ast.BlockStmt, mayReturn func(*ast.CallExpr) bool) *CFG { // Does control fall off the end of the function's body? // Make implicit return explicit. if b.current != nil && b.current.Live { + b.current.returns = true b.add(&ast.ReturnStmt{ Return: body.End() - 1, }) } - return b.cfg + // Is any return (or defer+recover) block reachable? 
+ noreturn := true + for _, bl := range b.blocks { + if bl.Live && bl.returns { + noreturn = false + break + } + } + + return &CFG{Blocks: b.blocks, noreturn: noreturn} +} + +// isNoReturn reports whether the function has no reachable return. +// TODO(adonovan): add (*CFG).NoReturn to public API. +func isNoReturn(_cfg any) bool { return _cfg.(*CFG).noreturn } + +func init() { + cfginternal.IsNoReturn = isNoReturn // expose to ctrlflow analyzer } func (b *Block) String() string { @@ -187,6 +209,14 @@ func (b *Block) comment(fset *token.FileSet) string { // // When control falls off the end of the function, the ReturnStmt is synthetic // and its [ast.Node.End] position may be beyond the end of the file. +// +// A function that contains no return statement (explicit or implied) +// may yet return normally, and may even return a nonzero value. For example: +// +// func() (res any) { +// defer func() { res = recover() }() +// panic(123) +// } func (b *Block) Return() (ret *ast.ReturnStmt) { if len(b.Nodes) > 0 { ret, _ = b.Nodes[len(b.Nodes)-1].(*ast.ReturnStmt) diff --git a/src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 6c0c74968f..6646bf5508 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/src/cmd/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -249,7 +249,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { case *types.Func: // A func, if not package-level, must be a method. 
- if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + if recv := obj.Signature().Recv(); recv == nil { return "", fmt.Errorf("func is not a method: %v", obj) } @@ -405,7 +405,7 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { return "", false } - _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + _, named := typesinternal.ReceiverNamed(meth.Signature().Recv()) if named == nil { return "", false } diff --git a/src/cmd/vendor/golang.org/x/tools/go/types/typeutil/map.go b/src/cmd/vendor/golang.org/x/tools/go/types/typeutil/map.go index f035a0b6be..36624572a6 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/src/cmd/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -304,8 +304,7 @@ func (h hasher) hash(t types.Type) uint32 { case *types.Named: hash := h.hashTypeName(t.Obj()) targs := t.TypeArgs() - for i := 0; i < targs.Len(); i++ { - targ := targs.At(i) + for targ := range targs.Types() { hash += 2 * h.hash(targ) } return hash diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysis/analyzerutil/doc.go b/src/cmd/vendor/golang.org/x/tools/internal/analysis/analyzerutil/doc.go new file mode 100644 index 0000000000..74a2a1c815 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysis/analyzerutil/doc.go @@ -0,0 +1,6 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analyzerutil provides implementation helpers for analyzers. 
+package analyzerutil diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go b/src/cmd/vendor/golang.org/x/tools/internal/analysis/analyzerutil/extractdoc.go index c6cdf5997e..772a0300da 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysis/analyzerutil/extractdoc.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package analysisinternal +package analyzerutil import ( "fmt" @@ -35,7 +35,7 @@ import ( // // var Analyzer = &analysis.Analyzer{ // Name: "halting", -// Doc: analysisinternal.MustExtractDoc(doc, "halting"), +// Doc: analyzerutil.MustExtractDoc(doc, "halting"), // ... // } func MustExtractDoc(content, name string) string { diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysis/analyzerutil/readfile.go b/src/cmd/vendor/golang.org/x/tools/internal/analysis/analyzerutil/readfile.go new file mode 100644 index 0000000000..ecc30cae04 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysis/analyzerutil/readfile.go @@ -0,0 +1,30 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analyzerutil + +// This file defines helpers for calling [analysis.Pass.ReadFile]. + +import ( + "go/token" + "os" + + "golang.org/x/tools/go/analysis" +) + +// ReadFile reads a file and adds it to the FileSet in pass +// so that we can report errors against it using lineStart. 
+func ReadFile(pass *analysis.Pass, filename string) ([]byte, *token.File, error) { + readFile := pass.ReadFile + if readFile == nil { + readFile = os.ReadFile + } + content, err := readFile(filename) + if err != nil { + return nil, nil, err + } + tf := pass.Fset.AddFile(filename, -1, len(content)) + tf.SetLinesForContent(content) + return content, tf, nil +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysis/analyzerutil/version.go b/src/cmd/vendor/golang.org/x/tools/internal/analysis/analyzerutil/version.go new file mode 100644 index 0000000000..0b9bcc37b6 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysis/analyzerutil/version.go @@ -0,0 +1,42 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analyzerutil + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/internal/packagepath" + "golang.org/x/tools/internal/stdlib" + "golang.org/x/tools/internal/versions" +) + +// FileUsesGoVersion reports whether the specified file may use features of the +// specified version of Go (e.g. "go1.24"). +// +// Tip: we recommend using this check "late", just before calling +// pass.Report, rather than "early" (when entering each ast.File, or +// each candidate node of interest, during the traversal), because the +// operation is not free, yet is not a highly selective filter: the +// fraction of files that pass most version checks is high and +// increases over time. +func FileUsesGoVersion(pass *analysis.Pass, file *ast.File, version string) (_res bool) { + fileVersion := pass.TypesInfo.FileVersions[file] + + // Standard packages that are part of toolchain bootstrapping + // are not considered to use a version of Go later than the + // current bootstrap toolchain version. + // The bootstrap rule does not cover tests, + // and some tests (e.g. debug/elf/file_test.go) rely on this. 
+ pkgpath := pass.Pkg.Path() + if packagepath.IsStdPackage(pkgpath) && + stdlib.IsBootstrapPackage(pkgpath) && // (excludes "*_test" external test packages) + !strings.HasSuffix(pass.Fset.File(file.Pos()).Name(), "_test.go") { // (excludes all tests) + fileVersion = stdlib.BootstrapVersion.String() // package must bootstrap + } + + return !versions.Before(fileVersion, version) +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/fix.go b/src/cmd/vendor/golang.org/x/tools/internal/analysis/driverutil/fix.go index 43a456a457..ef06cf9bde 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/fix.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysis/driverutil/fix.go @@ -2,37 +2,48 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package analysisflags +// Package driverutil defines implementation helper functions for +// analysis drivers such as unitchecker, {single,multi}checker, and +// analysistest. +package driverutil // This file defines the -fix logic common to unitchecker and // {single,multi}checker. import ( + "bytes" "fmt" - "go/format" + "go/ast" + "go/parser" + "go/printer" "go/token" + "go/types" "log" "maps" "os" "sort" + "strconv" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/astutil/free" "golang.org/x/tools/internal/diff" ) // FixAction abstracts a checker action (running one analyzer on one // package) for the purposes of applying its diagnostics' fixes. type FixAction struct { - Name string // e.g. "analyzer@package" + Name string // e.g. 
"analyzer@package" + Pkg *types.Package // (for import removal) + Files []*ast.File FileSet *token.FileSet - ReadFileFunc analysisinternal.ReadFileFunc + ReadFileFunc ReadFileFunc Diagnostics []analysis.Diagnostic } // ApplyFixes attempts to apply the first suggested fix associated // with each diagnostic reported by the specified actions. -// All fixes must have been validated by [analysisinternal.ValidateFixes]. +// All fixes must have been validated by [ValidateFixes]. // // Each fix is treated as an independent change; fixes are merged in // an arbitrary deterministic order as if by a three-way diff tool @@ -58,15 +69,15 @@ type FixAction struct { // composition of the two fixes is semantically correct. Coalescing // identical edits is appropriate for imports, but not for, say, // increments to a counter variable; the correct resolution in that -// case might be to increment it twice. Or consider two fixes that -// each delete the penultimate reference to an import or local -// variable: each fix is sound individually, and they may be textually -// distant from each other, but when both are applied, the program is -// no longer valid because it has an unreferenced import or local -// variable. -// TODO(adonovan): investigate replacing the final "gofmt" step with a -// formatter that applies the unused-import deletion logic of -// "goimports". +// case might be to increment it twice. +// +// Or consider two fixes that each delete the penultimate reference to +// a local variable: each fix is sound individually, and they may be +// textually distant from each other, but when both are applied, the +// program is no longer valid because it has an unreferenced local +// variable. (ApplyFixes solves the analogous problem for imports by +// eliminating imports whose name is unreferenced in the remainder of +// the fixed file.) // // Merging depends on both the order of fixes and they order of edits // within them. 
For example, if three fixes add import "a" twice and @@ -80,12 +91,15 @@ type FixAction struct { // applyFixes returns success if all fixes are valid, could be cleanly // merged, and the corresponding files were successfully updated. // -// If the -diff flag was set, instead of updating the files it display the final -// patch composed of all the cleanly merged fixes. +// If printDiff (from the -diff flag) is set, instead of updating the +// files it display the final patch composed of all the cleanly merged +// fixes. // // TODO(adonovan): handle file-system level aliases such as symbolic // links using robustio.FileID. -func ApplyFixes(actions []FixAction, verbose bool) error { +func ApplyFixes(actions []FixAction, printDiff, verbose bool) error { + generated := make(map[*token.File]bool) + // Select fixes to apply. // // If there are several for a given Diagnostic, choose the first. @@ -96,6 +110,15 @@ func ApplyFixes(actions []FixAction, verbose bool) error { } var fixes []*fixact for _, act := range actions { + for _, file := range act.Files { + tokFile := act.FileSet.File(file.FileStart) + // Memoize, since there may be many actions + // for the same package (list of files). + if _, seen := generated[tokFile]; !seen { + generated[tokFile] = ast.IsGenerated(file) + } + } + for _, diag := range act.Diagnostics { for i := range diag.SuggestedFixes { fix := &diag.SuggestedFixes[i] @@ -119,7 +142,7 @@ func ApplyFixes(actions []FixAction, verbose bool) error { // packages are not disjoint, due to test variants, so this // would not really address the issue.) 
baselineContent := make(map[string][]byte) - getBaseline := func(readFile analysisinternal.ReadFileFunc, filename string) ([]byte, error) { + getBaseline := func(readFile ReadFileFunc, filename string) ([]byte, error) { content, ok := baselineContent[filename] if !ok { var err error @@ -134,16 +157,32 @@ func ApplyFixes(actions []FixAction, verbose bool) error { // Apply each fix, updating the current state // only if the entire fix can be cleanly merged. - accumulatedEdits := make(map[string][]diff.Edit) - goodFixes := 0 + var ( + accumulatedEdits = make(map[string][]diff.Edit) + filePkgs = make(map[string]*types.Package) // maps each file to an arbitrary package that includes it + + goodFixes = 0 // number of fixes cleanly applied + skippedFixes = 0 // number of fixes skipped (because e.g. edits a generated file) + ) fixloop: for _, fixact := range fixes { + // Skip a fix if any of its edits touch a generated file. + for _, edit := range fixact.fix.TextEdits { + file := fixact.act.FileSet.File(edit.Pos) + if generated[file] { + skippedFixes++ + continue fixloop + } + } + // Convert analysis.TextEdits to diff.Edits, grouped by file. // Precondition: a prior call to validateFix succeeded. fileEdits := make(map[string][]diff.Edit) for _, edit := range fixact.fix.TextEdits { file := fixact.act.FileSet.File(edit.Pos) + filePkgs[file.Name()] = fixact.act.Pkg + baseline, err := getBaseline(fixact.act.ReadFileFunc, file.Name()) if err != nil { log.Printf("skipping fix to file %s: %v", file.Name(), err) @@ -191,7 +230,7 @@ fixloop: log.Printf("%s: fix %s applied", fixact.act.Name, fixact.fix.Message) } } - badFixes := len(fixes) - goodFixes + badFixes := len(fixes) - goodFixes - skippedFixes // number of fixes that could not be applied // Show diff or update files to final state. var files []string @@ -214,11 +253,11 @@ fixloop: } // Attempt to format each file. 
- if formatted, err := format.Source(final); err == nil { + if formatted, err := FormatSourceRemoveImports(filePkgs[file], final); err == nil { final = formatted } - if diffFlag { + if printDiff { // Since we formatted the file, we need to recompute the diff. unified := diff.Unified(file+" (old)", file+" (new)", string(baseline), string(final)) // TODO(adonovan): abstract the I/O. @@ -262,23 +301,149 @@ fixloop: // These numbers are potentially misleading: // The denominator includes duplicate conflicting fixes due to // common files in packages "p" and "p [p.test]", which may - // have been fixed fixed and won't appear in the re-run. + // have been fixed and won't appear in the re-run. // TODO(adonovan): eliminate identical fixes as an initial // filtering step. // // TODO(adonovan): should we log that n files were updated in case of total victory? if badFixes > 0 || filesUpdated < totalFiles { - if diffFlag { - return fmt.Errorf("%d of %d fixes skipped (e.g. due to conflicts)", badFixes, len(fixes)) + if printDiff { + return fmt.Errorf("%d of %s skipped (e.g. due to conflicts)", + badFixes, + plural(len(fixes), "fix", "fixes")) } else { - return fmt.Errorf("applied %d of %d fixes; %d files updated. (Re-run the command to apply more.)", - goodFixes, len(fixes), filesUpdated) + return fmt.Errorf("applied %d of %s; %s updated. (Re-run the command to apply more.)", + goodFixes, + plural(len(fixes), "fix", "fixes"), + plural(filesUpdated, "file", "files")) } } if verbose { - log.Printf("applied %d fixes, updated %d files", len(fixes), filesUpdated) + if skippedFixes > 0 { + log.Printf("skipped %s that would edit generated files", + plural(skippedFixes, "fix", "fixes")) + } + log.Printf("applied %s, updated %s", + plural(len(fixes), "fix", "fixes"), + plural(filesUpdated, "file", "files")) } return nil } + +// FormatSourceRemoveImports is a variant of [format.Source] that +// removes imports that became redundant when fixes were applied. 
+// +// Import removal is necessarily heuristic since we do not have type +// information for the fixed file and thus cannot accurately tell +// whether k is among the free names of T{k: 0}, which requires +// knowledge of whether T is a struct type. +func FormatSourceRemoveImports(pkg *types.Package, src []byte) ([]byte, error) { + // This function was reduced from the "strict entire file" + // path through [format.Source]. + + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "fixed.go", src, parser.ParseComments|parser.SkipObjectResolution) + if err != nil { + return nil, err + } + + ast.SortImports(fset, file) + + removeUnneededImports(fset, pkg, file) + + // printerNormalizeNumbers means to canonicalize number literal prefixes + // and exponents while printing. See https://golang.org/doc/go1.13#gofmt. + // + // This value is defined in go/printer specifically for go/format and cmd/gofmt. + const printerNormalizeNumbers = 1 << 30 + cfg := &printer.Config{ + Mode: printer.UseSpaces | printer.TabIndent | printerNormalizeNumbers, + Tabwidth: 8, + } + var buf bytes.Buffer + if err := cfg.Fprint(&buf, fset, file); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// removeUnneededImports removes import specs that are not referenced +// within the fixed file. It uses [free.Names] to heuristically +// approximate the set of imported names needed by the body of the +// file based only on syntax. +// +// pkg provides type information about the unmodified package, in +// particular the name that would implicitly be declared by a +// non-renaming import of a given existing dependency. +func removeUnneededImports(fset *token.FileSet, pkg *types.Package, file *ast.File) { + // Map each existing dependency to its default import name. + // (We'll need this to interpret non-renaming imports.) 
+ packageNames := make(map[string]string) + for _, imp := range pkg.Imports() { + packageNames[imp.Path()] = imp.Name() + } + + // Compute the set of free names of the file, + // ignoring its import decls. + freenames := make(map[string]bool) + for _, decl := range file.Decls { + if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT { + continue // skip import + } + + // TODO(adonovan): we could do better than includeComplitIdents=false + // since we have type information about the unmodified package, + // which is a good source of heuristics. + const includeComplitIdents = false + maps.Copy(freenames, free.Names(decl, includeComplitIdents)) + } + + // Check whether each import's declared name is free (referenced) by the file. + var deletions []func() + for _, spec := range file.Imports { + path, err := strconv.Unquote(spec.Path.Value) + if err != nil { + continue // malformed import; ignore + } + explicit := "" // explicit PkgName, if any + if spec.Name != nil { + explicit = spec.Name.Name + } + name := explicit // effective PkgName + if name == "" { + // Non-renaming import: use package's default name. + name = packageNames[path] + } + switch name { + case "": + continue // assume it's a new import + case ".": + continue // dot imports are tricky + case "_": + continue // keep blank imports + } + if !freenames[name] { + // Import's effective name is not free in (not used by) the file. + // Enqueue it for deletion after the loop. + deletions = append(deletions, func() { + astutil.DeleteNamedImport(fset, file, explicit, path) + }) + } + } + + // Apply the deletions. + for _, del := range deletions { + del() + } +} + +// plural returns "n nouns", selecting the plural form as approriate. 
+func plural(n int, singular, plural string) string { + if n == 1 { + return "1 " + singular + } else { + return fmt.Sprintf("%d %s", n, plural) + } +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysis/driverutil/print.go b/src/cmd/vendor/golang.org/x/tools/internal/analysis/driverutil/print.go new file mode 100644 index 0000000000..7fc42a5ef7 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysis/driverutil/print.go @@ -0,0 +1,161 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package driverutil + +// This file defined output helpers common to all drivers. + +import ( + "encoding/json" + "fmt" + "go/token" + "io" + "log" + "os" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// TODO(adonovan): don't accept an io.Writer if we don't report errors. +// Either accept a bytes.Buffer (infallible), or return a []byte. + +// PrintPlain prints a diagnostic in plain text form. +// If contextLines is nonnegative, it also prints the +// offending line plus this many lines of context. +func PrintPlain(out io.Writer, fset *token.FileSet, contextLines int, diag analysis.Diagnostic) { + print := func(pos, end token.Pos, message string) { + posn := fset.Position(pos) + fmt.Fprintf(out, "%s: %s\n", posn, message) + + // show offending line plus N lines of context. + if contextLines >= 0 { + end := fset.Position(end) + if !end.IsValid() { + end = posn + } + // TODO(adonovan): highlight the portion of the line indicated + // by pos...end using ASCII art, terminal colors, etc? 
+ data, _ := os.ReadFile(posn.Filename) + lines := strings.Split(string(data), "\n") + for i := posn.Line - contextLines; i <= end.Line+contextLines; i++ { + if 1 <= i && i <= len(lines) { + fmt.Fprintf(out, "%d\t%s\n", i, lines[i-1]) + } + } + } + } + + print(diag.Pos, diag.End, diag.Message) + for _, rel := range diag.Related { + print(rel.Pos, rel.End, "\t"+rel.Message) + } +} + +// A JSONTree is a mapping from package ID to analysis name to result. +// Each result is either a jsonError or a list of JSONDiagnostic. +type JSONTree map[string]map[string]any + +// A TextEdit describes the replacement of a portion of a file. +// Start and End are zero-based half-open indices into the original byte +// sequence of the file, and New is the new text. +type JSONTextEdit struct { + Filename string `json:"filename"` + Start int `json:"start"` + End int `json:"end"` + New string `json:"new"` +} + +// A JSONSuggestedFix describes an edit that should be applied as a whole or not +// at all. It might contain multiple TextEdits/text_edits if the SuggestedFix +// consists of multiple non-contiguous edits. +type JSONSuggestedFix struct { + Message string `json:"message"` + Edits []JSONTextEdit `json:"edits"` +} + +// A JSONDiagnostic describes the JSON schema of an analysis.Diagnostic. +// +// TODO(matloob): include End position if present. +type JSONDiagnostic struct { + Category string `json:"category,omitempty"` + Posn string `json:"posn"` // e.g. "file.go:line:column" + Message string `json:"message"` + SuggestedFixes []JSONSuggestedFix `json:"suggested_fixes,omitempty"` + Related []JSONRelatedInformation `json:"related,omitempty"` +} + +// A JSONRelated describes a secondary position and message related to +// a primary diagnostic. +// +// TODO(adonovan): include End position if present. +type JSONRelatedInformation struct { + Posn string `json:"posn"` // e.g. 
"file.go:line:column" + Message string `json:"message"` +} + +// Add adds the result of analysis 'name' on package 'id'. +// The result is either a list of diagnostics or an error. +func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.Diagnostic, err error) { + var v any + if err != nil { + type jsonError struct { + Err string `json:"error"` + } + v = jsonError{err.Error()} + } else if len(diags) > 0 { + diagnostics := make([]JSONDiagnostic, 0, len(diags)) + for _, f := range diags { + var fixes []JSONSuggestedFix + for _, fix := range f.SuggestedFixes { + var edits []JSONTextEdit + for _, edit := range fix.TextEdits { + edits = append(edits, JSONTextEdit{ + Filename: fset.Position(edit.Pos).Filename, + Start: fset.Position(edit.Pos).Offset, + End: fset.Position(edit.End).Offset, + New: string(edit.NewText), + }) + } + fixes = append(fixes, JSONSuggestedFix{ + Message: fix.Message, + Edits: edits, + }) + } + var related []JSONRelatedInformation + for _, r := range f.Related { + related = append(related, JSONRelatedInformation{ + Posn: fset.Position(r.Pos).String(), + Message: r.Message, + }) + } + jdiag := JSONDiagnostic{ + Category: f.Category, + Posn: fset.Position(f.Pos).String(), + Message: f.Message, + SuggestedFixes: fixes, + Related: related, + } + diagnostics = append(diagnostics, jdiag) + } + v = diagnostics + } + if v != nil { + m, ok := tree[id] + if !ok { + m = make(map[string]any) + tree[id] = m + } + m[name] = v + } +} + +func (tree JSONTree) Print(out io.Writer) error { + data, err := json.MarshalIndent(tree, "", "\t") + if err != nil { + log.Panicf("internal error: JSON marshaling failed: %v", err) + } + _, err = fmt.Fprintf(out, "%s\n", data) + return err +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysis/driverutil/readfile.go b/src/cmd/vendor/golang.org/x/tools/internal/analysis/driverutil/readfile.go new file mode 100644 index 0000000000..dc1d54dd8b --- /dev/null +++ 
b/src/cmd/vendor/golang.org/x/tools/internal/analysis/driverutil/readfile.go @@ -0,0 +1,43 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package driverutil + +// This file defines helpers for implementing [analysis.Pass.ReadFile]. + +import ( + "fmt" + "slices" + + "golang.org/x/tools/go/analysis" +) + +// A ReadFileFunc is a function that returns the +// contents of a file, such as [os.ReadFile]. +type ReadFileFunc = func(filename string) ([]byte, error) + +// CheckedReadFile returns a wrapper around a Pass.ReadFile +// function that performs the appropriate checks. +func CheckedReadFile(pass *analysis.Pass, readFile ReadFileFunc) ReadFileFunc { + return func(filename string) ([]byte, error) { + if err := CheckReadable(pass, filename); err != nil { + return nil, err + } + return readFile(filename) + } +} + +// CheckReadable enforces the access policy defined by the ReadFile field of [analysis.Pass]. +func CheckReadable(pass *analysis.Pass, filename string) error { + if slices.Contains(pass.OtherFiles, filename) || + slices.Contains(pass.IgnoredFiles, filename) { + return nil + } + for _, f := range pass.Files { + if pass.Fset.File(f.FileStart).Name() == filename { + return nil + } + } + return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename) +} diff --git a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/url.go b/src/cmd/vendor/golang.org/x/tools/internal/analysis/driverutil/url.go index 26a917a991..93b3ecfd49 100644 --- a/src/cmd/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/url.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysis/driverutil/url.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package analysisflags +package driverutil import ( "fmt" diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/src/cmd/vendor/golang.org/x/tools/internal/analysis/driverutil/validatefix.go index 970d7507f0..7efc4197d6 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysis/driverutil/validatefix.go @@ -1,71 +1,20 @@ -// Copyright 2020 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package analysisinternal provides helper functions for use in both -// the analysis drivers in go/analysis and gopls, and in various -// analyzers. -// -// TODO(adonovan): this is not ideal as it may lead to unnecessary -// dependencies between drivers and analyzers. Split into analyzerlib -// and driverlib? -package analysisinternal +package driverutil + +// This file defines the validation of SuggestedFixes. import ( "cmp" "fmt" "go/token" - "os" "slices" "golang.org/x/tools/go/analysis" ) -// ReadFile reads a file and adds it to the FileSet in pass -// so that we can report errors against it using lineStart. -func ReadFile(pass *analysis.Pass, filename string) ([]byte, *token.File, error) { - readFile := pass.ReadFile - if readFile == nil { - readFile = os.ReadFile - } - content, err := readFile(filename) - if err != nil { - return nil, nil, err - } - tf := pass.Fset.AddFile(filename, -1, len(content)) - tf.SetLinesForContent(content) - return content, tf, nil -} - -// A ReadFileFunc is a function that returns the -// contents of a file, such as [os.ReadFile]. -type ReadFileFunc = func(filename string) ([]byte, error) - -// CheckedReadFile returns a wrapper around a Pass.ReadFile -// function that performs the appropriate checks. 
-func CheckedReadFile(pass *analysis.Pass, readFile ReadFileFunc) ReadFileFunc { - return func(filename string) ([]byte, error) { - if err := CheckReadable(pass, filename); err != nil { - return nil, err - } - return readFile(filename) - } -} - -// CheckReadable enforces the access policy defined by the ReadFile field of [analysis.Pass]. -func CheckReadable(pass *analysis.Pass, filename string) error { - if slices.Contains(pass.OtherFiles, filename) || - slices.Contains(pass.IgnoredFiles, filename) { - return nil - } - for _, f := range pass.Files { - if pass.Fset.File(f.FileStart).Name() == filename { - return nil - } - } - return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename) -} - // ValidateFixes validates the set of fixes for a single diagnostic. // Any error indicates a bug in the originating analyzer. // @@ -167,14 +116,3 @@ func validateFix(fset *token.FileSet, fix *analysis.SuggestedFix) error { return nil } - -// Range returns an [analysis.Range] for the specified start and end positions. -func Range(pos, end token.Pos) analysis.Range { - return tokenRange{pos, end} -} - -// tokenRange is an implementation of the [analysis.Range] interface. 
-type tokenRange struct{ StartPos, EndPos token.Pos } - -func (r tokenRange) Pos() token.Pos { return r.StartPos } -func (r tokenRange) End() token.Pos { return r.EndPos } diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/typeindex/typeindex.go b/src/cmd/vendor/golang.org/x/tools/internal/analysis/typeindex/typeindex.go index bba21c6ea0..41146d9abb 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/typeindex/typeindex.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/analysis/typeindex/typeindex.go @@ -22,12 +22,12 @@ import ( var Analyzer = &analysis.Analyzer{ Name: "typeindex", Doc: "indexes of type information for later passes", - URL: "https://pkg.go.dev/golang.org/x/tools/internal/analysisinternal/typeindex", + URL: "https://pkg.go.dev/golang.org/x/tools/internal/analysis/typeindex", Run: func(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) return typeindex.New(inspect, pass.Pkg, pass.TypesInfo), nil }, RunDespiteErrors: true, Requires: []*analysis.Analyzer{inspect.Analyzer}, - ResultType: reflect.TypeOf(new(typeindex.Index)), + ResultType: reflect.TypeFor[*typeindex.Index](), } diff --git a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/generated/generated.go b/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/generated/generated.go deleted file mode 100644 index 13e1b69021..0000000000 --- a/src/cmd/vendor/golang.org/x/tools/internal/analysisinternal/generated/generated.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package generated defines an analyzer whose result makes it -// convenient to skip diagnostics within generated files. 
-package generated - -import ( - "go/ast" - "go/token" - "reflect" - - "golang.org/x/tools/go/analysis" -) - -var Analyzer = &analysis.Analyzer{ - Name: "generated", - Doc: "detect which Go files are generated", - URL: "https://pkg.go.dev/golang.org/x/tools/internal/analysisinternal/generated", - ResultType: reflect.TypeFor[*Result](), - Run: func(pass *analysis.Pass) (any, error) { - set := make(map[*token.File]bool) - for _, file := range pass.Files { - if ast.IsGenerated(file) { - set[pass.Fset.File(file.FileStart)] = true - } - } - return &Result{fset: pass.Fset, generatedFiles: set}, nil - }, -} - -type Result struct { - fset *token.FileSet - generatedFiles map[*token.File]bool -} - -// IsGenerated reports whether the position is within a generated file. -func (r *Result) IsGenerated(pos token.Pos) bool { - return r.generatedFiles[r.fset.File(pos)] -} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/free.go b/src/cmd/vendor/golang.org/x/tools/internal/astutil/free/free.go index e3cf313a8a..2c4d2c4e52 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/free.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/astutil/free/free.go @@ -2,18 +2,22 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Copied, with considerable changes, from go/parser/resolver.go -// at af53bd2c03. - -package inline +// Package free defines utilities for computing the free variables of +// a syntax tree without type information. This is inherently +// heuristic because of the T{f: x} ambiguity, in which f may or may +// not be a lexical reference depending on whether T is a struct type. +package free import ( "go/ast" "go/token" ) -// freeishNames computes an approximation to the free names of the AST -// at node n based solely on syntax, inserting values into the map. +// Copied, with considerable changes, from go/parser/resolver.go +// at af53bd2c03. 
+ +// Names computes an approximation to the set of free names of the AST +// at node n based solely on syntax. // // In the absence of composite literals, the set of free names is exact. Composite // literals introduce an ambiguity that can only be resolved with type information: @@ -26,15 +30,22 @@ import ( // a struct type, so freeishNames underapproximates: the resulting set // may omit names that are free lexical references. // +// TODO(adonovan): includeComplitIdents is a crude hammer: the caller +// may have partial or heuristic information about whether a given T +// is struct type. Replace includeComplitIdents with a hook to query +// the caller. +// // The code is based on go/parser.resolveFile, but heavily simplified. Crucial // differences are: // - Instead of resolving names to their objects, this function merely records // whether they are free. // - Labels are ignored: they do not refer to values. -// - This is never called on FuncDecls or ImportSpecs, so the function -// panics if it sees one. -func freeishNames(free map[string]bool, n ast.Node, includeComplitIdents bool) { - v := &freeVisitor{free: free, includeComplitIdents: includeComplitIdents} +// - This is never called on ImportSpecs, so the function panics if it sees one. +func Names(n ast.Node, includeComplitIdents bool) map[string]bool { + v := &freeVisitor{ + free: make(map[string]bool), + includeComplitIdents: includeComplitIdents, + } // Begin with a scope, even though n might not be a form that establishes a scope. // For example, n might be: // x := ... @@ -42,7 +53,10 @@ func freeishNames(free map[string]bool, n ast.Node, includeComplitIdents bool) { v.openScope() ast.Walk(v, n) v.closeScope() - assert(v.scope == nil, "unbalanced scopes") + if v.scope != nil { + panic("unbalanced scopes") + } + return v.free } // A freeVisitor holds state for a free-name analysis. @@ -73,12 +87,12 @@ func (v *freeVisitor) Visit(n ast.Node) ast.Visitor { // Expressions. 
case *ast.Ident: - v.resolve(n) + v.use(n) case *ast.FuncLit: v.openScope() defer v.closeScope() - v.walkFuncType(n.Type) + v.walkFuncType(nil, n.Type) v.walkBody(n.Body) case *ast.SelectorExpr: @@ -93,7 +107,7 @@ func (v *freeVisitor) Visit(n ast.Node) ast.Visitor { case *ast.FuncType: v.openScope() defer v.closeScope() - v.walkFuncType(n) + v.walkFuncType(nil, n) case *ast.CompositeLit: v.walk(n.Type) @@ -107,7 +121,7 @@ func (v *freeVisitor) Visit(n ast.Node) ast.Visitor { if v.includeComplitIdents { // Over-approximate by treating both cases as potentially // free names. - v.resolve(ident) + v.use(ident) } else { // Under-approximate by ignoring potentially free names. } @@ -135,13 +149,11 @@ func (v *freeVisitor) Visit(n ast.Node) ast.Visitor { } case *ast.LabeledStmt: - // ignore labels - // TODO(jba): consider labels? + // Ignore labels. v.walk(n.Stmt) case *ast.BranchStmt: // Ignore labels. - // TODO(jba): consider labels? case *ast.BlockStmt: v.openScope() @@ -170,13 +182,11 @@ func (v *freeVisitor) Visit(n ast.Node) ast.Visitor { v.walkBody(n.Body) case *ast.TypeSwitchStmt: + v.openScope() + defer v.closeScope() if n.Init != nil { - v.openScope() - defer v.closeScope() v.walk(n.Init) } - v.openScope() - defer v.closeScope() v.walk(n.Assign) // We can use walkBody here because we don't track label scopes. v.walkBody(n.Body) @@ -225,11 +235,10 @@ func (v *freeVisitor) Visit(n ast.Node) ast.Visitor { for _, spec := range n.Specs { spec := spec.(*ast.ValueSpec) walkSlice(v, spec.Values) - if spec.Type != nil { - v.walk(spec.Type) - } + v.walk(spec.Type) v.declare(spec.Names...) 
} + case token.TYPE: for _, spec := range n.Specs { spec := spec.(*ast.TypeSpec) @@ -250,7 +259,14 @@ func (v *freeVisitor) Visit(n ast.Node) ast.Visitor { } case *ast.FuncDecl: - panic("encountered top-level function declaration in free analysis") + if n.Recv == nil && n.Name.Name != "init" { // package-level function + v.declare(n.Name) + } + v.openScope() + defer v.closeScope() + v.walkTypeParams(n.Type.TypeParams) + v.walkFuncType(n.Recv, n.Type) + v.walkBody(n.Body) default: return v @@ -259,67 +275,90 @@ func (v *freeVisitor) Visit(n ast.Node) ast.Visitor { return nil } -func (r *freeVisitor) openScope() { - r.scope = &scope{map[string]bool{}, r.scope} +func (v *freeVisitor) openScope() { + v.scope = &scope{map[string]bool{}, v.scope} } -func (r *freeVisitor) closeScope() { - r.scope = r.scope.outer +func (v *freeVisitor) closeScope() { + v.scope = v.scope.outer } -func (r *freeVisitor) walk(n ast.Node) { +func (v *freeVisitor) walk(n ast.Node) { if n != nil { - ast.Walk(r, n) + ast.Walk(v, n) } } -// walkFuncType walks a function type. It is used for explicit -// function types, like this: -// -// type RunFunc func(context.Context) error -// -// and function literals, like this: -// -// func(a, b int) int { return a + b} -// -// neither of which have type parameters. -// Function declarations do involve type parameters, but we don't -// handle them. -func (r *freeVisitor) walkFuncType(typ *ast.FuncType) { - // The order here doesn't really matter, because names in - // a field list cannot appear in types. - // (The situation is different for type parameters, for which - // see [freeVisitor.walkTypeParams].) - r.resolveFieldList(typ.Params) - r.resolveFieldList(typ.Results) - r.declareFieldList(typ.Params) - r.declareFieldList(typ.Results) +func (v *freeVisitor) walkFuncType(recv *ast.FieldList, typ *ast.FuncType) { + // First use field types... 
+ v.walkRecvFieldType(recv) + v.walkFieldTypes(typ.Params) + v.walkFieldTypes(typ.Results) + + // ...then declare field names. + v.declareFieldNames(recv) + v.declareFieldNames(typ.Params) + v.declareFieldNames(typ.Results) +} + +// A receiver field is not like a param or result field because +// "func (recv R[T]) method()" uses R but declares T. +func (v *freeVisitor) walkRecvFieldType(list *ast.FieldList) { + if list == nil { + return + } + for _, f := range list.List { // valid => len=1 + typ := f.Type + if ptr, ok := typ.(*ast.StarExpr); ok { + typ = ptr.X + } + + // Analyze receiver type as Base[Index, ...] + var ( + base ast.Expr + indices []ast.Expr + ) + switch typ := typ.(type) { + case *ast.IndexExpr: // B[T] + base, indices = typ.X, []ast.Expr{typ.Index} + case *ast.IndexListExpr: // B[K, V] + base, indices = typ.X, typ.Indices + default: // B + base = typ + } + for _, expr := range indices { + if id, ok := expr.(*ast.Ident); ok { + v.declare(id) + } + } + v.walk(base) + } } // walkTypeParams is like walkFieldList, but declares type parameters eagerly so // that they may be resolved in the constraint expressions held in the field // Type. 
-func (r *freeVisitor) walkTypeParams(list *ast.FieldList) { - r.declareFieldList(list) - r.resolveFieldList(list) +func (v *freeVisitor) walkTypeParams(list *ast.FieldList) { + v.declareFieldNames(list) + v.walkFieldTypes(list) // constraints } -func (r *freeVisitor) walkBody(body *ast.BlockStmt) { +func (v *freeVisitor) walkBody(body *ast.BlockStmt) { if body == nil { return } - walkSlice(r, body.List) + walkSlice(v, body.List) } -func (r *freeVisitor) walkFieldList(list *ast.FieldList) { +func (v *freeVisitor) walkFieldList(list *ast.FieldList) { if list == nil { return } - r.resolveFieldList(list) // .Type may contain references - r.declareFieldList(list) // .Names declares names + v.walkFieldTypes(list) // .Type may contain references + v.declareFieldNames(list) // .Names declares names } -func (r *freeVisitor) shortVarDecl(lhs []ast.Expr) { +func (v *freeVisitor) shortVarDecl(lhs []ast.Expr) { // Go spec: A short variable declaration may redeclare variables provided // they were originally declared in the same block with the same type, and // at least one of the non-blank variables is new. @@ -330,7 +369,7 @@ func (r *freeVisitor) shortVarDecl(lhs []ast.Expr) { // In a well-formed program each expr must be an identifier, // but be forgiving. if id, ok := x.(*ast.Ident); ok { - r.declare(id) + v.declare(id) } } } @@ -341,42 +380,39 @@ func walkSlice[S ~[]E, E ast.Node](r *freeVisitor, list S) { } } -// resolveFieldList resolves the types of the fields in list. -// The companion method declareFieldList declares the names of the fields. -func (r *freeVisitor) resolveFieldList(list *ast.FieldList) { - if list == nil { - return - } - for _, f := range list.List { - r.walk(f.Type) +// walkFieldTypes resolves the types of the walkFieldTypes in list. +// The companion method declareFieldList declares the names of the walkFieldTypes. 
+func (v *freeVisitor) walkFieldTypes(list *ast.FieldList) { + if list != nil { + for _, f := range list.List { + v.walk(f.Type) + } } } -// declareFieldList declares the names of the fields in list. +// declareFieldNames declares the names of the fields in list. // (Names in a FieldList always establish new bindings.) // The companion method resolveFieldList resolves the types of the fields. -func (r *freeVisitor) declareFieldList(list *ast.FieldList) { - if list == nil { - return - } - for _, f := range list.List { - r.declare(f.Names...) +func (v *freeVisitor) declareFieldNames(list *ast.FieldList) { + if list != nil { + for _, f := range list.List { + v.declare(f.Names...) + } } } -// resolve marks ident as free if it is not in scope. -// TODO(jba): rename: no resolution is happening. -func (r *freeVisitor) resolve(ident *ast.Ident) { - if s := ident.Name; s != "_" && !r.scope.defined(s) { - r.free[s] = true +// use marks ident as free if it is not in scope. +func (v *freeVisitor) use(ident *ast.Ident) { + if s := ident.Name; s != "_" && !v.scope.defined(s) { + v.free[s] = true } } // declare adds each non-blank ident to the current scope. -func (r *freeVisitor) declare(idents ...*ast.Ident) { +func (v *freeVisitor) declare(idents ...*ast.Ident) { for _, id := range idents { if id.Name != "_" { - r.scope.names[id.Name] = true + v.scope.names[id.Name] = true } } } diff --git a/src/cmd/vendor/golang.org/x/tools/internal/astutil/stringlit.go b/src/cmd/vendor/golang.org/x/tools/internal/astutil/stringlit.go index 849d45d853..ce1e7de882 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/astutil/stringlit.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/astutil/stringlit.go @@ -14,16 +14,16 @@ import ( // RangeInStringLiteral calculates the positional range within a string literal // corresponding to the specified start and end byte offsets within the logical string. 
-func RangeInStringLiteral(lit *ast.BasicLit, start, end int) (token.Pos, token.Pos, error) { +func RangeInStringLiteral(lit *ast.BasicLit, start, end int) (Range, error) { startPos, err := PosInStringLiteral(lit, start) if err != nil { - return 0, 0, fmt.Errorf("start: %v", err) + return Range{}, fmt.Errorf("start: %v", err) } endPos, err := PosInStringLiteral(lit, end) if err != nil { - return 0, 0, fmt.Errorf("end: %v", err) + return Range{}, fmt.Errorf("end: %v", err) } - return startPos, endPos, nil + return Range{startPos, endPos}, nil } // PosInStringLiteral returns the position within a string literal diff --git a/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go b/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go index a1c0983504..7a02fca21e 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/astutil/util.go @@ -5,6 +5,7 @@ package astutil import ( + "fmt" "go/ast" "go/printer" "go/token" @@ -50,28 +51,26 @@ func PreorderStack(root ast.Node, stack []ast.Node, f func(n ast.Node, stack []a } // NodeContains reports whether the Pos/End range of node n encloses -// the given position pos. +// the given range. // // It is inclusive of both end points, to allow hovering (etc) when // the cursor is immediately after a node. // -// For unfortunate historical reasons, the Pos/End extent of an -// ast.File runs from the start of its package declaration---excluding -// copyright comments, build tags, and package documentation---to the -// end of its last declaration, excluding any trailing comments. So, -// as a special case, if n is an [ast.File], NodeContains uses -// n.FileStart <= pos && pos <= n.FileEnd to report whether the -// position lies anywhere within the file. +// Like [NodeRange], it treats the range of an [ast.File] as the +// file's complete extent. // // Precondition: n must not be nil. 
-func NodeContains(n ast.Node, pos token.Pos) bool { - var start, end token.Pos - if file, ok := n.(*ast.File); ok { - start, end = file.FileStart, file.FileEnd // entire file - } else { - start, end = n.Pos(), n.End() - } - return start <= pos && pos <= end +func NodeContains(n ast.Node, rng Range) bool { + return NodeRange(n).Contains(rng) +} + +// NodeContainPos reports whether the Pos/End range of node n encloses +// the given pos. +// +// Like [NodeRange], it treats the range of an [ast.File] as the +// file's complete extent. +func NodeContainsPos(n ast.Node, pos token.Pos) bool { + return NodeRange(n).ContainsPos(pos) } // IsChildOf reports whether cur.ParentEdge is ek. @@ -117,3 +116,118 @@ func Format(fset *token.FileSet, n ast.Node) string { printer.Fprint(&buf, fset, n) // ignore errors return buf.String() } + +// -- Range -- + +// Range is a Pos interval. +// It implements [analysis.Range] and [ast.Node]. +type Range struct{ Start, EndPos token.Pos } + +// RangeOf constructs a Range. +// +// RangeOf exists to pacify the "unkeyed literal" (composites) vet +// check. It would be nice if there were a way for a type to add +// itself to the allowlist. +func RangeOf(start, end token.Pos) Range { return Range{start, end} } + +// NodeRange returns the extent of node n as a Range. +// +// For unfortunate historical reasons, the Pos/End extent of an +// ast.File runs from the start of its package declaration---excluding +// copyright comments, build tags, and package documentation---to the +// end of its last declaration, excluding any trailing comments. So, +// as a special case, if n is an [ast.File], NodeContains uses +// n.FileStart <= pos && pos <= n.FileEnd to report whether the +// position lies anywhere within the file. 
+func NodeRange(n ast.Node) Range { + if file, ok := n.(*ast.File); ok { + return Range{file.FileStart, file.FileEnd} // entire file + } + return Range{n.Pos(), n.End()} +} + +func (r Range) Pos() token.Pos { return r.Start } +func (r Range) End() token.Pos { return r.EndPos } + +// ContainsPos reports whether the range (inclusive of both end points) +// includes the specified position. +func (r Range) ContainsPos(pos token.Pos) bool { + return r.Contains(RangeOf(pos, pos)) +} + +// Contains reports whether the range (inclusive of both end points) +// includes the specified range. +func (r Range) Contains(rng Range) bool { + return r.Start <= rng.Start && rng.EndPos <= r.EndPos +} + +// IsValid reports whether the range is valid. +func (r Range) IsValid() bool { return r.Start.IsValid() && r.Start <= r.EndPos } + +// -- + +// Select returns the syntax nodes identified by a user's text +// selection. It returns three nodes: the innermost node that wholly +// encloses the selection; and the first and last nodes that are +// wholly enclosed by the selection. +// +// For example, given this selection: +// +// { f(); g(); /* comment */ } +// ~~~~~~~~~~~ +// +// Select returns the enclosing BlockStmt, the f() CallExpr, and the g() CallExpr. +// +// Callers that require exactly one syntax tree (e.g. just f() or just +// g()) should check that the returned start and end nodes are +// identical. +// +// This function is intended to be called early in the handling of a +// user's request, since it is tolerant of sloppy selection including +// extraneous whitespace and comments. Use it in new code instead of +// PathEnclosingInterval. When the exact extent of a node is known, +// use [Cursor.FindByPos] instead. 
+func Select(curFile inspector.Cursor, start, end token.Pos) (_enclosing, _start, _end inspector.Cursor, _ error) { + curEnclosing, ok := curFile.FindByPos(start, end) + if !ok { + return noCursor, noCursor, noCursor, fmt.Errorf("invalid selection") + } + + // Find the first and last node wholly within the (start, end) range. + // We'll narrow the effective selection to them, to exclude whitespace. + // (This matches the functionality of PathEnclosingInterval.) + var curStart, curEnd inspector.Cursor + rng := RangeOf(start, end) + for cur := range curEnclosing.Preorder() { + if rng.Contains(NodeRange(cur.Node())) { + // The start node has the least Pos. + if !CursorValid(curStart) { + curStart = cur + } + // The end node has the greatest End. + // End positions do not change monotonically, + // so we must compute the max. + if !CursorValid(curEnd) || + cur.Node().End() > curEnd.Node().End() { + curEnd = cur + } + } + } + if !CursorValid(curStart) { + return noCursor, noCursor, noCursor, fmt.Errorf("no syntax selected") + } + return curEnclosing, curStart, curEnd, nil +} + +// CursorValid reports whether the cursor is valid. +// +// A valid cursor may yet be the virtual root node, +// cur.Inspector.Root(), which has no [Cursor.Node]. +// +// TODO(adonovan): move to cursorutil package, and move that package into x/tools. +// Ultimately, make this a method of Cursor. Needs a proposal. +func CursorValid(cur inspector.Cursor) bool { + return cur.Inspector() != nil +} + +var noCursor inspector.Cursor diff --git a/src/cmd/vendor/golang.org/x/tools/internal/cfginternal/cfginternal.go b/src/cmd/vendor/golang.org/x/tools/internal/cfginternal/cfginternal.go new file mode 100644 index 0000000000..a9b6236f4d --- /dev/null +++ b/src/cmd/vendor/golang.org/x/tools/internal/cfginternal/cfginternal.go @@ -0,0 +1,16 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package cfginternal exposes internals of go/cfg. +// It cannot actually depend on symbols from go/cfg. +package cfginternal + +// IsNoReturn exposes (*cfg.CFG).noReturn to the ctrlflow analyzer. +// TODO(adonovan): add CFG.NoReturn to the public API. +// +// You must link [golang.org/x/tools/go/cfg] into your application for +// this function to be non-nil. +var IsNoReturn = func(cfg any) bool { + panic("golang.org/x/tools/go/cfg not linked into application") +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go index 7b7c5cc677..5acc68e1db 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/diff/lcs/old.go @@ -16,10 +16,6 @@ type Diff struct { ReplStart, ReplEnd int // offset of replacement text in B } -// DiffStrings returns the differences between two strings. -// It does not respect rune boundaries. -func DiffStrings(a, b string) []Diff { return diff(stringSeqs{a, b}) } - // DiffBytes returns the differences between two byte sequences. // It does not respect rune boundaries. func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) } diff --git a/src/cmd/vendor/golang.org/x/tools/internal/facts/imports.go b/src/cmd/vendor/golang.org/x/tools/internal/facts/imports.go index cc9383e800..324010b475 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/facts/imports.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/facts/imports.go @@ -53,8 +53,8 @@ func importMap(imports []*types.Package) map[string]*types.Package { case typesinternal.NamedOrAlias: // *types.{Named,Alias} // Add the type arguments if this is an instance. 
if targs := T.TypeArgs(); targs.Len() > 0 { - for i := 0; i < targs.Len(); i++ { - addType(targs.At(i)) + for t := range targs.Types() { + addType(t) } } @@ -70,8 +70,8 @@ func importMap(imports []*types.Package) map[string]*types.Package { // common aspects addObj(T.Obj()) if tparams := T.TypeParams(); tparams.Len() > 0 { - for i := 0; i < tparams.Len(); i++ { - addType(tparams.At(i)) + for tparam := range tparams.TypeParams() { + addType(tparam) } } @@ -81,8 +81,8 @@ func importMap(imports []*types.Package) map[string]*types.Package { addType(aliases.Rhs(T)) case *types.Named: addType(T.Underlying()) - for i := 0; i < T.NumMethods(); i++ { - addObj(T.Method(i)) + for method := range T.Methods() { + addObj(method) } } } @@ -101,28 +101,28 @@ func importMap(imports []*types.Package) map[string]*types.Package { addType(T.Params()) addType(T.Results()) if tparams := T.TypeParams(); tparams != nil { - for i := 0; i < tparams.Len(); i++ { - addType(tparams.At(i)) + for tparam := range tparams.TypeParams() { + addType(tparam) } } case *types.Struct: - for i := 0; i < T.NumFields(); i++ { - addObj(T.Field(i)) + for field := range T.Fields() { + addObj(field) } case *types.Tuple: - for i := 0; i < T.Len(); i++ { - addObj(T.At(i)) + for v := range T.Variables() { + addObj(v) } case *types.Interface: - for i := 0; i < T.NumMethods(); i++ { - addObj(T.Method(i)) + for method := range T.Methods() { + addObj(method) } - for i := 0; i < T.NumEmbeddeds(); i++ { - addType(T.EmbeddedType(i)) // walk Embedded for implicits + for etyp := range T.EmbeddedTypes() { + addType(etyp) // walk Embedded for implicits } case *types.Union: - for i := 0; i < T.Len(); i++ { - addType(T.Term(i).Type()) + for term := range T.Terms() { + addType(term.Type()) } case *types.TypeParam: if !typs[T] { diff --git a/src/cmd/vendor/golang.org/x/tools/internal/goplsexport/export.go b/src/cmd/vendor/golang.org/x/tools/internal/goplsexport/export.go index 2764a97fc7..bca4d8a0b0 100644 --- 
a/src/cmd/vendor/golang.org/x/tools/internal/goplsexport/export.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/goplsexport/export.go @@ -12,4 +12,5 @@ var ( ErrorsAsTypeModernizer *analysis.Analyzer // = modernize.errorsastypeAnalyzer StdIteratorsModernizer *analysis.Analyzer // = modernize.stditeratorsAnalyzer PlusBuildModernizer *analysis.Analyzer // = modernize.plusbuildAnalyzer + StringsCutModernizer *analysis.Analyzer // = modernize.stringscutAnalyzer ) diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go index 6df01d8ef9..9b96b1dbf1 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/delete.go @@ -331,109 +331,192 @@ func DeleteDecl(tokFile *token.File, curDecl inspector.Cursor) []analysis.TextEd } } +// find leftmost Pos bigger than start and rightmost less than end +func filterPos(nds []*ast.Comment, start, end token.Pos) (token.Pos, token.Pos, bool) { + l, r := end, token.NoPos + ok := false + for _, n := range nds { + if n.Pos() > start && n.Pos() < l { + l = n.Pos() + ok = true + } + if n.End() <= end && n.End() > r { + r = n.End() + ok = true + } + } + return l, r, ok +} + // DeleteStmt returns the edits to remove the [ast.Stmt] identified by -// curStmt, if it is contained within a BlockStmt, CaseClause, -// CommClause, or is the STMT in switch STMT; ... {...}. It returns nil otherwise. -func DeleteStmt(tokFile *token.File, curStmt inspector.Cursor) []analysis.TextEdit { - stmt := curStmt.Node().(ast.Stmt) - // if the stmt is on a line by itself delete the whole line - // otherwise just delete the statement. +// curStmt if it recognizes the context. It returns nil otherwise. 
+// TODO(pjw, adonovan): it should not return nil, it should return an error +// +// DeleteStmt is called with just the AST so it has trouble deciding if +// a comment is associated with the statement to be deleted. For instance, +// +// for /*A*/ init()/*B*/;/*C/cond()/*D/;/*E*/post() /*F*/ { /*G*/} +// +// comment B and C are indistinguishable, as are D and E. That is, as the +// AST does not say where the semicolons are, B and C could go either +// with the init() or the cond(), so cannot be removed safely. The same +// is true for D, E, and the post(). (And there are other similar cases.) +// But the other comments can be removed as they are unambiguously +// associated with the statement being deleted. In particular, +// it removes whole lines like +// +// stmt // comment +func DeleteStmt(file *token.File, curStmt inspector.Cursor) []analysis.TextEdit { + // if the stmt is on a line by itself, or a range of lines, delete the whole thing + // including comments. Except for the heads of switches, type + // switches, and for-statements that's the usual case. Complexity occurs where + // there are multiple statements on the same line, and adjacent comments. + + // In that case we remove some adjacent comments: + // In me()/*A*/;b(), comment A cannot be removed, because the ast + // is indistinguishable from me();/*A*/b() + // and the same for cases like switch me()/*A*/; x.(type) { - // this logic would be a lot simpler with the file contents, and somewhat simpler - // if the cursors included the comments. 
+ // this would be more precise with the file contents, or if the ast + // contained the location of semicolons + var ( + stmt = curStmt.Node().(ast.Stmt) + tokFile = file + lineOf = tokFile.Line + stmtStartLine = lineOf(stmt.Pos()) + stmtEndLine = lineOf(stmt.End()) - lineOf := tokFile.Line - stmtStartLine, stmtEndLine := lineOf(stmt.Pos()), lineOf(stmt.End()) + leftSyntax, rightSyntax token.Pos // pieces of parent node on stmt{Start,End}Line + leftComments, rightComments []*ast.Comment // comments before/after stmt on the same line + ) - var from, to token.Pos - // bounds of adjacent syntax/comments on same line, if any - limits := func(left, right token.Pos) { + // remember the Pos that are on the same line as stmt + use := func(left, right token.Pos) { if lineOf(left) == stmtStartLine { - from = left + leftSyntax = left } if lineOf(right) == stmtEndLine { - to = right + rightSyntax = right } } - // TODO(pjw): there are other places a statement might be removed: - // IfStmt = "if" [ SimpleStmt ";" ] Expression Block [ "else" ( IfStmt | Block ) ] . - // (removing the blocks requires more rewriting than this routine would do) - // CommCase = "case" ( SendStmt | RecvStmt ) | "default" . 
- // (removing the stmt requires more rewriting, and it's unclear what the user means) - switch parent := curStmt.Parent().Node().(type) { - case *ast.SwitchStmt: - limits(parent.Switch, parent.Body.Lbrace) - case *ast.TypeSwitchStmt: - limits(parent.Switch, parent.Body.Lbrace) - if parent.Assign == stmt { - return nil // don't let the user break the type switch + + // find the comments, if any, on the same line +Big: + for _, cg := range astutil.EnclosingFile(curStmt).Comments { + for _, co := range cg.List { + if lineOf(co.End()) < stmtStartLine { + continue + } else if lineOf(co.Pos()) > stmtEndLine { + break Big // no more are possible + } + if lineOf(co.End()) == stmtStartLine && co.End() <= stmt.Pos() { + // comment is before the statement + leftComments = append(leftComments, co) + } else if lineOf(co.Pos()) == stmtEndLine && co.Pos() >= stmt.End() { + // comment is after the statement + rightComments = append(rightComments, co) + } } + } + + // find any other syntax on the same line + var ( + leftStmt, rightStmt token.Pos // end/start positions of sibling statements in a []Stmt list + inStmtList = false + curParent = curStmt.Parent() + ) + switch parent := curParent.Node().(type) { case *ast.BlockStmt: - limits(parent.Lbrace, parent.Rbrace) + use(parent.Lbrace, parent.Rbrace) + inStmtList = true + case *ast.CaseClause: + use(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) + inStmtList = true case *ast.CommClause: - limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) if parent.Comm == stmt { return nil // maybe the user meant to remove the entire CommClause? 
} - case *ast.CaseClause: - limits(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) + use(parent.Colon, curStmt.Parent().Parent().Node().(*ast.BlockStmt).Rbrace) + inStmtList = true case *ast.ForStmt: - limits(parent.For, parent.Body.Lbrace) - + use(parent.For, parent.Body.Lbrace) + // special handling, as init;cond;post BlockStmt is not a statment list + if parent.Init != nil && parent.Cond != nil && stmt == parent.Init && lineOf(parent.Cond.Pos()) == lineOf(stmt.End()) { + rightStmt = parent.Cond.Pos() + } else if parent.Post != nil && parent.Cond != nil && stmt == parent.Post && lineOf(parent.Cond.End()) == lineOf(stmt.Pos()) { + leftStmt = parent.Cond.End() + } + case *ast.IfStmt: + switch stmt { + case parent.Init: + use(parent.If, parent.Body.Lbrace) + case parent.Else: + // stmt is the {...} in "if cond {} else {...}" and removing + // it would require removing the 'else' keyword, but the ast + // does not contain its position. + return nil + } + case *ast.SwitchStmt: + use(parent.Switch, parent.Body.Lbrace) + case *ast.TypeSwitchStmt: + if stmt == parent.Assign { + return nil // don't remove .(type) + } + use(parent.Switch, parent.Body.Lbrace) default: return nil // not one of ours } - if prev, found := curStmt.PrevSibling(); found && lineOf(prev.Node().End()) == stmtStartLine { - from = prev.Node().End() // preceding statement ends on same line - } - if next, found := curStmt.NextSibling(); found && lineOf(next.Node().Pos()) == stmtEndLine { - to = next.Node().Pos() // following statement begins on same line - } - // and now for the comments -Outer: - for _, cg := range astutil.EnclosingFile(curStmt).Comments { - for _, co := range cg.List { - if lineOf(co.End()) < stmtStartLine { - continue - } else if lineOf(co.Pos()) > stmtEndLine { - break Outer // no more are possible - } - if lineOf(co.End()) == stmtStartLine && co.End() < stmt.Pos() { - if !from.IsValid() || co.End() > from { - from = co.End() - continue // maybe there are 
more - } - } - if lineOf(co.Pos()) == stmtEndLine && co.Pos() > stmt.End() { - if !to.IsValid() || co.Pos() < to { - to = co.Pos() - continue // maybe there are more - } + if inStmtList { + // find the siblings, if any, on the same line + if prev, found := curStmt.PrevSibling(); found && lineOf(prev.Node().End()) == stmtStartLine { + if _, ok := prev.Node().(ast.Stmt); ok { + leftStmt = prev.Node().End() // preceding statement ends on same line } } + if next, found := curStmt.NextSibling(); found && lineOf(next.Node().Pos()) == stmtEndLine { + rightStmt = next.Node().Pos() // following statement begins on same line + } } - // if either from or to is valid, just remove the statement - // otherwise remove the line - edit := analysis.TextEdit{Pos: stmt.Pos(), End: stmt.End()} - if from.IsValid() || to.IsValid() { - // remove just the statement. - // we can't tell if there is a ; or whitespace right after the statement - // ideally we'd like to remove the former and leave the latter - // (if gofmt has run, there likely won't be a ;) - // In type switches we know there's a semicolon somewhere after the statement, - // but the extra work for this special case is not worth it, as gofmt will fix it. 
- return []analysis.TextEdit{edit} - } - // remove the whole line - for lineOf(edit.Pos) == stmtStartLine { - edit.Pos-- + + // compute the left and right limits of the edit + var leftEdit, rightEdit token.Pos + if leftStmt.IsValid() { + leftEdit = stmt.Pos() // can't remove preceding comments: a()/*A*/; me() + } else if leftSyntax.IsValid() { + // remove intervening leftComments + if a, _, ok := filterPos(leftComments, leftSyntax, stmt.Pos()); ok { + leftEdit = a + } else { + leftEdit = stmt.Pos() + } + } else { // remove whole line + for leftEdit = stmt.Pos(); lineOf(leftEdit) == stmtStartLine; leftEdit-- { + } + if leftEdit < stmt.Pos() { + leftEdit++ // beginning of line + } } - edit.Pos++ // get back tostmtStartLine - for lineOf(edit.End) == stmtEndLine { - edit.End++ + if rightStmt.IsValid() { + rightEdit = stmt.End() // can't remove following comments + } else if rightSyntax.IsValid() { + // remove intervening rightComments + if _, b, ok := filterPos(rightComments, stmt.End(), rightSyntax); ok { + rightEdit = b + } else { + rightEdit = stmt.End() + } + } else { // remove whole line + fend := token.Pos(file.Base()) + token.Pos(file.Size()) + for rightEdit = stmt.End(); fend >= rightEdit && lineOf(rightEdit) == stmtEndLine; rightEdit++ { + } + // don't remove \n if there was other stuff earlier + if leftSyntax.IsValid() || leftStmt.IsValid() { + rightEdit-- + } } - return []analysis.TextEdit{edit} + + return []analysis.TextEdit{{Pos: leftEdit, End: rightEdit}} } // DeleteUnusedVars computes the edits required to delete the diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/callee.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/callee.go index b46340c66a..ce5beb2724 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/callee.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/callee.go @@ -36,6 +36,7 @@ type gobCallee struct { // results of type analysis (does not reach go/types data 
structures) PkgPath string // package path of declaring package Name string // user-friendly name for error messages + GoVersion string // version of Go effective in callee file Unexported []string // names of free objects that are unexported FreeRefs []freeRef // locations of references to free objects FreeObjs []object // descriptions of free objects @@ -114,6 +115,24 @@ func AnalyzeCallee(logf func(string, ...any), fset *token.FileSet, pkg *types.Pa return nil, fmt.Errorf("cannot inline function %s as it has no body", name) } + // Record the file's Go goVersion so that we don't + // inline newer code into file using an older dialect. + // + // Using the file version is overly conservative. + // A more precise solution would be for the type checker to + // record which language features the callee actually needs; + // see https://go.dev/issue/75726. + // + // We don't have the ast.File handy, so instead of a + // lookup we must scan the entire FileVersions map. + var goVersion string + for file, v := range info.FileVersions { + if file.Pos() < decl.Pos() && decl.Pos() < file.End() { + goVersion = v + break + } + } + // Record the location of all free references in the FuncDecl. // (Parameters are not free by this definition.) 
var ( @@ -342,6 +361,7 @@ func AnalyzeCallee(logf func(string, ...any), fset *token.FileSet, pkg *types.Pa Content: content, PkgPath: pkg.Path(), Name: name, + GoVersion: goVersion, Unexported: unexported, FreeObjs: freeObjs, FreeRefs: freeRefs, @@ -421,11 +441,11 @@ func analyzeParams(logf func(string, ...any), fset *token.FileSet, info *types.I if sig.Recv() != nil { params = append(params, newParamInfo(sig.Recv(), false)) } - for i := 0; i < sig.Params().Len(); i++ { - params = append(params, newParamInfo(sig.Params().At(i), false)) + for v := range sig.Params().Variables() { + params = append(params, newParamInfo(v, false)) } - for i := 0; i < sig.Results().Len(); i++ { - results = append(results, newParamInfo(sig.Results().At(i), true)) + for v := range sig.Results().Variables() { + results = append(results, newParamInfo(v, true)) } } @@ -497,8 +517,8 @@ func analyzeTypeParams(_ logger, fset *token.FileSet, info *types.Info, decl *as paramInfos := make(map[*types.TypeName]*paramInfo) var params []*paramInfo collect := func(tpl *types.TypeParamList) { - for i := range tpl.Len() { - typeName := tpl.At(i).Obj() + for tparam := range tpl.TypeParams() { + typeName := tparam.Obj() info := ¶mInfo{Name: typeName.Name()} params = append(params, info) paramInfos[typeName] = info @@ -639,8 +659,7 @@ func analyzeAssignment(info *types.Info, stack []ast.Node) (assignable, ifaceAss return true, types.IsInterface(under.Elem()), false case *types.Struct: // Struct{k: expr} if id, _ := kv.Key.(*ast.Ident); id != nil { - for fi := range under.NumFields() { - field := under.Field(fi) + for field := range under.Fields() { if info.Uses[id] == field { return true, types.IsInterface(field.Type()), false } diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/inline.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/inline.go index 2443504da7..03ef0714e0 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/inline.go +++ 
b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/inline.go @@ -24,9 +24,11 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/types/typeutil" internalastutil "golang.org/x/tools/internal/astutil" + "golang.org/x/tools/internal/astutil/free" "golang.org/x/tools/internal/packagepath" "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // A Caller describes the function call and its enclosing context. @@ -424,12 +426,35 @@ func newImportState(logf func(string, ...any), caller *Caller, callee *gobCallee // For simplicity we ignore existing dot imports, so that a qualified // identifier (QI) in the callee is always represented by a QI in the caller, // allowing us to treat a QI like a selection on a package name. - is := &importState{ + ist := &importState{ logf: logf, caller: caller, importMap: make(map[string][]string), } + // Build an index of used-once PkgNames. + type pkgNameUse struct { + count int + id *ast.Ident // an arbitrary use + } + pkgNameUses := make(map[*types.PkgName]pkgNameUse) + for id, obj := range caller.Info.Uses { + if pkgname, ok := obj.(*types.PkgName); ok { + u := pkgNameUses[pkgname] + u.id = id + u.count++ + pkgNameUses[pkgname] = u + } + } + // soleUse returns the ident that refers to pkgname, if there is exactly one. + soleUse := func(pkgname *types.PkgName) *ast.Ident { + u := pkgNameUses[pkgname] + if u.count == 1 { + return u.id + } + return nil + } + for _, imp := range caller.File.Imports { if pkgName, ok := importedPkgName(caller.Info, imp); ok && pkgName.Name() != "." && @@ -448,7 +473,7 @@ func newImportState(logf func(string, ...any), caller *Caller, callee *gobCallee // need this import. Doing so eagerly simplifies the resulting logic. 
needed := true sel, ok := ast.Unparen(caller.Call.Fun).(*ast.SelectorExpr) - if ok && soleUse(caller.Info, pkgName) == sel.X { + if ok && soleUse(pkgName) == sel.X { needed = false // no longer needed by caller // Check to see if any of the inlined free objects need this package. for _, obj := range callee.FreeObjs { @@ -463,13 +488,13 @@ func newImportState(logf func(string, ...any), caller *Caller, callee *gobCallee // return value holds these. if needed { path := pkgName.Imported().Path() - is.importMap[path] = append(is.importMap[path], pkgName.Name()) + ist.importMap[path] = append(ist.importMap[path], pkgName.Name()) } else { - is.oldImports = append(is.oldImports, oldImport{pkgName: pkgName, spec: imp}) + ist.oldImports = append(ist.oldImports, oldImport{pkgName: pkgName, spec: imp}) } } } - return is + return ist } // importName finds an existing import name to use in a particular shadowing @@ -575,9 +600,8 @@ func (i *importState) localName(pkgPath, pkgName string, shadow shadowMap) strin // Since they are not relevant to removing unused imports, we instruct // freeishNames to omit composite-literal keys that are identifiers. func trimNewImports(newImports []newImport, new ast.Node) []newImport { - free := map[string]bool{} const omitComplitIdents = false - freeishNames(free, new, omitComplitIdents) + free := free.Names(new, omitComplitIdents) var res []newImport for _, ni := range newImports { if free[ni.pkgName] { @@ -677,6 +701,15 @@ func (st *state) inlineCall() (*inlineCallResult, error) { callee.Name, callee.Unexported[0]) } + // Reject cross-file inlining if callee requires a newer dialect of Go (#75726). + // (Versions default to types.Config.GoVersion, which is unset in many tests, + // though should be populated by an analysis driver.) 
+ callerGoVersion := caller.Info.FileVersions[caller.File] + if callerGoVersion != "" && callee.GoVersion != "" && versions.Before(callerGoVersion, callee.GoVersion) { + return nil, fmt.Errorf("cannot inline call to %s (declared using %s) into a file using %s", + callee.Name, callee.GoVersion, callerGoVersion) + } + // -- analyze callee's free references in caller context -- // Compute syntax path enclosing Call, innermost first (Path[0]=Call), @@ -2019,7 +2052,7 @@ func checkFalconConstraints(logf logger, params []*parameter, args []*argument, pkg.Scope().Insert(types.NewTypeName(token.NoPos, pkg, typ.Name, types.Typ[typ.Kind])) } - // Declared constants and variables for for parameters. + // Declared constants and variables for parameters. nconst := 0 for i, param := range params { name := param.info.Name @@ -2388,14 +2421,13 @@ func createBindingDecl(logf logger, caller *Caller, args []*argument, calleeDecl // (caller syntax), so we can use type info. // But Type is the untyped callee syntax, // so we have to use a syntax-only algorithm. 
- free := make(map[string]bool) + const includeComplitIdents = true + free := free.Names(spec.Type, includeComplitIdents) for _, value := range spec.Values { for name := range freeVars(caller.Info, value) { free[name] = true } } - const includeComplitIdents = true - freeishNames(free, spec.Type, includeComplitIdents) for name := range free { if names[name] { logf("binding decl would shadow free name %q", name) @@ -3456,7 +3488,7 @@ func (st *state) assignStmts(callerStmt *ast.AssignStmt, returnOperands []ast.Ex assert(callIdx == -1, "malformed (duplicative) AST") callIdx = i for j, returnOperand := range returnOperands { - freeishNames(freeNames, returnOperand, includeComplitIdents) + maps.Copy(freeNames, free.Names(returnOperand, includeComplitIdents)) rhs = append(rhs, returnOperand) if resultInfo[j]&nonTrivialResult != 0 { nonTrivial[i+j] = true @@ -3469,7 +3501,7 @@ func (st *state) assignStmts(callerStmt *ast.AssignStmt, returnOperands []ast.Ex // We must clone before clearing positions, since e came from the caller. expr = internalastutil.CloneNode(expr) clearPositions(expr) - freeishNames(freeNames, expr, includeComplitIdents) + maps.Copy(freeNames, free.Names(expr, includeComplitIdents)) rhs = append(rhs, expr) } } @@ -3727,18 +3759,4 @@ func hasNonTrivialReturn(returnInfo [][]returnOperandFlags) bool { return false } -// soleUse returns the ident that refers to obj, if there is exactly one. -func soleUse(info *types.Info, obj types.Object) (sole *ast.Ident) { - // This is not efficient, but it is called infrequently. 
- for id, obj2 := range info.Uses { - if obj2 == obj { - if sole != nil { - return nil // not unique - } - sole = id - } - } - return sole -} - type unit struct{} // for representing sets as maps diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/util.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/util.go index 205e5b6aad..5f895cce57 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/util.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/inline/util.go @@ -97,6 +97,7 @@ func checkInfoFields(info *types.Info) { assert(info.Selections != nil, "types.Info.Selections is nil") assert(info.Types != nil, "types.Info.Types is nil") assert(info.Uses != nil, "types.Info.Uses is nil") + assert(info.FileVersions != nil, "types.Info.FileVersions is nil") } // intersects reports whether the maps' key sets intersect. diff --git a/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go b/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go index 27b9750896..26bc079808 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/refactor/refactor.go @@ -17,6 +17,11 @@ import ( // FreshName returns the name of an identifier that is undefined // at the specified position, based on the preferred name. +// +// TODO(adonovan): refine this to choose a fresh name only when there +// would be a conflict with the existing declaration: it's fine to +// redeclare a name in a narrower scope so long as there are no free +// references to the outer name from within the narrower scope. 
func FreshName(scope *types.Scope, pos token.Pos, preferred string) string { newName := preferred for i := 0; ; i++ { diff --git a/src/cmd/vendor/golang.org/x/tools/internal/stdlib/deps.go b/src/cmd/vendor/golang.org/x/tools/internal/stdlib/deps.go index 96ad6c5821..581784da43 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/stdlib/deps.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -12,354 +12,508 @@ type pkginfo struct { } var deps = [...]pkginfo{ - {"archive/tar", "\x03k\x03E;\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, - {"archive/zip", "\x02\x04a\a\x03\x12\x021;\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, - {"bufio", "\x03k\x83\x01D\x14"}, - {"bytes", "n*Y\x03\fG\x02\x02"}, + {"archive/tar", "\x03n\x03E<\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, + {"archive/zip", "\x02\x04d\a\x03\x12\x021<\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, + {"bufio", "\x03n\x84\x01D\x14"}, + {"bytes", "q*Z\x03\fG\x02\x02"}, {"cmp", ""}, - {"compress/bzip2", "\x02\x02\xed\x01A"}, - {"compress/flate", "\x02l\x03\x80\x01\f\x033\x01\x03"}, - {"compress/gzip", "\x02\x04a\a\x03\x14lT"}, - {"compress/lzw", "\x02l\x03\x80\x01"}, - {"compress/zlib", "\x02\x04a\a\x03\x12\x01m"}, - {"container/heap", "\xb3\x02"}, + {"compress/bzip2", "\x02\x02\xf1\x01A"}, + {"compress/flate", "\x02o\x03\x81\x01\f\x033\x01\x03"}, + {"compress/gzip", "\x02\x04d\a\x03\x14mT"}, + {"compress/lzw", "\x02o\x03\x81\x01"}, + {"compress/zlib", "\x02\x04d\a\x03\x12\x01n"}, + {"container/heap", "\xb7\x02"}, {"container/list", ""}, {"container/ring", ""}, - {"context", "n\\m\x01\r"}, - {"crypto", "\x83\x01nC"}, - {"crypto/aes", "\x10\n\a\x93\x02"}, - {"crypto/cipher", "\x03\x1e\x01\x01\x1e\x11\x1c+X"}, - {"crypto/des", "\x10\x13\x1e-+\x9b\x01\x03"}, - {"crypto/dsa", "A\x04)\x83\x01\r"}, - {"crypto/ecdh", "\x03\v\f\x0e\x04\x15\x04\r\x1c\x83\x01"}, - {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\a\v\x05\x01\x04\f\x01\x1c\x83\x01\r\x05K\x01"}, - {"crypto/ed25519", "\x0e\x1c\x11\x06\n\a\x1c\x83\x01C"}, 
- {"crypto/elliptic", "0>\x83\x01\r9"}, - {"crypto/fips140", " \x05"}, - {"crypto/hkdf", "-\x13\x01-\x15"}, - {"crypto/hmac", "\x1a\x14\x12\x01\x111"}, - {"crypto/internal/boring", "\x0e\x02\rf"}, - {"crypto/internal/boring/bbig", "\x1a\xe4\x01M"}, - {"crypto/internal/boring/bcache", "\xb8\x02\x13"}, + {"context", "q[o\x01\r"}, + {"crypto", "\x86\x01oC"}, + {"crypto/aes", "\x10\n\t\x95\x02"}, + {"crypto/cipher", "\x03 \x01\x01\x1f\x11\x1c+Y"}, + {"crypto/des", "\x10\x15\x1f-+\x9c\x01\x03"}, + {"crypto/dsa", "D\x04)\x84\x01\r"}, + {"crypto/ecdh", "\x03\v\f\x10\x04\x16\x04\r\x1c\x84\x01"}, + {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x10\a\v\x06\x01\x04\f\x01\x1c\x84\x01\r\x05K\x01"}, + {"crypto/ed25519", "\x0e\x1e\x11\a\n\a\x1c\x84\x01C"}, + {"crypto/elliptic", "2?\x84\x01\r9"}, + {"crypto/fips140", "\"\x05"}, + {"crypto/hkdf", "/\x14\x01-\x15"}, + {"crypto/hmac", "\x1a\x16\x13\x01\x111"}, + {"crypto/internal/boring", "\x0e\x02\ri"}, + {"crypto/internal/boring/bbig", "\x1a\xe8\x01M"}, + {"crypto/internal/boring/bcache", "\xbc\x02\x13"}, {"crypto/internal/boring/sig", ""}, - {"crypto/internal/cryptotest", "\x03\r\n\x06$\x0e\x19\x06\x12\x12 \x04\a\t\x16\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, - {"crypto/internal/entropy", "F"}, - {"crypto/internal/fips140", "?/\x15\xa7\x01\v\x16"}, - {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x05\x01\x01\x05*\x92\x014"}, - {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x05\x01\x06*\x8f\x01"}, - {"crypto/internal/fips140/alias", "\xcb\x02"}, - {"crypto/internal/fips140/bigmod", "%\x18\x01\x06*\x92\x01"}, - {"crypto/internal/fips140/check", " \x0e\x06\t\x02\xb2\x01Z"}, - {"crypto/internal/fips140/check/checktest", "%\x85\x02!"}, - {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x05\b\x01(\x83\x01\x0f7"}, - {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\r1\x83\x01\x0f7"}, - {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068\x15nF"}, - {"crypto/internal/fips140/ed25519", 
"\x03\x1d\x05\x02\x04\v8\xc6\x01\x03"}, - {"crypto/internal/fips140/edwards25519", "%\a\f\x051\x92\x017"}, - {"crypto/internal/fips140/edwards25519/field", "%\x13\x051\x92\x01"}, - {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:\x15"}, - {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018\x15"}, - {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x051"}, - {"crypto/internal/fips140/nistec", "%\f\a\x051\x92\x01*\r\x14"}, - {"crypto/internal/fips140/nistec/fiat", "%\x136\x92\x01"}, - {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:\x15"}, - {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026\x15nF"}, - {"crypto/internal/fips140/sha256", "\x03\x1d\x1d\x01\x06*\x15}"}, - {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x05\x010\x92\x01K"}, - {"crypto/internal/fips140/sha512", "\x03\x1d\x1d\x01\x06*\x15}"}, - {"crypto/internal/fips140/ssh", "%^"}, - {"crypto/internal/fips140/subtle", "#\x1a\xc3\x01"}, - {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028\x15"}, - {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\t1\x15"}, - {"crypto/internal/fips140cache", "\xaa\x02\r&"}, + {"crypto/internal/constanttime", ""}, + {"crypto/internal/cryptotest", "\x03\r\n\b%\x0e\x19\x06\x12\x12 \x04\x06\t\x18\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, + {"crypto/internal/entropy", "I"}, + {"crypto/internal/entropy/v1.0.0", "B/\x93\x018\x13"}, + {"crypto/internal/fips140", "A0\xbd\x01\v\x16"}, + {"crypto/internal/fips140/aes", "\x03\x1f\x03\x02\x13\x05\x01\x01\x06*\x93\x014"}, + {"crypto/internal/fips140/aes/gcm", "\"\x01\x02\x02\x02\x11\x05\x01\a*\x90\x01"}, + {"crypto/internal/fips140/alias", "\xcf\x02"}, + {"crypto/internal/fips140/bigmod", "'\x18\x01\a*\x93\x01"}, + {"crypto/internal/fips140/check", "\"\x0e\x06\t\x02\xb4\x01Z"}, + {"crypto/internal/fips140/check/checktest", "'\x87\x02!"}, + {"crypto/internal/fips140/drbg", "\x03\x1e\x01\x01\x04\x13\x05\t\x01(\x84\x01\x0f7\x01"}, + {"crypto/internal/fips140/ecdh", 
"\x03\x1f\x05\x02\t\r2\x84\x01\x0f7"}, + {"crypto/internal/fips140/ecdsa", "\x03\x1f\x04\x01\x02\a\x02\x069\x15oF"}, + {"crypto/internal/fips140/ed25519", "\x03\x1f\x05\x02\x04\v9\xc7\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "\x1e\t\a\x112\x93\x017"}, + {"crypto/internal/fips140/edwards25519/field", "'\x13\x052\x93\x01"}, + {"crypto/internal/fips140/hkdf", "\x03\x1f\x05\t\x06;\x15"}, + {"crypto/internal/fips140/hmac", "\x03\x1f\x14\x01\x019\x15"}, + {"crypto/internal/fips140/mlkem", "\x03\x1f\x05\x02\x0e\x03\x052\xca\x01"}, + {"crypto/internal/fips140/nistec", "\x1e\t\f\f2\x93\x01*\r\x14"}, + {"crypto/internal/fips140/nistec/fiat", "'\x137\x93\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03\x1f\x05\t\x06;\x15"}, + {"crypto/internal/fips140/rsa", "\x03\x1b\x04\x04\x01\x02\r\x01\x01\x027\x15oF"}, + {"crypto/internal/fips140/sha256", "\x03\x1f\x1d\x01\a*\x15~"}, + {"crypto/internal/fips140/sha3", "\x03\x1f\x18\x05\x011\x93\x01K"}, + {"crypto/internal/fips140/sha512", "\x03\x1f\x1d\x01\a*\x15~"}, + {"crypto/internal/fips140/ssh", "'_"}, + {"crypto/internal/fips140/subtle", "\x1e\a\x1a\xc5\x01"}, + {"crypto/internal/fips140/tls12", "\x03\x1f\x05\t\x06\x029\x15"}, + {"crypto/internal/fips140/tls13", "\x03\x1f\x05\b\a\t2\x15"}, + {"crypto/internal/fips140cache", "\xae\x02\r&"}, {"crypto/internal/fips140deps", ""}, - {"crypto/internal/fips140deps/byteorder", "\x99\x01"}, - {"crypto/internal/fips140deps/cpu", "\xae\x01\a"}, - {"crypto/internal/fips140deps/godebug", "\xb6\x01"}, - {"crypto/internal/fips140hash", "5\x1b3\xc8\x01"}, - {"crypto/internal/fips140only", "'\r\x01\x01M3;"}, + {"crypto/internal/fips140deps/byteorder", "\x9c\x01"}, + {"crypto/internal/fips140deps/cpu", "\xb1\x01\a"}, + {"crypto/internal/fips140deps/godebug", "\xb9\x01"}, + {"crypto/internal/fips140deps/time", "\xc9\x02"}, + {"crypto/internal/fips140hash", "7\x1c3\xc9\x01"}, + {"crypto/internal/fips140only", ")\r\x01\x01N3<"}, {"crypto/internal/fips140test", ""}, - 
{"crypto/internal/hpke", "\x0e\x01\x01\x03\x053#+gM"}, - {"crypto/internal/impl", "\xb5\x02"}, - {"crypto/internal/randutil", "\xf1\x01\x12"}, - {"crypto/internal/sysrand", "nn! \r\r\x01\x01\f\x06"}, - {"crypto/internal/sysrand/internal/seccomp", "n"}, - {"crypto/md5", "\x0e3-\x15\x16g"}, - {"crypto/mlkem", "/"}, - {"crypto/pbkdf2", "2\x0e\x01-\x15"}, - {"crypto/rand", "\x1a\x06\a\x1a\x04\x01(\x83\x01\rM"}, - {"crypto/rc4", "#\x1e-\xc6\x01"}, - {"crypto/rsa", "\x0e\f\x01\t\x0f\r\x01\x04\x06\a\x1c\x03\x123;\f\x01"}, - {"crypto/sha1", "\x0e\f'\x03*\x15\x16\x15R"}, - {"crypto/sha256", "\x0e\f\x1aO"}, - {"crypto/sha3", "\x0e'N\xc8\x01"}, - {"crypto/sha512", "\x0e\f\x1cM"}, - {"crypto/subtle", "8\x9b\x01W"}, - {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\a\x01\r\n\x01\t\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b;\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, - {"crypto/tls/internal/fips140tls", "\x17\xa1\x02"}, - {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x012\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x038\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\n\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, - {"crypto/x509/pkix", "d\x06\a\x8d\x01G"}, - {"database/sql", "\x03\nK\x16\x03\x80\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, - {"database/sql/driver", "\ra\x03\xb4\x01\x0f\x11"}, - {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03e\x19\x02\x01+\x0f\x1f"}, - {"debug/dwarf", "\x03d\a\x03\x80\x011\x11\x01\x01"}, - {"debug/elf", "\x03\x06Q\r\a\x03e\x1a\x01,\x17\x01\x16"}, - {"debug/gosym", "\x03d\n\xc2\x01\x01\x01\x02"}, - {"debug/macho", "\x03\x06Q\r\ne\x1b,\x17\x01"}, - {"debug/pe", "\x03\x06Q\r\a\x03e\x1b,\x17\x01\x16"}, - {"debug/plan9obj", "g\a\x03e\x1b,"}, - {"embed", "n*@\x19\x01S"}, + {"crypto/internal/hpke", "\x0e\x01\x01\x03\x056#+hM"}, + {"crypto/internal/impl", "\xb9\x02"}, + {"crypto/internal/randutil", "\xf5\x01\x12"}, + {"crypto/internal/sysrand", "qo! 
\r\r\x01\x01\f\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "q"}, + {"crypto/md5", "\x0e6-\x15\x16h"}, + {"crypto/mlkem", "1"}, + {"crypto/pbkdf2", "4\x0f\x01-\x15"}, + {"crypto/rand", "\x1a\b\a\x1b\x04\x01(\x84\x01\rM"}, + {"crypto/rc4", "%\x1f-\xc7\x01"}, + {"crypto/rsa", "\x0e\f\x01\v\x0f\x0e\x01\x04\x06\a\x1c\x03\x123<\f\x01"}, + {"crypto/sha1", "\x0e\f*\x03*\x15\x16\x15S"}, + {"crypto/sha256", "\x0e\f\x1cP"}, + {"crypto/sha3", "\x0e)O\xc9\x01"}, + {"crypto/sha512", "\x0e\f\x1eN"}, + {"crypto/subtle", "\x1e\x1c\x9c\x01X"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\t\x01\r\n\x01\n\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b<\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, + {"crypto/tls/internal/fips140tls", "\x17\xa5\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x015\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x039\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\b\x02\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/pkix", "g\x06\a\x8e\x01G"}, + {"database/sql", "\x03\nN\x16\x03\x81\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, + {"database/sql/driver", "\rd\x03\xb5\x01\x0f\x11"}, + {"debug/buildinfo", "\x03[\x02\x01\x01\b\a\x03e\x1a\x02\x01+\x0f\x1f"}, + {"debug/dwarf", "\x03g\a\x03\x81\x011\x11\x01\x01"}, + {"debug/elf", "\x03\x06T\r\a\x03e\x1b\x01\f \x17\x01\x16"}, + {"debug/gosym", "\x03g\n\xc3\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06T\r\ne\x1c,\x17\x01"}, + {"debug/pe", "\x03\x06T\r\a\x03e\x1c,\x17\x01\x16"}, + {"debug/plan9obj", "j\a\x03e\x1c,"}, + {"embed", "q*A\x19\x01S"}, {"embed/internal/embedtest", ""}, {"encoding", ""}, - {"encoding/ascii85", "\xf1\x01C"}, - {"encoding/asn1", "\x03k\x03\x8c\x01\x01'\r\x02\x01\x10\x03\x01"}, - {"encoding/base32", "\xf1\x01A\x02"}, - {"encoding/base64", "\x99\x01XA\x02"}, - {"encoding/binary", "n\x83\x01\f(\r\x05"}, - {"encoding/csv", "\x02\x01k\x03\x80\x01D\x12\x02"}, - {"encoding/gob", 
"\x02`\x05\a\x03e\x1b\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, - {"encoding/hex", "n\x03\x80\x01A\x03"}, - {"encoding/json", "\x03\x01^\x04\b\x03\x80\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, - {"encoding/pem", "\x03c\b\x83\x01A\x03"}, - {"encoding/xml", "\x02\x01_\f\x03\x80\x014\x05\n\x01\x02\x10\x02"}, - {"errors", "\xca\x01\x81\x01"}, - {"expvar", "kK?\b\v\x15\r\b\x02\x03\x01\x11"}, - {"flag", "b\f\x03\x80\x01,\b\x05\b\x02\x01\x10"}, - {"fmt", "nE>\f \b\r\x02\x03\x12"}, - {"go/ast", "\x03\x01m\x0e\x01q\x03)\b\r\x02\x01"}, - {"go/build", "\x02\x01k\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\t\x19\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, - {"go/build/constraint", "n\xc6\x01\x01\x12\x02"}, - {"go/constant", "q\x0f}\x01\x024\x01\x02\x12"}, - {"go/doc", "\x04m\x01\x05\t>31\x10\x02\x01\x12\x02"}, - {"go/doc/comment", "\x03n\xc1\x01\x01\x01\x01\x12\x02"}, - {"go/format", "\x03n\x01\v\x01\x02qD"}, - {"go/importer", "s\a\x01\x01\x04\x01p9"}, - {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x04\v\x01n\x02,\x01\x05\x11\x01\f\b"}, - {"go/internal/gcimporter", "\x02o\x0f\x010\x05\x0e-,\x15\x03\x02"}, - {"go/internal/srcimporter", "q\x01\x01\n\x03\x01p,\x01\x05\x12\x02\x14"}, - {"go/parser", "\x03k\x03\x01\x02\v\x01q\x01+\x06\x12"}, - {"go/printer", "q\x01\x02\x03\tq\f \x15\x02\x01\x02\v\x05\x02"}, - {"go/scanner", "\x03n\x0fq2\x10\x01\x13\x02"}, - {"go/token", "\x04m\x83\x01>\x02\x03\x01\x0f\x02"}, - {"go/types", "\x03\x01\x06d\x03\x01\x03\b\x03\x02\x15\x1f\x061\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, - {"go/version", "\xbb\x01z"}, - {"hash", "\xf1\x01"}, - {"hash/adler32", "n\x15\x16"}, - {"hash/crc32", "n\x15\x16\x15\x89\x01\x01\x13"}, - {"hash/crc64", "n\x15\x16\x9e\x01"}, - {"hash/fnv", "n\x15\x16g"}, - {"hash/maphash", "\x83\x01\x11!\x03\x93\x01"}, - {"html", "\xb5\x02\x02\x12"}, - {"html/template", "\x03h\x06\x18-;\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, - {"image", "\x02l\x1ee\x0f4\x03\x01"}, + {"encoding/ascii85", "\xf5\x01C"}, + 
{"encoding/asn1", "\x03n\x03e(\x01'\r\x02\x01\x10\x03\x01"}, + {"encoding/base32", "\xf5\x01A\x02"}, + {"encoding/base64", "\x9c\x01YA\x02"}, + {"encoding/binary", "q\x84\x01\f(\r\x05"}, + {"encoding/csv", "\x02\x01n\x03\x81\x01D\x12\x02"}, + {"encoding/gob", "\x02c\x05\a\x03e\x1c\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, + {"encoding/hex", "q\x03\x81\x01A\x03"}, + {"encoding/json", "\x03\x01a\x04\b\x03\x81\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, + {"encoding/pem", "\x03f\b\x84\x01A\x03"}, + {"encoding/xml", "\x02\x01b\f\x03\x81\x014\x05\n\x01\x02\x10\x02"}, + {"errors", "\xcc\x01\x83\x01"}, + {"expvar", "nK@\b\v\x15\r\b\x02\x03\x01\x11"}, + {"flag", "e\f\x03\x81\x01,\b\x05\b\x02\x01\x10"}, + {"fmt", "qE&\x19\f \b\r\x02\x03\x12"}, + {"go/ast", "\x03\x01p\x0e\x01r\x03)\b\r\x02\x01\x12\x02"}, + {"go/build", "\x02\x01n\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\b\x1b\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, + {"go/build/constraint", "q\xc7\x01\x01\x12\x02"}, + {"go/constant", "t\x0f~\x01\x024\x01\x02\x12"}, + {"go/doc", "\x04p\x01\x05\t=51\x10\x02\x01\x12\x02"}, + {"go/doc/comment", "\x03q\xc2\x01\x01\x01\x01\x12\x02"}, + {"go/format", "\x03q\x01\v\x01\x02rD"}, + {"go/importer", "v\a\x01\x01\x04\x01q9"}, + {"go/internal/gccgoimporter", "\x02\x01[\x13\x03\x04\v\x01o\x02,\x01\x05\x11\x01\f\b"}, + {"go/internal/gcimporter", "\x02r\x0f\x010\x05\r/,\x15\x03\x02"}, + {"go/internal/srcimporter", "t\x01\x01\n\x03\x01q,\x01\x05\x12\x02\x14"}, + {"go/parser", "\x03n\x03\x01\x02\v\x01r\x01+\x06\x12"}, + {"go/printer", "t\x01\x02\x03\tr\f \x15\x02\x01\x02\v\x05\x02"}, + {"go/scanner", "\x03q\x0fr2\x10\x01\x13\x02"}, + {"go/token", "\x04p\x84\x01>\x02\x03\x01\x0f\x02"}, + {"go/types", "\x03\x01\x06g\x03\x01\x03\b\x03\x024\x062\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, + {"go/version", "\xbe\x01{"}, + {"hash", "\xf5\x01"}, + {"hash/adler32", "q\x15\x16"}, + {"hash/crc32", "q\x15\x16\x15\x8a\x01\x01\x13"}, + {"hash/crc64", "q\x15\x16\x9f\x01"}, + {"hash/fnv", 
"q\x15\x16h"}, + {"hash/maphash", "\x86\x01\x11<|"}, + {"html", "\xb9\x02\x02\x12"}, + {"html/template", "\x03k\x06\x18-<\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, + {"image", "\x02o\x1ef\x0f4\x03\x01"}, {"image/color", ""}, - {"image/color/palette", "\x8c\x01"}, - {"image/draw", "\x8b\x01\x01\x04"}, - {"image/gif", "\x02\x01\x05f\x03\x1a\x01\x01\x01\vX"}, - {"image/internal/imageutil", "\x8b\x01"}, - {"image/jpeg", "\x02l\x1d\x01\x04a"}, - {"image/png", "\x02\a^\n\x12\x02\x06\x01eC"}, - {"index/suffixarray", "\x03d\a\x83\x01\f+\n\x01"}, - {"internal/abi", "\xb5\x01\x96\x01"}, - {"internal/asan", "\xcb\x02"}, - {"internal/bisect", "\xaa\x02\r\x01"}, - {"internal/buildcfg", "qGe\x06\x02\x05\n\x01"}, - {"internal/bytealg", "\xae\x01\x9d\x01"}, + {"image/color/palette", "\x8f\x01"}, + {"image/draw", "\x8e\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05i\x03\x1a\x01\x01\x01\vY"}, + {"image/internal/imageutil", "\x8e\x01"}, + {"image/jpeg", "\x02o\x1d\x01\x04b"}, + {"image/png", "\x02\aa\n\x12\x02\x06\x01fC"}, + {"index/suffixarray", "\x03g\a\x84\x01\f+\n\x01"}, + {"internal/abi", "\xb8\x01\x97\x01"}, + {"internal/asan", "\xcf\x02"}, + {"internal/bisect", "\xae\x02\r\x01"}, + {"internal/buildcfg", "tGf\x06\x02\x05\n\x01"}, + {"internal/bytealg", "\xb1\x01\x9e\x01"}, {"internal/byteorder", ""}, {"internal/cfg", ""}, - {"internal/cgrouptest", "q[Q\x06\x0f\x02\x01\x04\x01"}, - {"internal/chacha8rand", "\x99\x01\x15\a\x96\x01"}, + {"internal/cgrouptest", "tZS\x06\x0f\x02\x01\x04\x01"}, + {"internal/chacha8rand", "\x9c\x01\x15\a\x97\x01"}, {"internal/copyright", ""}, {"internal/coverage", ""}, {"internal/coverage/calloc", ""}, - {"internal/coverage/cfile", "k\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x02$,\x06\a\n\x01\x03\r\x06"}, - {"internal/coverage/cformat", "\x04m-\x04O\v6\x01\x02\r"}, - {"internal/coverage/cmerge", "q-_"}, - {"internal/coverage/decodecounter", "g\n-\v\x02F,\x17\x17"}, - {"internal/coverage/decodemeta", "\x02e\n\x16\x17\v\x02F,"}, - 
{"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02D\v!\x15"}, - {"internal/coverage/encodemeta", "\x02\x01d\n\x12\x04\x17\r\x02D,."}, - {"internal/coverage/pods", "\x04m-\x7f\x06\x05\n\x02\x01"}, - {"internal/coverage/rtcov", "\xcb\x02"}, - {"internal/coverage/slicereader", "g\n\x80\x01Z"}, - {"internal/coverage/slicewriter", "q\x80\x01"}, - {"internal/coverage/stringtab", "q8\x04D"}, + {"internal/coverage/cfile", "n\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02&,\x06\a\n\x01\x03\r\x06"}, + {"internal/coverage/cformat", "\x04p-\x04P\v6\x01\x02\r"}, + {"internal/coverage/cmerge", "t-`"}, + {"internal/coverage/decodecounter", "j\n-\v\x02G,\x17\x17"}, + {"internal/coverage/decodemeta", "\x02h\n\x16\x17\v\x02G,"}, + {"internal/coverage/encodecounter", "\x02h\n-\f\x01\x02E\v!\x15"}, + {"internal/coverage/encodemeta", "\x02\x01g\n\x12\x04\x17\r\x02E,."}, + {"internal/coverage/pods", "\x04p-\x80\x01\x06\x05\n\x02\x01"}, + {"internal/coverage/rtcov", "\xcf\x02"}, + {"internal/coverage/slicereader", "j\n\x81\x01Z"}, + {"internal/coverage/slicewriter", "t\x81\x01"}, + {"internal/coverage/stringtab", "t8\x04E"}, {"internal/coverage/test", ""}, {"internal/coverage/uleb128", ""}, - {"internal/cpu", "\xcb\x02"}, - {"internal/dag", "\x04m\xc1\x01\x03"}, - {"internal/diff", "\x03n\xc2\x01\x02"}, - {"internal/exportdata", "\x02\x01k\x03\x02c\x1b,\x01\x05\x11\x01\x02"}, - {"internal/filepathlite", "n*@\x1a@"}, - {"internal/fmtsort", "\x04\xa1\x02\r"}, - {"internal/fuzz", "\x03\nB\x18\x04\x03\x03\x01\v\x036;\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, + {"internal/cpu", "\xcf\x02"}, + {"internal/dag", "\x04p\xc2\x01\x03"}, + {"internal/diff", "\x03q\xc3\x01\x02"}, + {"internal/exportdata", "\x02\x01n\x03\x02c\x1c,\x01\x05\x11\x01\x02"}, + {"internal/filepathlite", "q*A\x1a@"}, + {"internal/fmtsort", "\x04\xa5\x02\r"}, + {"internal/fuzz", "\x03\nE\x18\x04\x03\x03\x01\v\x036<\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, {"internal/goarch", ""}, - 
{"internal/godebug", "\x96\x01!\x80\x01\x01\x13"}, + {"internal/godebug", "\x99\x01!\x81\x01\x01\x13"}, {"internal/godebugs", ""}, {"internal/goexperiment", ""}, {"internal/goos", ""}, - {"internal/goroot", "\x9d\x02\x01\x05\x12\x02"}, + {"internal/goroot", "\xa1\x02\x01\x05\x12\x02"}, {"internal/gover", "\x04"}, {"internal/goversion", ""}, - {"internal/itoa", ""}, - {"internal/lazyregexp", "\x9d\x02\v\r\x02"}, - {"internal/lazytemplate", "\xf1\x01,\x18\x02\f"}, - {"internal/msan", "\xcb\x02"}, + {"internal/lazyregexp", "\xa1\x02\v\r\x02"}, + {"internal/lazytemplate", "\xf5\x01,\x18\x02\f"}, + {"internal/msan", "\xcf\x02"}, {"internal/nettrace", ""}, - {"internal/obscuretestdata", "f\x8b\x01,"}, - {"internal/oserror", "n"}, - {"internal/pkgbits", "\x03L\x18\a\x03\x04\vq\r\x1f\r\n\x01"}, + {"internal/obscuretestdata", "i\x8c\x01,"}, + {"internal/oserror", "q"}, + {"internal/pkgbits", "\x03O\x18\a\x03\x04\vr\r\x1f\r\n\x01"}, {"internal/platform", ""}, - {"internal/poll", "nO\x1f\x159\r\x01\x01\f\x06"}, - {"internal/profile", "\x03\x04g\x03\x80\x017\v\x01\x01\x10"}, + {"internal/poll", "qj\x05\x159\r\x01\x01\f\x06"}, + {"internal/profile", "\x03\x04j\x03\x81\x017\n\x01\x01\x01\x10"}, {"internal/profilerecord", ""}, - {"internal/race", "\x94\x01\xb7\x01"}, - {"internal/reflectlite", "\x94\x01!9<!"}, - {"internal/runtime/atomic", "\xb5\x01\x96\x01"}, - {"internal/runtime/cgroup", "\x98\x01:\x02w"}, - {"internal/runtime/exithook", "\xcb\x01\x80\x01"}, - {"internal/runtime/gc", "\xb5\x01"}, - {"internal/runtime/maps", "\x94\x01\x01 \v\t\a\x03x"}, - {"internal/runtime/math", "\xb5\x01"}, + {"internal/race", "\x97\x01\xb8\x01"}, + {"internal/reflectlite", "\x97\x01!:<!"}, + {"internal/runtime/atomic", "\xb8\x01\x97\x01"}, + {"internal/runtime/cgroup", "\x9b\x01<\x04t"}, + {"internal/runtime/exithook", "\xcd\x01\x82\x01"}, + {"internal/runtime/gc", "\xb8\x01"}, + {"internal/runtime/gc/internal/gen", "\n`\n\x17j\x04\v\x1d\b\x10\x02"}, + {"internal/runtime/gc/scan", 
"\xb1\x01\a\x18\x06y"}, + {"internal/runtime/maps", "\x97\x01\x01 \n\t\t\x02y"}, + {"internal/runtime/math", "\xb8\x01"}, {"internal/runtime/startlinetest", ""}, - {"internal/runtime/strconv", "\xd0\x01"}, - {"internal/runtime/sys", "\xb5\x01\x04"}, - {"internal/runtime/syscall", "\xb5\x01\x96\x01"}, + {"internal/runtime/sys", "\xb8\x01\x04"}, + {"internal/runtime/syscall/linux", "\xb8\x01\x97\x01"}, {"internal/runtime/wasitest", ""}, - {"internal/saferio", "\xf1\x01Z"}, - {"internal/singleflight", "\xb7\x02"}, - {"internal/stringslite", "\x98\x01\xb3\x01"}, - {"internal/sync", "\x94\x01!\x14o\x13"}, - {"internal/synctest", "\x94\x01\xb7\x01"}, - {"internal/syscall/execenv", "\xb9\x02"}, - {"internal/syscall/unix", "\xaa\x02\x0e\x01\x12"}, - {"internal/sysinfo", "\x02\x01\xab\x01C,\x18\x02"}, + {"internal/saferio", "\xf5\x01Z"}, + {"internal/singleflight", "\xbb\x02"}, + {"internal/strconv", "\x84\x02K"}, + {"internal/stringslite", "\x9b\x01\xb4\x01"}, + {"internal/sync", "\x97\x01!\x13q\x13"}, + {"internal/synctest", "\x97\x01\xb8\x01"}, + {"internal/syscall/execenv", "\xbd\x02"}, + {"internal/syscall/unix", "\xae\x02\x0e\x01\x12"}, + {"internal/sysinfo", "\x02\x01\xae\x01D,\x18\x02"}, {"internal/syslist", ""}, - {"internal/testenv", "\x03\na\x02\x01)\x1b\x10-+\x01\x05\a\n\x01\x02\x02\x01\v"}, - {"internal/testhash", "\x03\x80\x01n\x118\v"}, - {"internal/testlog", "\xb7\x02\x01\x13"}, - {"internal/testpty", "n\x03\xac\x01"}, - {"internal/trace", "\x02\x01\x01\x06]\a\x03t\x03\x03\x06\x03\t5\x01\x01\x01\x10\x06"}, - {"internal/trace/internal/testgen", "\x03d\nr\x03\x02\x03\x011\v\r\x10"}, - {"internal/trace/internal/tracev1", "\x03\x01c\a\x03z\x06\f5\x01"}, - {"internal/trace/raw", "\x02e\nw\x03\x06C\x01\x12"}, - {"internal/trace/testtrace", "\x02\x01k\x03r\x03\x05\x01\x057\n\x02\x01"}, + {"internal/testenv", "\x03\nd\x02\x01)\x1b\x0f/+\x01\x05\a\n\x01\x02\x02\x01\v"}, + {"internal/testhash", "\x03\x83\x01o\x118\v"}, + {"internal/testlog", "\xbb\x02\x01\x13"}, + 
{"internal/testpty", "q\x03\xad\x01"}, + {"internal/trace", "\x02\x01\x01\x06`\a\x03u\x03\x03\x06\x03\t5\x01\x01\x01\x10\x06"}, + {"internal/trace/internal/testgen", "\x03g\ns\x03\x02\x03\x011\v\r\x10"}, + {"internal/trace/internal/tracev1", "\x03\x01f\a\x03{\x06\f5\x01"}, + {"internal/trace/raw", "\x02h\nx\x03\x06C\x01\x12"}, + {"internal/trace/testtrace", "\x02\x01n\x03o\x04\x03\x05\x01\x05,\v\x02\b\x02\x01\x05"}, {"internal/trace/tracev2", ""}, - {"internal/trace/traceviewer", "\x02^\v\x06\x19=\x1c\a\a\x04\b\v\x15\x01\x05\a\n\x01\x02\x0e"}, + {"internal/trace/traceviewer", "\x02a\v\x06\x19<\x1e\a\a\x04\b\v\x15\x01\x05\a\n\x01\x02\x0e"}, {"internal/trace/traceviewer/format", ""}, - {"internal/trace/version", "qw\t"}, - {"internal/txtar", "\x03n\xac\x01\x18"}, - {"internal/types/errors", "\xb4\x02"}, - {"internal/unsafeheader", "\xcb\x02"}, - {"internal/xcoff", "Z\r\a\x03e\x1b,\x17\x01"}, - {"internal/zstd", "g\a\x03\x80\x01\x0f"}, - {"io", "n\xc9\x01"}, - {"io/fs", "n*+.1\x10\x13\x04"}, - {"io/ioutil", "\xf1\x01\x01+\x15\x03"}, - {"iter", "\xc9\x01a!"}, - {"log", "q\x80\x01\x05'\r\r\x01\r"}, + {"internal/trace/version", "tx\t"}, + {"internal/txtar", "\x03q\xad\x01\x18"}, + {"internal/types/errors", "\xb8\x02"}, + {"internal/unsafeheader", "\xcf\x02"}, + {"internal/xcoff", "]\r\a\x03e\x1c,\x17\x01"}, + {"internal/zstd", "j\a\x03\x81\x01\x0f"}, + {"io", "q\xca\x01"}, + {"io/fs", "q**01\x10\x13\x04"}, + {"io/ioutil", "\xf5\x01\x01+\x15\x03"}, + {"iter", "\xcb\x01c!"}, + {"log", "t\x81\x01\x05'\r\r\x01\r"}, {"log/internal", ""}, - {"log/slog", "\x03\nU\t\x03\x03\x80\x01\x04\x01\x02\x02\x03(\x05\b\x02\x01\x02\x01\r\x02\x02\x02"}, + {"log/slog", "\x03\nX\t\x03\x03\x81\x01\x04\x01\x02\x02\x03(\x05\b\x02\x01\x02\x01\r\x02\x02\x02"}, {"log/slog/internal", ""}, - {"log/slog/internal/benchmarks", "\ra\x03\x80\x01\x06\x03:\x11"}, - {"log/slog/internal/buffer", "\xb7\x02"}, - {"log/syslog", "n\x03\x84\x01\x12\x16\x18\x02\x0e"}, - {"maps", "\xf4\x01W"}, - {"math", 
"\xae\x01RK"}, - {"math/big", "\x03k\x03(\x15C\f\x03\x020\x02\x01\x02\x14"}, - {"math/big/internal/asmgen", "\x03\x01m\x8f\x012\x03"}, - {"math/bits", "\xcb\x02"}, - {"math/cmplx", "\xfd\x01\x03"}, - {"math/rand", "\xb6\x01G:\x01\x13"}, - {"math/rand/v2", "n+\x03a\x03K"}, - {"mime", "\x02\x01c\b\x03\x80\x01\v!\x15\x03\x02\x10\x02"}, - {"mime/multipart", "\x02\x01H#\x03E;\v\x01\a\x02\x15\x02\x06\x0f\x02\x01\x16"}, - {"mime/quotedprintable", "\x02\x01n\x80\x01"}, - {"net", "\x04\ta*\x1e\a\x04\x05\x11\x01\x04\x15\x01%\x06\r\b\x05\x01\x01\f\x06\a"}, - {"net/http", "\x02\x01\x04\x04\x02>\b\x13\x01\a\x03E;\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"}, - {"net/http/cgi", "\x02Q\x1b\x03\x80\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"}, - {"net/http/cookiejar", "\x04j\x03\x96\x01\x01\b\f\x16\x03\x02\x0e\x04"}, - {"net/http/fcgi", "\x02\x01\nZ\a\x03\x80\x01\x16\x01\x01\x14\x18\x02\x0e"}, - {"net/http/httptest", "\x02\x01\nF\x02\x1b\x01\x80\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"}, - {"net/http/httptrace", "\rFnF\x14\n "}, - {"net/http/httputil", "\x02\x01\na\x03\x80\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"}, - {"net/http/internal", "\x02\x01k\x03\x80\x01"}, - {"net/http/internal/ascii", "\xb5\x02\x12"}, - {"net/http/internal/httpcommon", "\ra\x03\x9c\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"}, - {"net/http/internal/testcert", "\xb5\x02"}, - {"net/http/pprof", "\x02\x01\nd\x18-\x11*\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"}, + {"log/slog/internal/benchmarks", "\rd\x03\x81\x01\x06\x03:\x11"}, + {"log/slog/internal/buffer", "\xbb\x02"}, + {"log/syslog", "q\x03\x85\x01\x12\x16\x18\x02\x0e"}, + {"maps", "\xf8\x01W"}, + {"math", "\xb1\x01SK"}, + {"math/big", "\x03n\x03(\x15D\f\x03\x020\x02\x01\x02\x14"}, + {"math/big/internal/asmgen", "\x03\x01p\x90\x012\x03"}, + {"math/bits", "\xcf\x02"}, + {"math/cmplx", "\x81\x02\x03"}, + {"math/rand", "\xb9\x01H:\x01\x13"}, 
+ {"math/rand/v2", "q+\x03b\x03K"}, + {"mime", "\x02\x01f\b\x03\x81\x01\v!\x15\x03\x02\x10\x02"}, + {"mime/multipart", "\x02\x01K#\x03E<\v\x01\a\x02\x15\x02\x06\x0f\x02\x01\x16"}, + {"mime/quotedprintable", "\x02\x01q\x81\x01"}, + {"net", "\x04\td*\x1e\n\x05\x12\x01\x01\x04\x15\x01%\x06\r\b\x05\x01\x01\f\x06\a"}, + {"net/http", "\x02\x01\x03\x01\x04\x02A\b\x13\x01\a\x03E<\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"}, + {"net/http/cgi", "\x02T\x1b\x03\x81\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"}, + {"net/http/cookiejar", "\x04m\x03\x97\x01\x01\b\f\x16\x03\x02\x0e\x04"}, + {"net/http/fcgi", "\x02\x01\n]\a\x03\x81\x01\x16\x01\x01\x14\x18\x02\x0e"}, + {"net/http/httptest", "\x02\x01\nI\x02\x1b\x01\x81\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"}, + {"net/http/httptrace", "\rImH\x14\n "}, + {"net/http/httputil", "\x02\x01\nd\x03\x81\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"}, + {"net/http/internal", "\x02\x01n\x03\x81\x01"}, + {"net/http/internal/ascii", "\xb9\x02\x12"}, + {"net/http/internal/httpcommon", "\rd\x03\x9d\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"}, + {"net/http/internal/testcert", "\xb9\x02"}, + {"net/http/pprof", "\x02\x01\ng\x18-\x02\x0e,\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"}, {"net/internal/cgotest", ""}, - {"net/internal/socktest", "q\xc6\x01\x02"}, - {"net/mail", "\x02l\x03\x80\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"}, - {"net/netip", "\x04j*\x01$@\x034\x16"}, - {"net/rpc", "\x02g\x05\x03\x0f\ng\x04\x12\x01\x1d\r\x03\x02"}, - {"net/rpc/jsonrpc", "k\x03\x03\x80\x01\x16\x11\x1f"}, - {"net/smtp", "\x19/\v\x13\b\x03\x80\x01\x16\x14\x1a"}, - {"net/textproto", "\x02\x01k\x03\x80\x01\f\n-\x01\x02\x14"}, - {"net/url", "n\x03\x8b\x01&\x10\x02\x01\x16"}, - {"os", "n*\x01\x19\x03\b\t\x12\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"}, - {"os/exec", "\x03\naH%\x01\x15\x01+\x06\a\n\x01\x04\f"}, - {"os/exec/internal/fdtest", "\xb9\x02"}, - 
{"os/signal", "\r\x90\x02\x15\x05\x02"}, - {"os/user", "\x02\x01k\x03\x80\x01,\r\n\x01\x02"}, - {"path", "n*\xb1\x01"}, - {"path/filepath", "n*\x1a@+\r\b\x03\x04\x10"}, - {"plugin", "n"}, - {"reflect", "n&\x04\x1d\b\f\x06\x04\x1b\x06\t-\n\x03\x10\x02\x02"}, + {"net/internal/socktest", "t\xc7\x01\x02"}, + {"net/mail", "\x02o\x03\x81\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"}, + {"net/netip", "\x04m*\x01e\x034\x16"}, + {"net/rpc", "\x02j\x05\x03\x0f\nh\x04\x12\x01\x1d\r\x03\x02"}, + {"net/rpc/jsonrpc", "n\x03\x03\x81\x01\x16\x11\x1f"}, + {"net/smtp", "\x192\v\x13\b\x03\x81\x01\x16\x14\x1a"}, + {"net/textproto", "\x02\x01n\x03\x81\x01\f\n-\x01\x02\x14"}, + {"net/url", "q\x03\xa7\x01\v\x10\x02\x01\x16"}, + {"os", "q*\x01\x19\x03\x10\x13\x01\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"}, + {"os/exec", "\x03\ndH&\x01\x15\x01+\x06\a\n\x01\x04\f"}, + {"os/exec/internal/fdtest", "\xbd\x02"}, + {"os/signal", "\r\x94\x02\x15\x05\x02"}, + {"os/user", "\x02\x01n\x03\x81\x01,\r\n\x01\x02"}, + {"path", "q*\xb2\x01"}, + {"path/filepath", "q*\x1aA+\r\b\x03\x04\x10"}, + {"plugin", "q"}, + {"reflect", "q&\x04\x1d\x13\b\x03\x05\x17\x06\t-\n\x03\x10\x02\x02"}, {"reflect/internal/example1", ""}, {"reflect/internal/example2", ""}, - {"regexp", "\x03\xee\x018\t\x02\x01\x02\x10\x02"}, - {"regexp/syntax", "\xb2\x02\x01\x01\x01\x02\x10\x02"}, - {"runtime", "\x94\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x02\x01\x01\x04\x10c"}, - {"runtime/coverage", "\xa0\x01Q"}, - {"runtime/debug", "qUW\r\b\x02\x01\x10\x06"}, - {"runtime/metrics", "\xb7\x01F-!"}, - {"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03#4)\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"}, - {"runtime/race", "\xb0\x02"}, + {"regexp", "\x03\xf2\x018\t\x02\x01\x02\x10\x02"}, + {"regexp/syntax", "\xb6\x02\x01\x01\x01\x02\x10\x02"}, + {"runtime", "\x97\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0e\x03\x01\x01\x01\x02\x01\x01\x02\x01\x04\x01\x10c"}, + {"runtime/coverage", "\xa3\x01R"}, + {"runtime/debug", "tTY\r\b\x02\x01\x10\x06"}, + 
{"runtime/metrics", "\xba\x01G-!"}, + {"runtime/pprof", "\x02\x01\x01\x03\x06]\a\x03#$\x0f+\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"}, + {"runtime/race", "\xb4\x02"}, {"runtime/race/internal/amd64v1", ""}, - {"runtime/trace", "\ra\x03w\t9\b\x05\x01\r\x06"}, - {"slices", "\x04\xf0\x01\fK"}, - {"sort", "\xca\x0162"}, - {"strconv", "n*@%\x03I"}, - {"strings", "n&\x04@\x19\x03\f7\x10\x02\x02"}, + {"runtime/trace", "\rd\x03x\t9\b\x05\x01\r\x06"}, + {"slices", "\x04\xf4\x01\fK"}, + {"sort", "\xcc\x0182"}, + {"strconv", "q*@\x01q"}, + {"strings", "q&\x04A\x19\x03\f7\x10\x02\x02"}, {"structs", ""}, - {"sync", "\xc9\x01\x10\x01P\x0e\x13"}, - {"sync/atomic", "\xcb\x02"}, - {"syscall", "n'\x03\x01\x1c\b\x03\x03\x06\vV\b\x05\x01\x13"}, - {"testing", "\x03\na\x02\x01X\x14\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x02\x02"}, - {"testing/fstest", "n\x03\x80\x01\x01\n&\x10\x03\b\b"}, - {"testing/internal/testdeps", "\x02\v\xa7\x01-\x10,\x03\x05\x03\x06\a\x02\x0e"}, - {"testing/iotest", "\x03k\x03\x80\x01\x04"}, - {"testing/quick", "p\x01\x8c\x01\x05#\x10\x10"}, - {"testing/slogtest", "\ra\x03\x86\x01.\x05\x10\v"}, - {"testing/synctest", "\xda\x01`\x11"}, - {"text/scanner", "\x03n\x80\x01,*\x02"}, - {"text/tabwriter", "q\x80\x01X"}, - {"text/template", "n\x03B>\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"}, - {"text/template/parse", "\x03n\xb9\x01\n\x01\x12\x02"}, - {"time", "n*\x1e\"(*\r\x02\x12"}, - {"time/tzdata", "n\xcb\x01\x12"}, + {"sync", "\xcb\x01\x12\x01P\x0e\x13"}, + {"sync/atomic", "\xcf\x02"}, + {"syscall", "q'\x03\x01\x1c\n\x03\x06\f\x04S\b\x05\x01\x13"}, + {"testing", "\x03\nd\x02\x01W\x16\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x04"}, + {"testing/fstest", "q\x03\x81\x01\x01\n&\x10\x03\b\b"}, + {"testing/internal/testdeps", "\x02\v\xaa\x01.\x10,\x03\x05\x03\x06\a\x02\x0e"}, + {"testing/iotest", "\x03n\x03\x81\x01\x04"}, + {"testing/quick", "s\x01\x8d\x01\x05#\x10\x10"}, + {"testing/slogtest", "\rd\x03\x87\x01.\x05\x10\v"}, + 
{"testing/synctest", "\xde\x01`\x11"}, + {"text/scanner", "\x03q\x81\x01,*\x02"}, + {"text/tabwriter", "t\x81\x01X"}, + {"text/template", "q\x03B?\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"}, + {"text/template/parse", "\x03q\xba\x01\n\x01\x12\x02"}, + {"time", "q*\x1e#(*\r\x02\x12"}, + {"time/tzdata", "q\xcc\x01\x12"}, {"unicode", ""}, {"unicode/utf16", ""}, {"unicode/utf8", ""}, - {"unique", "\x94\x01!#\x01Q\r\x01\x13\x12"}, + {"unique", "\x97\x01!$\x01Q\r\x01\x13\x12"}, {"unsafe", ""}, - {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x92\x01*&"}, - {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xde\x01\x04\x01\a"}, - {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x8d\x01' \n"}, + {"vendor/golang.org/x/crypto/chacha20", "\x10Z\a\x93\x01*&"}, + {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10Z\a\xdf\x01\x04\x01\a"}, + {"vendor/golang.org/x/crypto/cryptobyte", "g\n\x03\x8e\x01' \n"}, {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""}, - {"vendor/golang.org/x/crypto/internal/alias", "\xcb\x02"}, - {"vendor/golang.org/x/crypto/internal/poly1305", "R\x15\x99\x01"}, - {"vendor/golang.org/x/net/dns/dnsmessage", "n"}, - {"vendor/golang.org/x/net/http/httpguts", "\x87\x02\x14\x1a\x14\r"}, - {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x96\x01\x10\x05\x01\x18\x14\r"}, - {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03\x80\x01F"}, - {"vendor/golang.org/x/net/idna", "q\x8c\x018\x14\x10\x02\x01"}, - {"vendor/golang.org/x/net/nettest", "\x03d\a\x03\x80\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"}, - {"vendor/golang.org/x/sys/cpu", "\x9d\x02\r\n\x01\x16"}, - {"vendor/golang.org/x/text/secure/bidirule", "n\xdb\x01\x11\x01"}, - {"vendor/golang.org/x/text/transform", "\x03k\x83\x01X"}, - {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf\x84\x01>\x16"}, - {"vendor/golang.org/x/text/unicode/norm", "g\n\x80\x01F\x12\x11"}, - {"weak", "\x94\x01\x96\x01!"}, + {"vendor/golang.org/x/crypto/internal/alias", "\xcf\x02"}, + 
{"vendor/golang.org/x/crypto/internal/poly1305", "U\x15\x9a\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "q"}, + {"vendor/golang.org/x/net/http/httpguts", "\x8b\x02\x14\x1a\x14\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "q\x03\x97\x01\x10\x05\x01\x18\x14\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03n\x03\x81\x01F"}, + {"vendor/golang.org/x/net/idna", "t\x8d\x018\x14\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03g\a\x03\x81\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"}, + {"vendor/golang.org/x/sys/cpu", "\xa1\x02\r\n\x01\x16"}, + {"vendor/golang.org/x/text/secure/bidirule", "q\xdc\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03n\x84\x01X"}, + {"vendor/golang.org/x/text/unicode/bidi", "\x03\bi\x85\x01>\x16"}, + {"vendor/golang.org/x/text/unicode/norm", "j\n\x81\x01F\x12\x11"}, + {"weak", "\x97\x01\x97\x01!"}, } + +// bootstrap is the list of bootstrap packages extracted from cmd/dist. +var bootstrap = map[string]bool{ + "cmp": true, + "cmd/asm": true, + "cmd/asm/internal/arch": true, + "cmd/asm/internal/asm": true, + "cmd/asm/internal/flags": true, + "cmd/asm/internal/lex": true, + "cmd/cgo": true, + "cmd/compile": true, + "cmd/compile/internal/abi": true, + "cmd/compile/internal/abt": true, + "cmd/compile/internal/amd64": true, + "cmd/compile/internal/arm": true, + "cmd/compile/internal/arm64": true, + "cmd/compile/internal/base": true, + "cmd/compile/internal/bitvec": true, + "cmd/compile/internal/compare": true, + "cmd/compile/internal/coverage": true, + "cmd/compile/internal/deadlocals": true, + "cmd/compile/internal/devirtualize": true, + "cmd/compile/internal/dwarfgen": true, + "cmd/compile/internal/escape": true, + "cmd/compile/internal/gc": true, + "cmd/compile/internal/importer": true, + "cmd/compile/internal/inline": true, + "cmd/compile/internal/inline/inlheur": true, + "cmd/compile/internal/inline/interleaved": true, + "cmd/compile/internal/ir": true, + "cmd/compile/internal/liveness": true, + 
"cmd/compile/internal/logopt": true, + "cmd/compile/internal/loong64": true, + "cmd/compile/internal/loopvar": true, + "cmd/compile/internal/mips": true, + "cmd/compile/internal/mips64": true, + "cmd/compile/internal/noder": true, + "cmd/compile/internal/objw": true, + "cmd/compile/internal/pgoir": true, + "cmd/compile/internal/pkginit": true, + "cmd/compile/internal/ppc64": true, + "cmd/compile/internal/rangefunc": true, + "cmd/compile/internal/reflectdata": true, + "cmd/compile/internal/riscv64": true, + "cmd/compile/internal/rttype": true, + "cmd/compile/internal/s390x": true, + "cmd/compile/internal/ssa": true, + "cmd/compile/internal/ssagen": true, + "cmd/compile/internal/staticdata": true, + "cmd/compile/internal/staticinit": true, + "cmd/compile/internal/syntax": true, + "cmd/compile/internal/test": true, + "cmd/compile/internal/typebits": true, + "cmd/compile/internal/typecheck": true, + "cmd/compile/internal/types": true, + "cmd/compile/internal/types2": true, + "cmd/compile/internal/walk": true, + "cmd/compile/internal/wasm": true, + "cmd/compile/internal/x86": true, + "cmd/internal/archive": true, + "cmd/internal/bio": true, + "cmd/internal/codesign": true, + "cmd/internal/dwarf": true, + "cmd/internal/edit": true, + "cmd/internal/gcprog": true, + "cmd/internal/goobj": true, + "cmd/internal/hash": true, + "cmd/internal/macho": true, + "cmd/internal/obj": true, + "cmd/internal/obj/arm": true, + "cmd/internal/obj/arm64": true, + "cmd/internal/obj/loong64": true, + "cmd/internal/obj/mips": true, + "cmd/internal/obj/ppc64": true, + "cmd/internal/obj/riscv": true, + "cmd/internal/obj/s390x": true, + "cmd/internal/obj/wasm": true, + "cmd/internal/obj/x86": true, + "cmd/internal/objabi": true, + "cmd/internal/par": true, + "cmd/internal/pgo": true, + "cmd/internal/pkgpath": true, + "cmd/internal/quoted": true, + "cmd/internal/src": true, + "cmd/internal/sys": true, + "cmd/internal/telemetry": true, + "cmd/internal/telemetry/counter": true, + "cmd/link": true, + 
"cmd/link/internal/amd64": true, + "cmd/link/internal/arm": true, + "cmd/link/internal/arm64": true, + "cmd/link/internal/benchmark": true, + "cmd/link/internal/dwtest": true, + "cmd/link/internal/ld": true, + "cmd/link/internal/loadelf": true, + "cmd/link/internal/loader": true, + "cmd/link/internal/loadmacho": true, + "cmd/link/internal/loadpe": true, + "cmd/link/internal/loadxcoff": true, + "cmd/link/internal/loong64": true, + "cmd/link/internal/mips": true, + "cmd/link/internal/mips64": true, + "cmd/link/internal/ppc64": true, + "cmd/link/internal/riscv64": true, + "cmd/link/internal/s390x": true, + "cmd/link/internal/sym": true, + "cmd/link/internal/wasm": true, + "cmd/link/internal/x86": true, + "compress/flate": true, + "compress/zlib": true, + "container/heap": true, + "debug/dwarf": true, + "debug/elf": true, + "debug/macho": true, + "debug/pe": true, + "go/build/constraint": true, + "go/constant": true, + "go/version": true, + "internal/abi": true, + "internal/coverage": true, + "cmd/internal/cov/covcmd": true, + "internal/bisect": true, + "internal/buildcfg": true, + "internal/exportdata": true, + "internal/goarch": true, + "internal/godebugs": true, + "internal/goexperiment": true, + "internal/goroot": true, + "internal/gover": true, + "internal/goversion": true, + "internal/lazyregexp": true, + "internal/pkgbits": true, + "internal/platform": true, + "internal/profile": true, + "internal/race": true, + "internal/runtime/gc": true, + "internal/saferio": true, + "internal/syscall/unix": true, + "internal/types/errors": true, + "internal/unsafeheader": true, + "internal/xcoff": true, + "internal/zstd": true, + "math/bits": true, + "sort": true, +} + +// BootstrapVersion is the minor version of Go used during toolchain +// bootstrapping. Packages for which [IsBootstrapPackage] must not use +// features of Go newer than this version. 
+const BootstrapVersion = Version(24) // go1.24.6 diff --git a/src/cmd/vendor/golang.org/x/tools/internal/stdlib/import.go b/src/cmd/vendor/golang.org/x/tools/internal/stdlib/import.go index f6909878a8..8ecc672b8b 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/stdlib/import.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/stdlib/import.go @@ -87,3 +87,11 @@ func find(pkg string) (int, bool) { return strings.Compare(p.name, n) }) } + +// IsBootstrapPackage reports whether pkg is one of the low-level +// packages in the Go distribution that must compile with the older +// language version specified by [BootstrapVersion] during toolchain +// bootstrapping; see golang.org/s/go15bootstrap. +func IsBootstrapPackage(pkg string) bool { + return bootstrap[pkg] +} diff --git a/src/cmd/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/src/cmd/vendor/golang.org/x/tools/internal/stdlib/manifest.go index c1faa50d36..362f23c436 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -225,6 +225,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Buffer).Grow", Method, 1, ""}, {"(*Buffer).Len", Method, 0, ""}, {"(*Buffer).Next", Method, 0, ""}, + {"(*Buffer).Peek", Method, 26, ""}, {"(*Buffer).Read", Method, 0, ""}, {"(*Buffer).ReadByte", Method, 0, ""}, {"(*Buffer).ReadBytes", Method, 0, ""}, @@ -1628,6 +1629,7 @@ var PackageSymbols = map[string][]Symbol{ {"ResultNoRows", Var, 0, ""}, {"Rows", Type, 0, ""}, {"RowsAffected", Type, 0, ""}, + {"RowsColumnScanner", Type, 26, ""}, {"RowsColumnTypeDatabaseTypeName", Type, 8, ""}, {"RowsColumnTypeLength", Type, 8, ""}, {"RowsColumnTypeNullable", Type, 8, ""}, @@ -4953,6 +4955,7 @@ var PackageSymbols = map[string][]Symbol{ }, "errors": { {"As", Func, 13, "func(err error, target any) bool"}, + {"AsType", Func, 26, "func[E error](err error) (E, bool)"}, {"ErrUnsupported", Var, 21, ""}, {"Is", Func, 13, "func(err error, target error) 
bool"}, {"Join", Func, 20, "func(errs ...error) error"}, @@ -5090,7 +5093,7 @@ var PackageSymbols = map[string][]Symbol{ {"Append", Func, 19, "func(b []byte, a ...any) []byte"}, {"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"}, {"Appendln", Func, 19, "func(b []byte, a ...any) []byte"}, - {"Errorf", Func, 0, "func(format string, a ...any) error"}, + {"Errorf", Func, 0, "func(format string, a ...any) (err error)"}, {"FormatString", Func, 20, "func(state State, verb rune) string"}, {"Formatter", Type, 0, ""}, {"Fprint", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"}, @@ -5155,6 +5158,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*DeclStmt).Pos", Method, 0, ""}, {"(*DeferStmt).End", Method, 0, ""}, {"(*DeferStmt).Pos", Method, 0, ""}, + {"(*Directive).End", Method, 26, ""}, + {"(*Directive).ParseArgs", Method, 26, ""}, + {"(*Directive).Pos", Method, 26, ""}, {"(*Ellipsis).End", Method, 0, ""}, {"(*Ellipsis).Pos", Method, 0, ""}, {"(*EmptyStmt).End", Method, 0, ""}, @@ -5320,6 +5326,15 @@ var PackageSymbols = map[string][]Symbol{ {"DeferStmt", Type, 0, ""}, {"DeferStmt.Call", Field, 0, ""}, {"DeferStmt.Defer", Field, 0, ""}, + {"Directive", Type, 26, ""}, + {"Directive.Args", Field, 26, ""}, + {"Directive.ArgsPos", Field, 26, ""}, + {"Directive.Name", Field, 26, ""}, + {"Directive.Slash", Field, 26, ""}, + {"Directive.Tool", Field, 26, ""}, + {"DirectiveArg", Type, 26, ""}, + {"DirectiveArg.Arg", Field, 26, ""}, + {"DirectiveArg.Pos", Field, 26, ""}, {"Ellipsis", Type, 0, ""}, {"Ellipsis.Ellipsis", Field, 0, ""}, {"Ellipsis.Elt", Field, 0, ""}, @@ -5469,6 +5484,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParenExpr.Lparen", Field, 0, ""}, {"ParenExpr.Rparen", Field, 0, ""}, {"ParenExpr.X", Field, 0, ""}, + {"ParseDirective", Func, 26, "func(pos token.Pos, c string) (Directive, bool)"}, {"Pkg", Const, 0, ""}, {"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"}, {"PreorderStack", Func, 25, "func(root Node, stack []Node, f 
func(n Node, stack []Node) bool)"}, @@ -7271,6 +7287,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Logger).WarnContext", Method, 21, ""}, {"(*Logger).With", Method, 21, ""}, {"(*Logger).WithGroup", Method, 21, ""}, + {"(*MultiHandler).Enabled", Method, 26, ""}, + {"(*MultiHandler).Handle", Method, 26, ""}, + {"(*MultiHandler).WithAttrs", Method, 26, ""}, + {"(*MultiHandler).WithGroup", Method, 26, ""}, {"(*Record).Add", Method, 21, ""}, {"(*Record).AddAttrs", Method, 21, ""}, {"(*TextHandler).Enabled", Method, 21, ""}, @@ -7358,9 +7378,11 @@ var PackageSymbols = map[string][]Symbol{ {"LogValuer", Type, 21, ""}, {"Logger", Type, 21, ""}, {"MessageKey", Const, 21, ""}, + {"MultiHandler", Type, 26, ""}, {"New", Func, 21, "func(h Handler) *Logger"}, {"NewJSONHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *JSONHandler"}, {"NewLogLogger", Func, 21, "func(h Handler, level Level) *log.Logger"}, + {"NewMultiHandler", Func, 26, "func(handlers ...Handler) *MultiHandler"}, {"NewRecord", Func, 21, "func(t time.Time, level Level, msg string, pc uintptr) Record"}, {"NewTextHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *TextHandler"}, {"Record", Type, 21, ""}, @@ -7515,7 +7537,7 @@ var PackageSymbols = map[string][]Symbol{ {"MinInt64", Const, 0, ""}, {"MinInt8", Const, 0, ""}, {"Mod", Func, 0, "func(x float64, y float64) float64"}, - {"Modf", Func, 0, "func(f float64) (int float64, frac float64)"}, + {"Modf", Func, 0, "func(f float64) (integer float64, fractional float64)"}, {"NaN", Func, 0, "func() float64"}, {"Nextafter", Func, 0, "func(x float64, y float64) (r float64)"}, {"Nextafter32", Func, 4, "func(x float32, y float32) (r float32)"}, @@ -7972,6 +7994,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*DNSError).Unwrap", Method, 23, ""}, {"(*Dialer).Dial", Method, 1, ""}, {"(*Dialer).DialContext", Method, 7, ""}, + {"(*Dialer).DialIP", Method, 26, ""}, + {"(*Dialer).DialTCP", Method, 26, ""}, + {"(*Dialer).DialUDP", Method, 26, ""}, + 
{"(*Dialer).DialUnix", Method, 26, ""}, {"(*Dialer).MultipathTCP", Method, 21, ""}, {"(*Dialer).SetMultipathTCP", Method, 21, ""}, {"(*IP).UnmarshalText", Method, 2, ""}, @@ -8457,6 +8483,7 @@ var PackageSymbols = map[string][]Symbol{ {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24, ""}, {"HTTP2Config.PingTimeout", Field, 24, ""}, {"HTTP2Config.SendPingTimeout", Field, 24, ""}, + {"HTTP2Config.StrictMaxConcurrentRequests", Field, 26, ""}, {"HTTP2Config.WriteByteTimeout", Field, 24, ""}, {"Handle", Func, 0, "func(pattern string, handler Handler)"}, {"HandleFunc", Func, 0, "func(pattern string, handler func(ResponseWriter, *Request))"}, @@ -8904,6 +8931,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Prefix).AppendText", Method, 24, ""}, {"(Prefix).AppendTo", Method, 18, ""}, {"(Prefix).Bits", Method, 18, ""}, + {"(Prefix).Compare", Method, 26, ""}, {"(Prefix).Contains", Method, 18, ""}, {"(Prefix).IsSingleIP", Method, 18, ""}, {"(Prefix).IsValid", Method, 18, ""}, @@ -9177,6 +9205,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Process).Release", Method, 0, ""}, {"(*Process).Signal", Method, 0, ""}, {"(*Process).Wait", Method, 0, ""}, + {"(*Process).WithHandle", Method, 26, ""}, {"(*ProcessState).ExitCode", Method, 12, ""}, {"(*ProcessState).Exited", Method, 0, ""}, {"(*ProcessState).Pid", Method, 0, ""}, @@ -9234,6 +9263,7 @@ var PackageSymbols = map[string][]Symbol{ {"ErrExist", Var, 0, ""}, {"ErrInvalid", Var, 0, ""}, {"ErrNoDeadline", Var, 10, ""}, + {"ErrNoHandle", Var, 26, ""}, {"ErrNotExist", Var, 0, ""}, {"ErrPermission", Var, 0, ""}, {"ErrProcessDone", Var, 16, ""}, @@ -9461,7 +9491,7 @@ var PackageSymbols = map[string][]Symbol{ {"ListSeparator", Const, 0, ""}, {"Localize", Func, 23, "func(path string) (string, error)"}, {"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"}, - {"Rel", Func, 0, "func(basepath string, targpath string) (string, error)"}, + {"Rel", Func, 0, "func(basePath string, targPath string) (string, 
error)"}, {"Separator", Const, 0, ""}, {"SkipAll", Var, 20, ""}, {"SkipDir", Var, 0, ""}, @@ -9932,7 +9962,7 @@ var PackageSymbols = map[string][]Symbol{ {"PanicNilError", Type, 21, ""}, {"Pinner", Type, 21, ""}, {"ReadMemStats", Func, 0, "func(m *MemStats)"}, - {"ReadTrace", Func, 5, "func() []byte"}, + {"ReadTrace", Func, 5, "func() (buf []byte)"}, {"SetBlockProfileRate", Func, 1, "func(rate int)"}, {"SetCPUProfileRate", Func, 0, "func(hz int)"}, {"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"}, @@ -16679,6 +16709,7 @@ var PackageSymbols = map[string][]Symbol{ {"ValueOf", Func, 0, ""}, }, "testing": { + {"(*B).ArtifactDir", Method, 26, ""}, {"(*B).Attr", Method, 25, ""}, {"(*B).Chdir", Method, 24, ""}, {"(*B).Cleanup", Method, 14, ""}, @@ -16713,6 +16744,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).StopTimer", Method, 0, ""}, {"(*B).TempDir", Method, 15, ""}, {"(*F).Add", Method, 18, ""}, + {"(*F).ArtifactDir", Method, 26, ""}, {"(*F).Attr", Method, 25, ""}, {"(*F).Chdir", Method, 24, ""}, {"(*F).Cleanup", Method, 18, ""}, @@ -16738,6 +16770,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).TempDir", Method, 18, ""}, {"(*M).Run", Method, 4, ""}, {"(*PB).Next", Method, 3, ""}, + {"(*T).ArtifactDir", Method, 26, ""}, {"(*T).Attr", Method, 25, ""}, {"(*T).Chdir", Method, 24, ""}, {"(*T).Cleanup", Method, 14, ""}, diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/normalize.go index f49802b8ef..8d13f12147 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -160,8 +160,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in // The term set of an interface is the intersection of the term sets of its // embedded types. 
tset.terms = allTermlist - for i := 0; i < u.NumEmbeddeds(); i++ { - embedded := u.EmbeddedType(i) + for embedded := range u.EmbeddedTypes() { if _, ok := embedded.Underlying().(*types.TypeParam); ok { return nil, fmt.Errorf("invalid embedded type %T", embedded) } @@ -174,8 +173,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in case *types.Union: // The term set of a union is the union of term sets of its terms. tset.terms = nil - for i := 0; i < u.Len(); i++ { - t := u.Term(i) + for t := range u.Terms() { var terms termlist switch t.Type().Underlying().(type) { case *types.Interface: diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/element.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/element.go index 4957f02164..5fe4d8abcb 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/element.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/element.go @@ -35,8 +35,8 @@ func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T type // Recursion over signatures of each method. 
tmset := msets.MethodSet(T) - for i := 0; i < tmset.Len(); i++ { - sig := tmset.At(i).Type().(*types.Signature) + for method := range tmset.Methods() { + sig := method.Type().(*types.Signature) // It is tempting to call visit(sig, false) // but, as noted in golang.org/cl/65450043, // the Signature.Recv field is ignored by diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go index f2affec4fb..e0d63c46c6 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go @@ -48,7 +48,7 @@ func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { return ok && IsPackageLevel(obj) && f.Pkg().Path() == pkgPath && - f.Type().(*types.Signature).Recv() == nil && + f.Signature().Recv() == nil && slices.Contains(names, f.Name()) } @@ -60,7 +60,7 @@ func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { // which is important for the performance of syntax matching. func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool { if fn, ok := obj.(*types.Func); ok { - if recv := fn.Type().(*types.Signature).Recv(); recv != nil { + if recv := fn.Signature().Recv(); recv != nil { _, T := ReceiverNamed(recv) return T != nil && IsTypeNamed(T, pkgPath, typeName) && diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go index 64f47919f0..4e2756fc49 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go @@ -19,7 +19,7 @@ import ( // TODO(adonovan): this function ignores the effect of shadowing. 
It // should accept a [token.Pos] and a [types.Info] and compute only the // set of imports that are not shadowed at that point, analogous to -// [analysisinternal.AddImport]. It could also compute (as a side +// [analysis.AddImport]. It could also compute (as a side // effect) the set of additional imports required to ensure that there // is an accessible import for each necessary package, making it // converge even more closely with AddImport. diff --git a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go index 453bba2ad5..d612a71029 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -258,12 +258,12 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { case *types.Signature: var params []*ast.Field - for i := 0; i < t.Params().Len(); i++ { + for v := range t.Params().Variables() { params = append(params, &ast.Field{ - Type: TypeExpr(t.Params().At(i).Type(), qual), + Type: TypeExpr(v.Type(), qual), Names: []*ast.Ident{ { - Name: t.Params().At(i).Name(), + Name: v.Name(), }, }, }) @@ -273,9 +273,9 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} } var returns []*ast.Field - for i := 0; i < t.Results().Len(); i++ { + for v := range t.Results().Variables() { returns = append(returns, &ast.Field{ - Type: TypeExpr(t.Results().At(i).Type(), qual), + Type: TypeExpr(v.Type(), qual), }) } return &ast.FuncType{ @@ -315,8 +315,8 @@ func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok { if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 { var indices []ast.Expr - for i := range typeArgs.Len() { - indices = append(indices, TypeExpr(typeArgs.At(i), qual)) + for t0 := range typeArgs.Types() 
{ + indices = append(indices, TypeExpr(t0, qual)) } expr = &ast.IndexListExpr{ X: expr, diff --git a/src/cmd/vendor/golang.org/x/tools/internal/versions/features.go b/src/cmd/vendor/golang.org/x/tools/internal/versions/features.go index b53f178616..a5f4e3252c 100644 --- a/src/cmd/vendor/golang.org/x/tools/internal/versions/features.go +++ b/src/cmd/vendor/golang.org/x/tools/internal/versions/features.go @@ -7,13 +7,17 @@ package versions // This file contains predicates for working with file versions to // decide when a tool should consider a language feature enabled. -// GoVersions that features in x/tools can be gated to. +// named constants, to avoid misspelling const ( Go1_18 = "go1.18" Go1_19 = "go1.19" Go1_20 = "go1.20" Go1_21 = "go1.21" Go1_22 = "go1.22" + Go1_23 = "go1.23" + Go1_24 = "go1.24" + Go1_25 = "go1.25" + Go1_26 = "go1.26" ) // Future is an invalid unknown Go version sometime in the future. diff --git a/src/cmd/vendor/modules.txt b/src/cmd/vendor/modules.txt index e92804a90d..80b23723bc 100644 --- a/src/cmd/vendor/modules.txt +++ b/src/cmd/vendor/modules.txt @@ -28,7 +28,7 @@ golang.org/x/arch/x86/x86asm # golang.org/x/build v0.0.0-20250806225920-b7c66c047964 ## explicit; go 1.23.0 golang.org/x/build/relnote -# golang.org/x/mod v0.29.0 +# golang.org/x/mod v0.30.1-0.20251114215501-3f03020ad526 ## explicit; go 1.24.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile @@ -39,16 +39,16 @@ golang.org/x/mod/sumdb/dirhash golang.org/x/mod/sumdb/note golang.org/x/mod/sumdb/tlog golang.org/x/mod/zip -# golang.org/x/sync v0.17.0 +# golang.org/x/sync v0.18.0 ## explicit; go 1.24.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.37.0 +# golang.org/x/sys v0.38.0 ## explicit; go 1.24.0 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 +# golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 ## explicit; go 1.24.0 golang.org/x/telemetry 
golang.org/x/telemetry/counter @@ -73,7 +73,7 @@ golang.org/x/text/internal/tag golang.org/x/text/language golang.org/x/text/transform golang.org/x/text/unicode/norm -# golang.org/x/tools v0.38.1-0.20251015192825-7d9453ccc0f5 +# golang.org/x/tools v0.39.1-0.20251114194111-59ff18ce4883 ## explicit; go 1.24.0 golang.org/x/tools/cmd/bisect golang.org/x/tools/cover @@ -98,6 +98,7 @@ golang.org/x/tools/go/analysis/passes/httpresponse golang.org/x/tools/go/analysis/passes/ifaceassert golang.org/x/tools/go/analysis/passes/inline golang.org/x/tools/go/analysis/passes/inspect +golang.org/x/tools/go/analysis/passes/internal/ctrlflowinternal golang.org/x/tools/go/analysis/passes/internal/gofixdirective golang.org/x/tools/go/analysis/passes/loopclosure golang.org/x/tools/go/analysis/passes/lostcancel @@ -127,11 +128,13 @@ golang.org/x/tools/go/cfg golang.org/x/tools/go/types/objectpath golang.org/x/tools/go/types/typeutil golang.org/x/tools/internal/aliases -golang.org/x/tools/internal/analysisinternal -golang.org/x/tools/internal/analysisinternal/generated -golang.org/x/tools/internal/analysisinternal/typeindex +golang.org/x/tools/internal/analysis/analyzerutil +golang.org/x/tools/internal/analysis/driverutil +golang.org/x/tools/internal/analysis/typeindex golang.org/x/tools/internal/astutil +golang.org/x/tools/internal/astutil/free golang.org/x/tools/internal/bisect +golang.org/x/tools/internal/cfginternal golang.org/x/tools/internal/diff golang.org/x/tools/internal/diff/lcs golang.org/x/tools/internal/facts diff --git a/src/cmd/vet/testdata/print/print.go b/src/cmd/vet/testdata/print/print.go index 3761da420b..9145c12fb8 100644 --- a/src/cmd/vet/testdata/print/print.go +++ b/src/cmd/vet/testdata/print/print.go @@ -75,7 +75,7 @@ func PrintfTests() { fmt.Printf("%b %b %b %b", 3e9, x, fslice, c) fmt.Printf("%o %o", 3, i) fmt.Printf("%p", p) - fmt.Printf("%q %q %q %q", 3, i, 'x', r) + fmt.Printf("%q %q %q", rune(3), 'x', r) fmt.Printf("%s %s %s", "hi", s, []byte{65}) 
fmt.Printf("%t %t", true, b) fmt.Printf("%T %T", 3, i) diff --git a/src/crypto/crypto.go b/src/crypto/crypto.go index 6b3db5a1a3..0bf9ec834b 100644 --- a/src/crypto/crypto.go +++ b/src/crypto/crypto.go @@ -253,3 +253,21 @@ func SignMessage(signer Signer, rand io.Reader, msg []byte, opts SignerOpts) (si } return signer.Sign(rand, msg, opts) } + +// Decapsulator is an interface for an opaque private KEM key that can be used for +// decapsulation operations. For example, an ML-KEM key kept in a hardware module. +// +// It is implemented, for example, by [crypto/mlkem.DecapsulationKey768]. +type Decapsulator interface { + Encapsulator() Encapsulator + Decapsulate(ciphertext []byte) (sharedKey []byte, err error) +} + +// Encapsulator is an interface for a public KEM key that can be used for +// encapsulation operations. +// +// It is implemented, for example, by [crypto/mlkem.EncapsulationKey768]. +type Encapsulator interface { + Bytes() []byte + Encapsulate() (sharedKey, ciphertext []byte) +} diff --git a/src/crypto/ecdh/ecdh.go b/src/crypto/ecdh/ecdh.go index 231f1ea04c..82daacf473 100644 --- a/src/crypto/ecdh/ecdh.go +++ b/src/crypto/ecdh/ecdh.go @@ -92,6 +92,18 @@ func (k *PublicKey) Curve() Curve { return k.curve } +// KeyExchanger is an interface for an opaque private key that can be used for +// key exchange operations. For example, an ECDH key kept in a hardware module. +// +// It is implemented by [PrivateKey]. +type KeyExchanger interface { + PublicKey() *PublicKey + Curve() Curve + ECDH(*PublicKey) ([]byte, error) +} + +var _ KeyExchanger = (*PrivateKey)(nil) + // PrivateKey is an ECDH private key, usually kept secret. 
// // These keys can be parsed with [crypto/x509.ParsePKCS8PrivateKey] and encoded diff --git a/src/crypto/internal/fips140/mlkem/mlkem1024.go b/src/crypto/internal/fips140/mlkem/mlkem1024.go index edde161422..953eea9bc2 100644 --- a/src/crypto/internal/fips140/mlkem/mlkem1024.go +++ b/src/crypto/internal/fips140/mlkem/mlkem1024.go @@ -369,11 +369,12 @@ func pkeEncrypt1024(cc *[CiphertextSize1024]byte, ex *encryptionKey1024, m *[mes u := make([]ringElement, k1024) // NTT⁻¹(AT ◦ r) + e1 for i := range u { - u[i] = e1[i] + var uHat nttElement for j := range r { // Note that i and j are inverted, as we need the transposed of A. - u[i] = polyAdd(u[i], inverseNTT(nttMul(ex.a[j*k1024+i], r[j]))) + uHat = polyAdd(uHat, nttMul(ex.a[j*k1024+i], r[j])) } + u[i] = polyAdd(e1[i], inverseNTT(uHat)) } μ := ringDecodeAndDecompress1(m) diff --git a/src/crypto/internal/fips140/mlkem/mlkem768.go b/src/crypto/internal/fips140/mlkem/mlkem768.go index 088c2954de..c4c3a9deaf 100644 --- a/src/crypto/internal/fips140/mlkem/mlkem768.go +++ b/src/crypto/internal/fips140/mlkem/mlkem768.go @@ -428,11 +428,12 @@ func pkeEncrypt(cc *[CiphertextSize768]byte, ex *encryptionKey, m *[messageSize] u := make([]ringElement, k) // NTT⁻¹(AT ◦ r) + e1 for i := range u { - u[i] = e1[i] + var uHat nttElement for j := range r { // Note that i and j are inverted, as we need the transposed of A. - u[i] = polyAdd(u[i], inverseNTT(nttMul(ex.a[j*k+i], r[j]))) + uHat = polyAdd(uHat, nttMul(ex.a[j*k+i], r[j])) } + u[i] = polyAdd(e1[i], inverseNTT(uHat)) } μ := ringDecodeAndDecompress1(m) diff --git a/src/crypto/mlkem/mlkem.go b/src/crypto/mlkem/mlkem.go index 69c0bc571f..176b79673b 100644 --- a/src/crypto/mlkem/mlkem.go +++ b/src/crypto/mlkem/mlkem.go @@ -11,7 +11,10 @@ // [NIST FIPS 203]: https://doi.org/10.6028/NIST.FIPS.203 package mlkem -import "crypto/internal/fips140/mlkem" +import ( + "crypto" + "crypto/internal/fips140/mlkem" +) const ( // SharedKeySize is the size of a shared key produced by ML-KEM. 
@@ -82,6 +85,16 @@ func (dk *DecapsulationKey768) EncapsulationKey() *EncapsulationKey768 { return &EncapsulationKey768{dk.key.EncapsulationKey()} } +// Encapsulator returns the encapsulation key, like +// [DecapsulationKey768.EncapsulationKey]. +// +// It implements [crypto.Decapsulator]. +func (dk *DecapsulationKey768) Encapsulator() crypto.Encapsulator { + return dk.EncapsulationKey() +} + +var _ crypto.Decapsulator = (*DecapsulationKey768)(nil) + // An EncapsulationKey768 is the public key used to produce ciphertexts to be // decapsulated by the corresponding DecapsulationKey768. type EncapsulationKey768 struct { @@ -108,6 +121,9 @@ func (ek *EncapsulationKey768) Bytes() []byte { // encapsulation key, drawing random bytes from the default crypto/rand source. // // The shared key must be kept secret. +// +// For testing, derandomized encapsulation is provided by the +// [crypto/mlkem/mlkemtest] package. func (ek *EncapsulationKey768) Encapsulate() (sharedKey, ciphertext []byte) { return ek.key.Encapsulate() } @@ -161,6 +177,16 @@ func (dk *DecapsulationKey1024) EncapsulationKey() *EncapsulationKey1024 { return &EncapsulationKey1024{dk.key.EncapsulationKey()} } +// Encapsulator returns the encapsulation key, like +// [DecapsulationKey1024.EncapsulationKey]. +// +// It implements [crypto.Decapsulator]. +func (dk *DecapsulationKey1024) Encapsulator() crypto.Encapsulator { + return dk.EncapsulationKey() +} + +var _ crypto.Decapsulator = (*DecapsulationKey1024)(nil) + // An EncapsulationKey1024 is the public key used to produce ciphertexts to be // decapsulated by the corresponding DecapsulationKey1024. type EncapsulationKey1024 struct { @@ -187,6 +213,9 @@ func (ek *EncapsulationKey1024) Bytes() []byte { // encapsulation key, drawing random bytes from the default crypto/rand source. // // The shared key must be kept secret. +// +// For testing, derandomized encapsulation is provided by the +// [crypto/mlkem/mlkemtest] package. 
func (ek *EncapsulationKey1024) Encapsulate() (sharedKey, ciphertext []byte) { return ek.key.Encapsulate() } diff --git a/src/crypto/mlkem/mlkem_test.go b/src/crypto/mlkem/mlkem_test.go index 207d6d48c3..922147ab15 100644 --- a/src/crypto/mlkem/mlkem_test.go +++ b/src/crypto/mlkem/mlkem_test.go @@ -2,12 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package mlkem +package mlkem_test import ( "bytes" "crypto/internal/fips140/mlkem" "crypto/internal/fips140/sha3" + . "crypto/mlkem" + "crypto/mlkem/mlkemtest" "crypto/rand" "encoding/hex" "flag" @@ -176,7 +178,7 @@ func TestAccumulated(t *testing.T) { s := sha3.NewShake128() o := sha3.NewShake128() seed := make([]byte, SeedSize) - var msg [32]byte + msg := make([]byte, 32) ct1 := make([]byte, CiphertextSize768) for i := 0; i < n; i++ { @@ -188,8 +190,11 @@ func TestAccumulated(t *testing.T) { ek := dk.EncapsulationKey() o.Write(ek.Bytes()) - s.Read(msg[:]) - k, ct := ek.key.EncapsulateInternal(&msg) + s.Read(msg) + k, ct, err := mlkemtest.Encapsulate768(ek, msg) + if err != nil { + t.Fatal(err) + } o.Write(ct) o.Write(k) @@ -231,8 +236,6 @@ func BenchmarkKeyGen(b *testing.B) { func BenchmarkEncaps(b *testing.B) { seed := make([]byte, SeedSize) rand.Read(seed) - var m [32]byte - rand.Read(m[:]) dk, err := NewDecapsulationKey768(seed) if err != nil { b.Fatal(err) @@ -244,7 +247,7 @@ func BenchmarkEncaps(b *testing.B) { if err != nil { b.Fatal(err) } - K, c := ek.key.EncapsulateInternal(&m) + K, c := ek.Encapsulate() sink ^= c[0] ^ K[0] } } diff --git a/src/crypto/mlkem/mlkemtest/mlkemtest.go b/src/crypto/mlkem/mlkemtest/mlkemtest.go new file mode 100644 index 0000000000..39e3994ea9 --- /dev/null +++ b/src/crypto/mlkem/mlkemtest/mlkemtest.go @@ -0,0 +1,46 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package mlkemtest provides testing functions for the ML-KEM algorithm. +package mlkemtest + +import ( + fips140mlkem "crypto/internal/fips140/mlkem" + "crypto/mlkem" + "errors" +) + +// Encapsulate768 implements derandomized ML-KEM-768 encapsulation +// (ML-KEM.Encaps_internal from FIPS 203) using the provided encapsulation key +// ek and 32 bytes of randomness. +// +// It must only be used for known-answer tests. +func Encapsulate768(ek *mlkem.EncapsulationKey768, random []byte) (sharedKey, ciphertext []byte, err error) { + if len(random) != 32 { + return nil, nil, errors.New("mlkemtest: Encapsulate768: random must be 32 bytes") + } + k, err := fips140mlkem.NewEncapsulationKey768(ek.Bytes()) + if err != nil { + return nil, nil, errors.New("mlkemtest: Encapsulate768: failed to reconstruct key: " + err.Error()) + } + sharedKey, ciphertext = k.EncapsulateInternal((*[32]byte)(random)) + return sharedKey, ciphertext, nil +} + +// Encapsulate1024 implements derandomized ML-KEM-1024 encapsulation +// (ML-KEM.Encaps_internal from FIPS 203) using the provided encapsulation key +// ek and 32 bytes of randomness. +// +// It must only be used for known-answer tests. 
+func Encapsulate1024(ek *mlkem.EncapsulationKey1024, random []byte) (sharedKey, ciphertext []byte, err error) { + if len(random) != 32 { + return nil, nil, errors.New("mlkemtest: Encapsulate1024: random must be 32 bytes") + } + k, err := fips140mlkem.NewEncapsulationKey1024(ek.Bytes()) + if err != nil { + return nil, nil, errors.New("mlkemtest: Encapsulate1024: failed to reconstruct key: " + err.Error()) + } + sharedKey, ciphertext = k.EncapsulateInternal((*[32]byte)(random)) + return sharedKey, ciphertext, nil +} diff --git a/src/crypto/rsa/equal_test.go b/src/crypto/rsa/equal_test.go index 435429c3d1..39a9cdc86c 100644 --- a/src/crypto/rsa/equal_test.go +++ b/src/crypto/rsa/equal_test.go @@ -21,7 +21,7 @@ func TestEqual(t *testing.T) { t.Errorf("public key is not equal to itself: %v", public) } if !public.Equal(crypto.Signer(private).Public().(*rsa.PublicKey)) { - t.Errorf("private.Public() is not Equal to public: %q", public) + t.Errorf("private.Public() is not Equal to public: %v", public) } if !private.Equal(private) { t.Errorf("private key is not equal to itself: %v", private) diff --git a/src/crypto/rsa/pkcs1v15.go b/src/crypto/rsa/pkcs1v15.go index f1e4ef48a4..76853a9445 100644 --- a/src/crypto/rsa/pkcs1v15.go +++ b/src/crypto/rsa/pkcs1v15.go @@ -18,6 +18,12 @@ import ( // PKCS1v15DecryptOptions is for passing options to PKCS #1 v1.5 decryption using // the [crypto.Decrypter] interface. +// +// Deprecated: PKCS #1 v1.5 encryption is dangerous and should not be used. +// See [draft-irtf-cfrg-rsa-guidance-05] for more information. Use +// [EncryptOAEP] and [DecryptOAEP] instead. +// +// [draft-irtf-cfrg-rsa-guidance-05]: https://www.ietf.org/archive/id/draft-irtf-cfrg-rsa-guidance-05.html#name-rationale type PKCS1v15DecryptOptions struct { // SessionKeyLen is the length of the session key that is being // decrypted. 
If not zero, then a padding error during decryption will @@ -37,8 +43,11 @@ type PKCS1v15DecryptOptions struct { // deterministically on the bytes read from random, and may change // between calls and/or between versions. // -// WARNING: use of this function to encrypt plaintexts other than -// session keys is dangerous. Use RSA OAEP in new protocols. +// Deprecated: PKCS #1 v1.5 encryption is dangerous and should not be used. +// See [draft-irtf-cfrg-rsa-guidance-05] for more information. Use +// [EncryptOAEP] and [DecryptOAEP] instead. +// +// [draft-irtf-cfrg-rsa-guidance-05]: https://www.ietf.org/archive/id/draft-irtf-cfrg-rsa-guidance-05.html#name-rationale func EncryptPKCS1v15(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error) { if fips140only.Enabled { return nil, errors.New("crypto/rsa: use of PKCS#1 v1.5 encryption is not allowed in FIPS 140-only mode") @@ -91,14 +100,17 @@ func EncryptPKCS1v15(random io.Reader, pub *PublicKey, msg []byte) ([]byte, erro return rsa.Encrypt(fk, em) } -// DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS #1 v1.5. -// The random parameter is legacy and ignored, and it can be nil. +// DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from +// PKCS #1 v1.5. The random parameter is legacy and ignored, and it can be nil. // -// Note that whether this function returns an error or not discloses secret -// information. If an attacker can cause this function to run repeatedly and -// learn whether each instance returned an error then they can decrypt and -// forge signatures as if they had the private key. See -// DecryptPKCS1v15SessionKey for a way of solving this problem. +// Deprecated: PKCS #1 v1.5 encryption is dangerous and should not be used. +// Whether this function returns an error or not discloses secret information. 
+// If an attacker can cause this function to run repeatedly and learn whether +// each instance returned an error then they can decrypt and forge signatures as +// if they had the private key. See [draft-irtf-cfrg-rsa-guidance-05] for more +// information. Use [EncryptOAEP] and [DecryptOAEP] instead. +// +// [draft-irtf-cfrg-rsa-guidance-05]: https://www.ietf.org/archive/id/draft-irtf-cfrg-rsa-guidance-05.html#name-rationale func DecryptPKCS1v15(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error) { if err := checkPublicKeySize(&priv.PublicKey); err != nil { return nil, err @@ -160,6 +172,13 @@ func DecryptPKCS1v15(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]b // Standard PKCS #1”, Daniel Bleichenbacher, Advances in Cryptology (Crypto '98) // - [1] RFC 3218, Preventing the Million Message Attack on CMS, // https://www.rfc-editor.org/rfc/rfc3218.html +// +// Deprecated: PKCS #1 v1.5 encryption is dangerous and should not be used. The +// protections implemented by this function are limited and fragile, as +// explained above. See [draft-irtf-cfrg-rsa-guidance-05] for more information. +// Use [EncryptOAEP] and [DecryptOAEP] instead. 
+// +// [draft-irtf-cfrg-rsa-guidance-05]: https://www.ietf.org/archive/id/draft-irtf-cfrg-rsa-guidance-05.html#name-rationale func DecryptPKCS1v15SessionKey(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error { if err := checkPublicKeySize(&priv.PublicKey); err != nil { return err diff --git a/src/crypto/tls/bogo_shim_test.go b/src/crypto/tls/bogo_shim_test.go index 8f171d9259..02a943c13c 100644 --- a/src/crypto/tls/bogo_shim_test.go +++ b/src/crypto/tls/bogo_shim_test.go @@ -461,7 +461,7 @@ func bogoShim() { } if *expectVersion != 0 && cs.Version != uint16(*expectVersion) { - log.Fatalf("expected ssl version %q, got %q", uint16(*expectVersion), cs.Version) + log.Fatalf("expected ssl version %d, got %d", *expectVersion, cs.Version) } if *declineALPN && cs.NegotiatedProtocol != "" { log.Fatal("unexpected ALPN protocol") diff --git a/src/database/sql/fakedb_test.go b/src/database/sql/fakedb_test.go index 003e6c6298..e5f0459303 100644 --- a/src/database/sql/fakedb_test.go +++ b/src/database/sql/fakedb_test.go @@ -969,7 +969,7 @@ func (s *fakeStmt) QueryContext(ctx context.Context, args []driver.NamedValue) ( idx := t.columnIndex(wcol.Column) if idx == -1 { t.mu.Unlock() - return nil, fmt.Errorf("fakedb: invalid where clause column %q", wcol) + return nil, fmt.Errorf("fakedb: invalid where clause column %v", wcol) } tcol := trow.cols[idx] if bs, ok := tcol.([]byte); ok { diff --git a/src/debug/elf/elf.go b/src/debug/elf/elf.go index 58e37daed2..557648ece9 100644 --- a/src/debug/elf/elf.go +++ b/src/debug/elf/elf.go @@ -2305,6 +2305,8 @@ const ( R_LARCH_TLS_TPREL32 R_LARCH = 10 R_LARCH_TLS_TPREL64 R_LARCH = 11 R_LARCH_IRELATIVE R_LARCH = 12 + R_LARCH_TLS_DESC32 R_LARCH = 13 + R_LARCH_TLS_DESC64 R_LARCH = 14 R_LARCH_MARK_LA R_LARCH = 20 R_LARCH_MARK_PCREL R_LARCH = 21 R_LARCH_SOP_PUSH_PCREL R_LARCH = 22 @@ -2390,6 +2392,23 @@ const ( R_LARCH_ADD_ULEB128 R_LARCH = 107 R_LARCH_SUB_ULEB128 R_LARCH = 108 R_LARCH_64_PCREL R_LARCH = 109 + R_LARCH_CALL36 
R_LARCH = 110 + R_LARCH_TLS_DESC_PC_HI20 R_LARCH = 111 + R_LARCH_TLS_DESC_PC_LO12 R_LARCH = 112 + R_LARCH_TLS_DESC64_PC_LO20 R_LARCH = 113 + R_LARCH_TLS_DESC64_PC_HI12 R_LARCH = 114 + R_LARCH_TLS_DESC_HI20 R_LARCH = 115 + R_LARCH_TLS_DESC_LO12 R_LARCH = 116 + R_LARCH_TLS_DESC64_LO20 R_LARCH = 117 + R_LARCH_TLS_DESC64_HI12 R_LARCH = 118 + R_LARCH_TLS_DESC_LD R_LARCH = 119 + R_LARCH_TLS_DESC_CALL R_LARCH = 120 + R_LARCH_TLS_LE_HI20_R R_LARCH = 121 + R_LARCH_TLS_LE_ADD_R R_LARCH = 122 + R_LARCH_TLS_LE_LO12_R R_LARCH = 123 + R_LARCH_TLS_LD_PCREL20_S2 R_LARCH = 124 + R_LARCH_TLS_GD_PCREL20_S2 R_LARCH = 125 + R_LARCH_TLS_DESC_PCREL20_S2 R_LARCH = 126 ) var rlarchStrings = []intName{ @@ -2406,6 +2425,8 @@ var rlarchStrings = []intName{ {10, "R_LARCH_TLS_TPREL32"}, {11, "R_LARCH_TLS_TPREL64"}, {12, "R_LARCH_IRELATIVE"}, + {13, "R_LARCH_TLS_DESC32"}, + {14, "R_LARCH_TLS_DESC64"}, {20, "R_LARCH_MARK_LA"}, {21, "R_LARCH_MARK_PCREL"}, {22, "R_LARCH_SOP_PUSH_PCREL"}, @@ -2491,6 +2512,23 @@ var rlarchStrings = []intName{ {107, "R_LARCH_ADD_ULEB128"}, {108, "R_LARCH_SUB_ULEB128"}, {109, "R_LARCH_64_PCREL"}, + {110, "R_LARCH_CALL36"}, + {111, "R_LARCH_TLS_DESC_PC_HI20"}, + {112, "R_LARCH_TLS_DESC_PC_LO12"}, + {113, "R_LARCH_TLS_DESC64_PC_LO20"}, + {114, "R_LARCH_TLS_DESC64_PC_HI12"}, + {115, "R_LARCH_TLS_DESC_HI20"}, + {116, "R_LARCH_TLS_DESC_LO12"}, + {117, "R_LARCH_TLS_DESC64_LO20"}, + {118, "R_LARCH_TLS_DESC64_HI12"}, + {119, "R_LARCH_TLS_DESC_LD"}, + {120, "R_LARCH_TLS_DESC_CALL"}, + {121, "R_LARCH_TLS_LE_HI20_R"}, + {122, "R_LARCH_TLS_LE_ADD_R"}, + {123, "R_LARCH_TLS_LE_LO12_R"}, + {124, "R_LARCH_TLS_LD_PCREL20_S2"}, + {125, "R_LARCH_TLS_GD_PCREL20_S2"}, + {126, "R_LARCH_TLS_DESC_PCREL20_S2"}, } func (i R_LARCH) String() string { return stringName(uint32(i), rlarchStrings, false) } diff --git a/src/debug/elf/elf_test.go b/src/debug/elf/elf_test.go index 0350d53050..256f850f96 100644 --- a/src/debug/elf/elf_test.go +++ b/src/debug/elf/elf_test.go @@ -34,6 +34,7 @@ var 
nameTests = []nameTest{ {R_ALPHA_OP_PUSH, "R_ALPHA_OP_PUSH"}, {R_ARM_THM_ABS5, "R_ARM_THM_ABS5"}, {R_386_GOT32, "R_386_GOT32"}, + {R_LARCH_CALL36, "R_LARCH_CALL36"}, {R_PPC_GOT16_HI, "R_PPC_GOT16_HI"}, {R_SPARC_GOT22, "R_SPARC_GOT22"}, {ET_LOOS + 5, "ET_LOOS+5"}, diff --git a/src/fmt/scan_test.go b/src/fmt/scan_test.go index a4f80c23c2..ff9e4f30ca 100644 --- a/src/fmt/scan_test.go +++ b/src/fmt/scan_test.go @@ -998,7 +998,7 @@ func TestScanStateCount(t *testing.T) { t.Errorf("bad scan rune: %q %q %q should be '1' '2' '➂'", a.rune, b.rune, c.rune) } if a.size != 1 || b.size != 1 || c.size != 3 { - t.Errorf("bad scan size: %q %q %q should be 1 1 3", a.size, b.size, c.size) + t.Errorf("bad scan size: %d %d %d should be 1 1 3", a.size, b.size, c.size) } } diff --git a/src/go.mod b/src/go.mod index c5e901b9ef..e6cb3d5b43 100644 --- a/src/go.mod +++ b/src/go.mod @@ -8,6 +8,6 @@ require ( ) require ( - golang.org/x/sys v0.37.0 // indirect + golang.org/x/sys v0.38.0 // indirect golang.org/x/text v0.30.0 // indirect ) diff --git a/src/go.sum b/src/go.sum index 4a52682161..fe184a8647 100644 --- a/src/go.sum +++ b/src/go.sum @@ -2,7 +2,7 @@ golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= diff --git a/src/go/ast/ast.go b/src/go/ast/ast.go index 
a6dab5bb51..37fc3c9666 100644 --- a/src/go/ast/ast.go +++ b/src/go/ast/ast.go @@ -312,11 +312,10 @@ type ( // // For raw string literals (Kind == token.STRING && Value[0] == '`'), // the Value field contains the string text without carriage returns (\r) that - // may have been present in the source. Because the end position is - // computed using len(Value), the position reported by [BasicLit.End] does not match the - // true source end position for raw string literals containing carriage returns. + // may have been present in the source. BasicLit struct { ValuePos token.Pos // literal position + ValueEnd token.Pos // position immediately after the literal Kind token.Token // token.INT, token.FLOAT, token.IMAG, token.CHAR, or token.STRING Value string // literal string; e.g. 42, 0x7f, 3.14, 1e-9, 2.4i, 'a', '\x7f', "foo" or `\m\n\o` } @@ -535,7 +534,15 @@ func (x *Ellipsis) End() token.Pos { } return x.Ellipsis + 3 // len("...") } -func (x *BasicLit) End() token.Pos { return token.Pos(int(x.ValuePos) + len(x.Value)) } +func (x *BasicLit) End() token.Pos { + if !x.ValueEnd.IsValid() { + // Not from parser; use a heuristic. + // (Incorrect for `...` containing \r\n; + // see https://go.dev/issue/76031.) 
+ return token.Pos(int(x.ValuePos) + len(x.Value)) + } + return x.ValueEnd +} func (x *FuncLit) End() token.Pos { return x.Body.End() } func (x *CompositeLit) End() token.Pos { return x.Rbrace + 1 } func (x *ParenExpr) End() token.Pos { return x.Rparen + 1 } diff --git a/src/go/ast/commentmap_test.go b/src/go/ast/commentmap_test.go index f0faeed610..0d5e8de013 100644 --- a/src/go/ast/commentmap_test.go +++ b/src/go/ast/commentmap_test.go @@ -109,7 +109,7 @@ func TestCommentMap(t *testing.T) { } cmap := NewCommentMap(fset, f, f.Comments) - // very correct association of comments + // verify correct association of comments for n, list := range cmap { key := fmt.Sprintf("%2d: %T", fset.Position(n.Pos()).Line, n) got := ctext(list) diff --git a/src/go/ast/example_test.go b/src/go/ast/example_test.go index 31b32efece..36daa7e7e1 100644 --- a/src/go/ast/example_test.go +++ b/src/go/ast/example_test.go @@ -113,31 +113,32 @@ func main() { // 34 . . . . . . . Args: []ast.Expr (len = 1) { // 35 . . . . . . . . 0: *ast.BasicLit { // 36 . . . . . . . . . ValuePos: 4:10 - // 37 . . . . . . . . . Kind: STRING - // 38 . . . . . . . . . Value: "\"Hello, World!\"" - // 39 . . . . . . . . } - // 40 . . . . . . . } - // 41 . . . . . . . Ellipsis: - - // 42 . . . . . . . Rparen: 4:25 - // 43 . . . . . . } - // 44 . . . . . } - // 45 . . . . } - // 46 . . . . Rbrace: 5:1 - // 47 . . . } - // 48 . . } - // 49 . } - // 50 . FileStart: 1:1 - // 51 . FileEnd: 5:3 - // 52 . Scope: *ast.Scope { - // 53 . . Objects: map[string]*ast.Object (len = 1) { - // 54 . . . "main": *(obj @ 11) - // 55 . . } - // 56 . } - // 57 . Unresolved: []*ast.Ident (len = 1) { - // 58 . . 0: *(obj @ 29) - // 59 . } - // 60 . GoVersion: "" - // 61 } + // 37 . . . . . . . . . ValueEnd: 4:25 + // 38 . . . . . . . . . Kind: STRING + // 39 . . . . . . . . . Value: "\"Hello, World!\"" + // 40 . . . . . . . . } + // 41 . . . . . . . } + // 42 . . . . . . . Ellipsis: - + // 43 . . . . . . . Rparen: 4:25 + // 44 . . . . . 
. } + // 45 . . . . . } + // 46 . . . . } + // 47 . . . . Rbrace: 5:1 + // 48 . . . } + // 49 . . } + // 50 . } + // 51 . FileStart: 1:1 + // 52 . FileEnd: 5:3 + // 53 . Scope: *ast.Scope { + // 54 . . Objects: map[string]*ast.Object (len = 1) { + // 55 . . . "main": *(obj @ 11) + // 56 . . } + // 57 . } + // 58 . Unresolved: []*ast.Ident (len = 1) { + // 59 . . 0: *(obj @ 29) + // 60 . } + // 61 . GoVersion: "" + // 62 } } func ExamplePreorder() { diff --git a/src/go/ast/issues_test.go b/src/go/ast/issues_test.go index 28d6a30fbb..f5e26af462 100644 --- a/src/go/ast/issues_test.go +++ b/src/go/ast/issues_test.go @@ -30,10 +30,10 @@ func TestIssue33649(t *testing.T) { tf = f return true }) - tfEnd := tf.Base() + tf.Size() + tfEnd := tf.End() fd := f.Decls[len(f.Decls)-1].(*ast.FuncDecl) - fdEnd := int(fd.End()) + fdEnd := fd.End() if fdEnd != tfEnd { t.Errorf("%q: got fdEnd = %d; want %d (base = %d, size = %d)", src, fdEnd, tfEnd, tf.Base(), tf.Size()) diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go index 8db0b5e92e..1b6e32d07c 100644 --- a/src/go/build/deps_test.go +++ b/src/go/build/deps_test.go @@ -339,6 +339,7 @@ var depsRules = ` < internal/gover < go/version < go/token + < go/internal/scannerhooks < go/scanner < go/ast < go/internal/typeparams; @@ -737,6 +738,9 @@ var depsRules = ` testing < internal/testhash; + CRYPTO-MATH + < crypto/mlkem/mlkemtest; + CRYPTO-MATH, testing, internal/testenv, internal/testhash, encoding/json < crypto/internal/cryptotest; diff --git a/src/go/internal/scannerhooks/hooks.go b/src/go/internal/scannerhooks/hooks.go new file mode 100644 index 0000000000..057261df06 --- /dev/null +++ b/src/go/internal/scannerhooks/hooks.go @@ -0,0 +1,11 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scannerhooks defines nonexported channels between parser and scanner. 
+// Ideally this package could be eliminated by adding API to scanner. +package scannerhooks + +import "go/token" + +var StringEnd func(scanner any) token.Pos diff --git a/src/go/parser/interface.go b/src/go/parser/interface.go index a9a1cfb736..ec9cb469ed 100644 --- a/src/go/parser/interface.go +++ b/src/go/parser/interface.go @@ -120,7 +120,7 @@ func ParseFile(fset *token.FileSet, filename string, src any, mode Mode) (f *ast // Ensure the start/end are consistent, // whether parsing succeeded or not. f.FileStart = token.Pos(file.Base()) - f.FileEnd = token.Pos(file.Base() + file.Size()) + f.FileEnd = file.End() p.errors.Sort() err = p.errors.Err() diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go index e725371e76..e01a221968 100644 --- a/src/go/parser/parser.go +++ b/src/go/parser/parser.go @@ -28,6 +28,7 @@ import ( "fmt" "go/ast" "go/build/constraint" + "go/internal/scannerhooks" "go/scanner" "go/token" "strings" @@ -52,9 +53,10 @@ type parser struct { goVersion string // minimum Go version found in //go:build comment // Next token - pos token.Pos // token position - tok token.Token // one token look-ahead - lit string // token literal + pos token.Pos // token position + tok token.Token // one token look-ahead + lit string // token literal + stringEnd token.Pos // position immediately after token; STRING only // Error recovery // (used to limit the number of calls to parser.advance @@ -163,6 +165,10 @@ func (p *parser) next0() { continue } } else { + if p.tok == token.STRING { + p.stringEnd = scannerhooks.StringEnd(&p.scanner) + } + // Found a non-comment; top of file is over. 
p.top = false } @@ -720,7 +726,7 @@ func (p *parser) parseFieldDecl() *ast.Field { var tag *ast.BasicLit if p.tok == token.STRING { - tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit} + tag = &ast.BasicLit{ValuePos: p.pos, ValueEnd: p.stringEnd, Kind: p.tok, Value: p.lit} p.next() } @@ -1474,7 +1480,11 @@ func (p *parser) parseOperand() ast.Expr { return x case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING: - x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit} + end := p.pos + token.Pos(len(p.lit)) + if p.tok == token.STRING { + end = p.stringEnd + } + x := &ast.BasicLit{ValuePos: p.pos, ValueEnd: end, Kind: p.tok, Value: p.lit} p.next() return x @@ -2511,9 +2521,11 @@ func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) as } pos := p.pos + end := p.pos var path string if p.tok == token.STRING { path = p.lit + end = p.stringEnd p.next() } else if p.tok.IsLiteral() { p.error(pos, "import path must be a string") @@ -2528,7 +2540,7 @@ func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) as spec := &ast.ImportSpec{ Doc: doc, Name: ident, - Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path}, + Path: &ast.BasicLit{ValuePos: pos, ValueEnd: end, Kind: token.STRING, Value: path}, Comment: comment, } p.imports = append(p.imports, spec) diff --git a/src/go/parser/parser_test.go b/src/go/parser/parser_test.go index 87b7d7bbab..8118189230 100644 --- a/src/go/parser/parser_test.go +++ b/src/go/parser/parser_test.go @@ -946,3 +946,53 @@ func _() {} t.Errorf("unexpected doc comment %v", docComment2) } } + +// Tests of BasicLit.End() method, which in go1.26 started precisely +// recording the Value token's end position instead of heuristically +// computing it, which is inaccurate for strings containing "\r". +func TestBasicLit_End(t *testing.T) { + // lit is a raw string literal containing [a b c \r \n], + // denoting "abc\n", because the scanner normalizes \r\n to \n. 
+ const stringlit = "`abc\r\n`" + + // The semicolons exercise the case in which the next token + // (a SEMICOLON implied by a \n) isn't immediate but follows + // some horizontal space. + const src = `package p + +import ` + stringlit + ` ; + +type _ struct{ x int ` + stringlit + ` } + +const _ = ` + stringlit + ` ; +` + + fset := token.NewFileSet() + f, _ := ParseFile(fset, "", src, ParseComments|SkipObjectResolution) + tokFile := fset.File(f.Pos()) + + count := 0 + ast.Inspect(f, func(n ast.Node) bool { + if lit, ok := n.(*ast.BasicLit); ok { + count++ + var ( + start = tokFile.Offset(lit.Pos()) + end = tokFile.Offset(lit.End()) + ) + + // Check BasicLit.Value. + if want := "`abc\n`"; lit.Value != want { + t.Errorf("%s: BasicLit.Value = %q, want %q", fset.Position(lit.Pos()), lit.Value, want) + } + + // Check source extent. + if got := src[start:end]; got != stringlit { + t.Errorf("%s: src[BasicLit.Pos:End] = %q, want %q", fset.Position(lit.Pos()), got, stringlit) + } + } + return true + }) + if count != 3 { + t.Errorf("found %d BasicLit, want 3", count) + } +} diff --git a/src/go/scanner/scanner.go b/src/go/scanner/scanner.go index cdbeb6323c..07d987c88f 100644 --- a/src/go/scanner/scanner.go +++ b/src/go/scanner/scanner.go @@ -10,6 +10,7 @@ package scanner import ( "bytes" "fmt" + "go/internal/scannerhooks" "go/token" "path/filepath" "strconv" @@ -41,11 +42,19 @@ type Scanner struct { lineOffset int // current line offset insertSemi bool // insert a semicolon before next newline nlPos token.Pos // position of newline in preceding comment + stringEnd token.Pos // end position; defined only for STRING tokens // public state - ok to modify ErrorCount int // number of errors encountered } +// Provide go/parser with backdoor access to the StringEnd information. 
+func init() { + scannerhooks.StringEnd = func(scanner any) token.Pos { + return scanner.(*Scanner).stringEnd + } +} + const ( bom = 0xFEFF // byte order mark, only permitted as very first character eof = -1 // end of file @@ -691,7 +700,7 @@ func stripCR(b []byte, comment bool) []byte { return c[:i] } -func (s *Scanner) scanRawString() string { +func (s *Scanner) scanRawString() (string, int) { // '`' opening already consumed offs := s.offset - 1 @@ -712,11 +721,12 @@ func (s *Scanner) scanRawString() string { } lit := s.src[offs:s.offset] + rawLen := len(lit) if hasCR { lit = stripCR(lit, false) } - return string(lit) + return string(lit), rawLen } func (s *Scanner) skipWhitespace() { @@ -850,6 +860,7 @@ scanAgain: insertSemi = true tok = token.STRING lit = s.scanString() + s.stringEnd = pos + token.Pos(len(lit)) case '\'': insertSemi = true tok = token.CHAR @@ -857,7 +868,9 @@ scanAgain: case '`': insertSemi = true tok = token.STRING - lit = s.scanRawString() + var rawLen int + lit, rawLen = s.scanRawString() + s.stringEnd = pos + token.Pos(rawLen) case ':': tok = s.switch2(token.COLON, token.DEFINE) case '.': diff --git a/src/go/token/position.go b/src/go/token/position.go index 39756f257d..37017d4374 100644 --- a/src/go/token/position.go +++ b/src/go/token/position.go @@ -127,6 +127,11 @@ func (f *File) Size() int { return f.size } +// End returns the end position of file f as registered with AddFile. +func (f *File) End() Pos { + return Pos(f.base + f.size) +} + // LineCount returns the number of lines in file f. 
func (f *File) LineCount() int { f.mutex.Lock() diff --git a/src/go/token/position_test.go b/src/go/token/position_test.go index c588a34d3d..3d02068ebf 100644 --- a/src/go/token/position_test.go +++ b/src/go/token/position_test.go @@ -572,7 +572,7 @@ func fsetString(fset *FileSet) string { buf.WriteRune('{') sep := "" fset.Iterate(func(f *File) bool { - fmt.Fprintf(&buf, "%s%s:%d-%d", sep, f.Name(), f.Base(), f.Base()+f.Size()) + fmt.Fprintf(&buf, "%s%s:%d-%d", sep, f.Name(), f.Base(), f.End()) sep = " " return true }) @@ -643,3 +643,11 @@ func TestRemovedFileFileReturnsNil(t *testing.T) { } } } + +func TestFile_End(t *testing.T) { + f := NewFileSet().AddFile("a.go", 100, 42) + got := fmt.Sprintf("%d, %d", f.Base(), f.End()) + if want := "100, 142"; got != want { + t.Errorf("Base, End = %s, want %s", got, want) + } +} diff --git a/src/go/token/tree.go b/src/go/token/tree.go index 7ba927c606..b2ca09f2c0 100644 --- a/src/go/token/tree.go +++ b/src/go/token/tree.go @@ -313,8 +313,8 @@ func (t *tree) add(file *File) { } if prev := (*pos).file; prev != file { panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", - prev.Name(), prev.Base(), prev.Base()+prev.Size(), - file.Name(), file.Base(), file.Base()+file.Size())) + prev.Name(), prev.Base(), prev.End(), + file.Name(), file.Base(), file.End())) } } diff --git a/src/go/types/check.go b/src/go/types/check.go index 44d3ae5586..638b1f6fcc 100644 --- a/src/go/types/check.go +++ b/src/go/types/check.go @@ -191,12 +191,13 @@ type Checker struct { usedPkgNames map[*PkgName]bool // set of used package names mono monoGraph // graph for detecting non-monomorphizable instantiation loops - firstErr error // first error encountered - methods map[*TypeName][]*Func // maps package scope type names to associated non-blank (non-interface) methods - untyped map[ast.Expr]exprInfo // map of expressions without final type - delayed []action // stack of delayed action segments; segments are processed in FIFO order - objPath 
[]Object // path of object dependencies during type inference (for cycle reporting) - cleaners []cleaner // list of types that may need a final cleanup at the end of type-checking + firstErr error // first error encountered + methods map[*TypeName][]*Func // maps package scope type names to associated non-blank (non-interface) methods + untyped map[ast.Expr]exprInfo // map of expressions without final type + delayed []action // stack of delayed action segments; segments are processed in FIFO order + objPath []Object // path of object dependencies during type-checking (for cycle reporting) + objPathIdx map[Object]int // map of object to object path index during type-checking (for cycle reporting) + cleaners []cleaner // list of types that may need a final cleanup at the end of type-checking // environment within which the current object is type-checked (valid only // for the duration of type-checking a specific object) @@ -268,19 +269,22 @@ func (check *Checker) later(f func()) *action { return &check.delayed[i] } -// push pushes obj onto the object path and returns its index in the path. -func (check *Checker) push(obj Object) int { +// push pushes obj onto the object path and records its index in the path index map. +func (check *Checker) push(obj Object) { + if check.objPathIdx == nil { + check.objPathIdx = make(map[Object]int) + } + check.objPathIdx[obj] = len(check.objPath) check.objPath = append(check.objPath, obj) - return len(check.objPath) - 1 } -// pop pops and returns the topmost object from the object path. -func (check *Checker) pop() Object { +// pop pops an object from the object path and removes it from the path index map. 
+func (check *Checker) pop() { i := len(check.objPath) - 1 obj := check.objPath[i] - check.objPath[i] = nil + check.objPath[i] = nil // help the garbage collector check.objPath = check.objPath[:i] - return obj + delete(check.objPathIdx, obj) } type cleaner interface { @@ -343,6 +347,7 @@ func (check *Checker) initFiles(files []*ast.File) { check.untyped = nil check.delayed = nil check.objPath = nil + check.objPathIdx = nil check.cleaners = nil // We must initialize usedVars and usedPkgNames both here and in NewChecker, diff --git a/src/go/types/cycles.go b/src/go/types/cycles.go index 87e8e9729b..bd894258b1 100644 --- a/src/go/types/cycles.go +++ b/src/go/types/cycles.go @@ -1,3 +1,6 @@ +// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT. +// Source: ../../cmd/compile/internal/types2/cycles.go + // Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -54,7 +57,6 @@ func (check *Checker) directCycle(tname *TypeName, pathIdx map[*TypeName]int) { // tname is marked grey - we have a cycle on the path beginning at start. // Mark tname as invalid. 
tname.setType(Typ[Invalid]) - tname.setColor(black) // collect type names on cycle var cycle []Object diff --git a/src/go/types/decl.go b/src/go/types/decl.go index 2dab5cf7b9..fffcb590e6 100644 --- a/src/go/types/decl.go +++ b/src/go/types/decl.go @@ -63,114 +63,77 @@ func (check *Checker) objDecl(obj Object, def *TypeName) { if check.indent == 0 { fmt.Println() // empty line between top-level objects for readability } - check.trace(obj.Pos(), "-- checking %s (%s, objPath = %s)", obj, obj.color(), pathString(check.objPath)) + check.trace(obj.Pos(), "-- checking %s (objPath = %s)", obj, pathString(check.objPath)) check.indent++ defer func() { check.indent-- - check.trace(obj.Pos(), "=> %s (%s)", obj, obj.color()) + check.trace(obj.Pos(), "=> %s", obj) }() } - // Checking the declaration of obj means inferring its type - // (and possibly its value, for constants). - // An object's type (and thus the object) may be in one of - // three states which are expressed by colors: + // Checking the declaration of an object means determining its type + // (and also its value for constants). An object (and thus its type) + // may be in 1 of 3 states: // - // - an object whose type is not yet known is painted white (initial color) - // - an object whose type is in the process of being inferred is painted grey - // - an object whose type is fully inferred is painted black + // - not in Checker.objPathIdx and type == nil : type is not yet known (white) + // - in Checker.objPathIdx : type is pending (grey) + // - not in Checker.objPathIdx and type != nil : type is known (black) // - // During type inference, an object's color changes from white to grey - // to black (pre-declared objects are painted black from the start). - // A black object (i.e., its type) can only depend on (refer to) other black - // ones. White and grey objects may depend on white and black objects. - // A dependency on a grey object indicates a cycle which may or may not be - // valid. 
+ // During type-checking, an object changes from white to grey to black. + // Predeclared objects start as black (their type is known without checking). // - // When objects turn grey, they are pushed on the object path (a stack); - // they are popped again when they turn black. Thus, if a grey object (a - // cycle) is encountered, it is on the object path, and all the objects - // it depends on are the remaining objects on that path. Color encoding - // is such that the color value of a grey object indicates the index of - // that object in the object path. - - // During type-checking, white objects may be assigned a type without - // traversing through objDecl; e.g., when initializing constants and - // variables. Update the colors of those objects here (rather than - // everywhere where we set the type) to satisfy the color invariants. - if obj.color() == white && obj.Type() != nil { - obj.setColor(black) - return - } - - switch obj.color() { - case white: - assert(obj.Type() == nil) - // All color values other than white and black are considered grey. - // Because black and white are < grey, all values >= grey are grey. - // Use those values to encode the object's index into the object path. - obj.setColor(grey + color(check.push(obj))) - defer func() { - check.pop().setColor(black) - }() - - case black: - assert(obj.Type() != nil) - return - - default: - // Color values other than white or black are considered grey. - fallthrough + // A black object may only depend on (refer to) to other black objects. White + // and grey objects may depend on white or black objects. A dependency on a + // grey object indicates a (possibly invalid) cycle. + // + // When an object is marked grey, it is pushed onto the object path (a stack) + // and its index in the path is recorded in the path index map. It is popped + // and removed from the map when its type is determined (and marked black). - case grey: - // We have a (possibly invalid) cycle. 
- // In the existing code, this is marked by a non-nil type - // for the object except for constants and variables whose - // type may be non-nil (known), or nil if it depends on the - // not-yet known initialization value. - // In the former case, set the type to Typ[Invalid] because - // we have an initialization cycle. The cycle error will be - // reported later, when determining initialization order. - // TODO(gri) Report cycle here and simplify initialization - // order code. + // If this object is grey, we have a (possibly invalid) cycle. This is signaled + // by a non-nil type for the object, except for constants and variables whose + // type may be non-nil (known), or nil if it depends on a not-yet known + // initialization value. + // + // In the former case, set the type to Typ[Invalid] because we have an + // initialization cycle. The cycle error will be reported later, when + // determining initialization order. + // + // TODO(gri) Report cycle here and simplify initialization order code. + if _, ok := check.objPathIdx[obj]; ok { switch obj := obj.(type) { - case *Const: - if !check.validCycle(obj) || obj.typ == nil { - obj.typ = Typ[Invalid] - } - - case *Var: - if !check.validCycle(obj) || obj.typ == nil { - obj.typ = Typ[Invalid] + case *Const, *Var: + if !check.validCycle(obj) || obj.Type() == nil { + obj.setType(Typ[Invalid]) } - case *TypeName: if !check.validCycle(obj) { - // break cycle - // (without this, calling underlying() - // below may lead to an endless loop - // if we have a cycle for a defined - // (*Named) type) - obj.typ = Typ[Invalid] + obj.setType(Typ[Invalid]) } - case *Func: if !check.validCycle(obj) { - // Don't set obj.typ to Typ[Invalid] here - // because plenty of code type-asserts that - // functions have a *Signature type. Grey - // functions have their type set to an empty - // signature which makes it impossible to + // Don't set type to Typ[Invalid]; plenty of code asserts that + // functions have a *Signature type. 
Instead, leave the type + // as an empty signature, which makes it impossible to // initialize a variable with the function. } - default: panic("unreachable") } + assert(obj.Type() != nil) return } + if obj.Type() != nil { // black, meaning it's already type-checked + return + } + + // white, meaning it must be type-checked + + check.push(obj) // mark as grey + defer check.pop() + d := check.objMap[obj] if d == nil { check.dump("%v: %s should have been declared", obj.Pos(), obj) @@ -222,8 +185,8 @@ func (check *Checker) validCycle(obj Object) (valid bool) { } // Count cycle objects. - assert(obj.color() >= grey) - start := obj.color() - grey // index of obj in objPath + start, found := check.objPathIdx[obj] + assert(found) cycle := check.objPath[start:] tparCycle := false // if set, the cycle is through a type parameter list nval := 0 // number of (constant or variable) values in the cycle @@ -607,11 +570,16 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *TypeName check.collectTypeParams(&alias.tparams, tdecl.TypeParams) } - rhs = check.definedType(tdecl.Type, obj) + rhs = check.declaredType(tdecl.Type, obj) assert(rhs != nil) - alias.fromRHS = rhs - unalias(alias) // populate alias.actual + + // spec: In an alias declaration the given type cannot be a type parameter declared in the same declaration." + // (see also go.dev/issue/75884, go.dev/issue/#75885) + if tpar, ok := rhs.(*TypeParam); ok && alias.tparams != nil && slices.Index(alias.tparams.list(), tpar) >= 0 { + check.error(tdecl.Type, MisplacedTypeParam, "cannot use type parameter declared in alias declaration as RHS") + alias.fromRHS = Typ[Invalid] + } } else { // With Go1.23, the default behavior is to use Alias nodes, // reflected by check.enableAlias. Signal non-default behavior. 
@@ -658,7 +626,7 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *TypeName check.collectTypeParams(&named.tparams, tdecl.TypeParams) } - rhs = check.definedType(tdecl.Type, obj) + rhs = check.declaredType(tdecl.Type, obj) assert(rhs != nil) named.fromRHS = rhs @@ -857,17 +825,8 @@ func (check *Checker) funcDecl(obj *Func, decl *declInfo) { sig := new(Signature) obj.typ = sig // guard against cycles - // Avoid cycle error when referring to method while type-checking the signature. - // This avoids a nuisance in the best case (non-parameterized receiver type) and - // since the method is not a type, we get an error. If we have a parameterized - // receiver type, instantiating the receiver type leads to the instantiation of - // its methods, and we don't want a cycle error in that case. - // TODO(gri) review if this is correct and/or whether we still need this? - saved := obj.color_ - obj.color_ = black fdecl := decl.fdecl check.funcType(sig, fdecl.Recv, fdecl.Type) - obj.color_ = saved // Set the scope's extent to the complete "func (...) { ... }" // so that Scope.Innermost works correctly. @@ -980,10 +939,9 @@ func (check *Checker) declStmt(d ast.Decl) { // the innermost containing block." 
scopePos := d.spec.Name.Pos() check.declare(check.scope, d.spec.Name, obj, scopePos) - // mark and unmark type before calling typeDecl; its type is still nil (see Checker.objDecl) - obj.setColor(grey + color(check.push(obj))) + check.push(obj) // mark as grey check.typeDecl(obj, d.spec, nil) - check.pop().setColor(black) + check.pop() default: check.errorf(d.node(), InvalidSyntaxTree, "unknown ast.Decl node %T", d.node()) } diff --git a/src/go/types/generate_test.go b/src/go/types/generate_test.go index e5e0874d17..2fa72c28bf 100644 --- a/src/go/types/generate_test.go +++ b/src/go/types/generate_test.go @@ -126,6 +126,11 @@ var filemap = map[string]action{ "context.go": nil, "context_test.go": nil, "conversions.go": nil, + "cycles.go": func(f *ast.File) { + renameImportPath(f, `"cmd/compile/internal/syntax"->"go/ast"`) + renameSelectorExprs(f, "syntax.Name->ast.Ident", "rhs.Value->rhs.Name") + renameSelectors(f, "Trace->_Trace") + }, "errors_test.go": func(f *ast.File) { renameIdents(f, "nopos->noposn") }, "errsupport.go": nil, "gccgosizes.go": nil, @@ -145,7 +150,7 @@ var filemap = map[string]action{ renameIdents(f, "syntax->ast") renameSelectors(f, "ElemList->Elts") }, - "lookup.go": func(f *ast.File) { fixTokenPos(f) }, + "lookup.go": fixTokenPos, "main_test.go": nil, "map.go": nil, "mono.go": func(f *ast.File) { diff --git a/src/go/types/object.go b/src/go/types/object.go index 7bf705cb81..57158c1595 100644 --- a/src/go/types/object.go +++ b/src/go/types/object.go @@ -45,18 +45,12 @@ type Object interface { // 0 for all other objects (including objects in file scopes). order() uint32 - // color returns the object's color. - color() color - // setType sets the type of the object. setType(Type) // setOrder sets the order number of the object. It must be > 0. setOrder(uint32) - // setColor sets the object's color. It must not be white. - setColor(color color) - // setParent sets the parent scope of the object. 
setParent(*Scope) @@ -105,41 +99,9 @@ type object struct { name string typ Type order_ uint32 - color_ color scopePos_ token.Pos } -// color encodes the color of an object (see Checker.objDecl for details). -type color uint32 - -// An object may be painted in one of three colors. -// Color values other than white or black are considered grey. -const ( - white color = iota - black - grey // must be > white and black -) - -func (c color) String() string { - switch c { - case white: - return "white" - case black: - return "black" - default: - return "grey" - } -} - -// colorFor returns the (initial) color for an object depending on -// whether its type t is known or not. -func colorFor(t Type) color { - if t != nil { - return black - } - return white -} - // Parent returns the scope in which the object is declared. // The result is nil for methods and struct fields. func (obj *object) Parent() *Scope { return obj.parent } @@ -167,13 +129,11 @@ func (obj *object) Id() string { return Id(obj.pkg, obj.name) } func (obj *object) String() string { panic("abstract") } func (obj *object) order() uint32 { return obj.order_ } -func (obj *object) color() color { return obj.color_ } func (obj *object) scopePos() token.Pos { return obj.scopePos_ } func (obj *object) setParent(parent *Scope) { obj.parent = parent } func (obj *object) setType(typ Type) { obj.typ = typ } func (obj *object) setOrder(order uint32) { assert(order > 0); obj.order_ = order } -func (obj *object) setColor(color color) { assert(color != white); obj.color_ = color } func (obj *object) setScopePos(pos token.Pos) { obj.scopePos_ = pos } func (obj *object) sameId(pkg *Package, name string, foldCase bool) bool { @@ -250,7 +210,7 @@ type PkgName struct { // NewPkgName returns a new PkgName object representing an imported package. // The remaining arguments set the attributes found with all Objects. 
func NewPkgName(pos token.Pos, pkg *Package, name string, imported *Package) *PkgName { - return &PkgName{object{nil, pos, pkg, name, Typ[Invalid], 0, black, nopos}, imported} + return &PkgName{object{nil, pos, pkg, name, Typ[Invalid], 0, nopos}, imported} } // Imported returns the package that was imported. @@ -266,7 +226,7 @@ type Const struct { // NewConst returns a new constant with value val. // The remaining arguments set the attributes found with all Objects. func NewConst(pos token.Pos, pkg *Package, name string, typ Type, val constant.Value) *Const { - return &Const{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, val} + return &Const{object{nil, pos, pkg, name, typ, 0, nopos}, val} } // Val returns the constant's value. @@ -291,7 +251,7 @@ type TypeName struct { // argument for NewNamed, which will set the TypeName's type as a side- // effect. func NewTypeName(pos token.Pos, pkg *Package, name string, typ Type) *TypeName { - return &TypeName{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}} + return &TypeName{object{nil, pos, pkg, name, typ, 0, nopos}} } // NewTypeNameLazy returns a new defined type like NewTypeName, but it @@ -405,7 +365,7 @@ func NewField(pos token.Pos, pkg *Package, name string, typ Type, embedded bool) // newVar returns a new variable. // The arguments set the attributes found with all Objects. func newVar(kind VarKind, pos token.Pos, pkg *Package, name string, typ Type) *Var { - return &Var{object: object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, kind: kind} + return &Var{object: object{nil, pos, pkg, name, typ, 0, nopos}, kind: kind} } // Anonymous reports whether the variable is an embedded field. @@ -455,7 +415,7 @@ func NewFunc(pos token.Pos, pkg *Package, name string, sig *Signature) *Func { // as this would violate object.{Type,color} invariants. // TODO(adonovan): propose to disallow NewFunc with nil *Signature. 
} - return &Func{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, false, nil} + return &Func{object{nil, pos, pkg, name, typ, 0, nopos}, false, nil} } // Signature returns the signature (type) of the function or method. @@ -537,7 +497,7 @@ type Label struct { // NewLabel returns a new label. func NewLabel(pos token.Pos, pkg *Package, name string) *Label { - return &Label{object{pos: pos, pkg: pkg, name: name, typ: Typ[Invalid], color_: black}, false} + return &Label{object{pos: pos, pkg: pkg, name: name, typ: Typ[Invalid]}, false} } // A Builtin represents a built-in function. @@ -548,7 +508,7 @@ type Builtin struct { } func newBuiltin(id builtinId) *Builtin { - return &Builtin{object{name: predeclaredFuncs[id].name, typ: Typ[Invalid], color_: black}, id} + return &Builtin{object{name: predeclaredFuncs[id].name, typ: Typ[Invalid]}, id} } // Nil represents the predeclared value nil. diff --git a/src/go/types/resolver.go b/src/go/types/resolver.go index a8d11c2aa5..2017b9c881 100644 --- a/src/go/types/resolver.go +++ b/src/go/types/resolver.go @@ -247,7 +247,7 @@ func (check *Checker) collectObjects() { // Be conservative and use the *ast.File extent if we don't have a *token.File. 
pos, end := file.Pos(), file.End() if f := check.fset.File(file.Pos()); f != nil { - pos, end = token.Pos(f.Base()), token.Pos(f.Base()+f.Size()) + pos, end = token.Pos(f.Base()), f.End() } fileScope := NewScope(pkg.scope, pos, end, check.filename(fileNo)) fileScopes[fileNo] = fileScope diff --git a/src/go/types/scope.go b/src/go/types/scope.go index 81366df741..e44b097dc5 100644 --- a/src/go/types/scope.go +++ b/src/go/types/scope.go @@ -220,10 +220,8 @@ func (*lazyObject) Exported() bool { panic("unreachable") } func (*lazyObject) Id() string { panic("unreachable") } func (*lazyObject) String() string { panic("unreachable") } func (*lazyObject) order() uint32 { panic("unreachable") } -func (*lazyObject) color() color { panic("unreachable") } func (*lazyObject) setType(Type) { panic("unreachable") } func (*lazyObject) setOrder(uint32) { panic("unreachable") } -func (*lazyObject) setColor(color color) { panic("unreachable") } func (*lazyObject) setParent(*Scope) { panic("unreachable") } func (*lazyObject) sameId(*Package, string, bool) bool { panic("unreachable") } func (*lazyObject) scopePos() token.Pos { panic("unreachable") } diff --git a/src/go/types/sizeof_test.go b/src/go/types/sizeof_test.go index 4ff255ffa0..694ab32462 100644 --- a/src/go/types/sizeof_test.go +++ b/src/go/types/sizeof_test.go @@ -35,14 +35,14 @@ func TestSizeof(t *testing.T) { {term{}, 12, 24}, // Objects - {PkgName{}, 44, 80}, - {Const{}, 48, 88}, - {TypeName{}, 40, 72}, - {Var{}, 48, 88}, - {Func{}, 48, 88}, - {Label{}, 44, 80}, - {Builtin{}, 44, 80}, - {Nil{}, 40, 72}, + {PkgName{}, 40, 80}, + {Const{}, 44, 88}, + {TypeName{}, 36, 72}, + {Var{}, 44, 88}, + {Func{}, 44, 88}, + {Label{}, 40, 80}, + {Builtin{}, 40, 80}, + {Nil{}, 36, 72}, // Misc {Scope{}, 44, 88}, diff --git a/src/go/types/typexpr.go b/src/go/types/typexpr.go index 88ec4b77fc..b44fe4d768 100644 --- a/src/go/types/typexpr.go +++ b/src/go/types/typexpr.go @@ -16,7 +16,7 @@ import ( // ident type-checks identifier e and 
initializes x with the value or type of e. // If an error occurred, x.mode is set to invalid. -// For the meaning of def, see Checker.definedType, below. +// For the meaning of def, see Checker.declaredType, below. // If wantType is set, the identifier e is expected to denote a type. func (check *Checker) ident(x *operand, e *ast.Ident, def *TypeName, wantType bool) { x.mode = invalid @@ -148,14 +148,14 @@ func (check *Checker) ident(x *operand, e *ast.Ident, def *TypeName, wantType bo // typ type-checks the type expression e and returns its type, or Typ[Invalid]. // The type must not be an (uninstantiated) generic type. func (check *Checker) typ(e ast.Expr) Type { - return check.definedType(e, nil) + return check.declaredType(e, nil) } // varType type-checks the type expression e and returns its type, or Typ[Invalid]. // The type must not be an (uninstantiated) generic type and it must not be a // constraint interface. func (check *Checker) varType(e ast.Expr) Type { - typ := check.definedType(e, nil) + typ := check.declaredType(e, nil) check.validVarType(e, typ) return typ } @@ -185,11 +185,11 @@ func (check *Checker) validVarType(e ast.Expr, typ Type) { }).describef(e, "check var type %s", typ) } -// definedType is like typ but also accepts a type name def. -// If def != nil, e is the type specification for the type named def, declared -// in a type declaration, and def.typ.underlying will be set to the type of e -// before any components of e are type-checked. -func (check *Checker) definedType(e ast.Expr, def *TypeName) Type { +// declaredType is like typ but also accepts a type name def. +// If def != nil, e is the type specification for the [Alias] or [Named] type +// named def, and def.typ.fromRHS will be set to the [Type] of e immediately +// after its creation. 
+func (check *Checker) declaredType(e ast.Expr, def *TypeName) Type { typ := check.typInternal(e, def) assert(isTyped(typ)) if isGeneric(typ) { @@ -228,7 +228,7 @@ func goTypeName(typ Type) string { } // typInternal drives type checking of types. -// Must only be called by definedType or genericType. +// Must only be called by declaredType or genericType. func (check *Checker) typInternal(e0 ast.Expr, def *TypeName) (T Type) { if check.conf._Trace { check.trace(e0.Pos(), "-- type %s", e0) @@ -295,7 +295,7 @@ func (check *Checker) typInternal(e0 ast.Expr, def *TypeName) (T Type) { case *ast.ParenExpr: // Generic types must be instantiated before they can be used in any form. // Consequently, generic types cannot be parenthesized. - return check.definedType(e.X, def) + return check.declaredType(e.X, def) case *ast.ArrayType: if e.Len == nil { diff --git a/src/go/types/universe.go b/src/go/types/universe.go index 70935dc35f..8d2b99cf17 100644 --- a/src/go/types/universe.go +++ b/src/go/types/universe.go @@ -101,7 +101,6 @@ func defPredeclaredTypes() { // interface. { universeAnyNoAlias = NewTypeName(nopos, nil, "any", &Interface{complete: true, tset: &topTypeSet}) - universeAnyNoAlias.setColor(black) // ensure that the any TypeName reports a consistent Parent, after // hijacking Universe.Lookup with gotypesalias=0. universeAnyNoAlias.setParent(Universe) @@ -110,7 +109,6 @@ func defPredeclaredTypes() { // into the Universe, but we lean toward the future and insert the Alias // representation. 
universeAnyAlias = NewTypeName(nopos, nil, "any", nil) - universeAnyAlias.setColor(black) _ = NewAlias(universeAnyAlias, universeAnyNoAlias.Type().Underlying()) // Link TypeName and Alias def(universeAnyAlias) } @@ -118,7 +116,6 @@ func defPredeclaredTypes() { // type error interface{ Error() string } { obj := NewTypeName(nopos, nil, "error", nil) - obj.setColor(black) typ := (*Checker)(nil).newNamed(obj, nil, nil) // error.Error() string @@ -139,7 +136,6 @@ func defPredeclaredTypes() { // type comparable interface{} // marked as comparable { obj := NewTypeName(nopos, nil, "comparable", nil) - obj.setColor(black) typ := (*Checker)(nil).newNamed(obj, nil, nil) // interface{} // marked as comparable @@ -168,7 +164,7 @@ func defPredeclaredConsts() { } func defPredeclaredNil() { - def(&Nil{object{name: "nil", typ: Typ[UntypedNil], color_: black}}) + def(&Nil{object{name: "nil", typ: Typ[UntypedNil]}}) } // A builtinId is the id of a builtin function. @@ -292,7 +288,7 @@ func init() { // a scope. Objects with exported names are inserted in the unsafe package // scope; other objects are inserted in the universe scope. func def(obj Object) { - assert(obj.color() == black) + assert(obj.Type() != nil) name := obj.Name() if strings.Contains(name, " ") { return // nothing to do diff --git a/src/go/version/version.go b/src/go/version/version.go index 6b8ee67442..cfd2e9ba36 100644 --- a/src/go/version/version.go +++ b/src/go/version/version.go @@ -4,7 +4,7 @@ // Package version provides operations on [Go versions] // in [Go toolchain name syntax]: strings like -// "go1.20", "go1.21.0", "go1.22rc2", and "go1.23.4-bigcorp". +// "go1.20", "go1.21.0", "go1.22rc2", and "go1.23.4-custom". // // [Go versions]: https://go.dev/doc/toolchain#version // [Go toolchain name syntax]: https://go.dev/doc/toolchain#name @@ -15,10 +15,10 @@ import ( "strings" ) -// stripGo converts from a "go1.21-bigcorp" version to a "1.21" version. 
+// stripGo converts from a "go1.21-custom" version to a "1.21" version. // If v does not start with "go", stripGo returns the empty string (a known invalid version). func stripGo(v string) string { - v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix. + v, _, _ = strings.Cut(v, "-") // strip -custom suffix. if len(v) < 2 || v[:2] != "go" { return "" } diff --git a/src/internal/abi/type.go b/src/internal/abi/type.go index 7f44a9de56..243b787cfc 100644 --- a/src/internal/abi/type.go +++ b/src/internal/abi/type.go @@ -121,8 +121,8 @@ const ( TFlagGCMaskOnDemand TFlag = 1 << 4 // TFlagDirectIface means that a value of this type is stored directly - // in the data field of an interface, instead of indirectly. Normally - // this means the type is pointer-ish. + // in the data field of an interface, instead of indirectly. + // This flag is just a cached computation of Size_ == PtrBytes == goarch.PtrSize. TFlagDirectIface TFlag = 1 << 5 // Leaving this breadcrumb behind for dlv. It should not be used, and no diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go index 9dcac00881..da6aac9147 100644 --- a/src/internal/buildcfg/exp.go +++ b/src/internal/buildcfg/exp.go @@ -84,7 +84,6 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { SIMD: goarch == "amd64", // TODO remove this (default to false) when dev.simd is merged Dwarf5: dwarf5Supported, RandomizedHeapBase64: true, - RuntimeFree: true, SizeSpecializedMalloc: true, GreenTeaGC: true, } diff --git a/src/internal/goarch/goarch_riscv64.go b/src/internal/goarch/goarch_riscv64.go index 3b6da1e02f..468f9a6374 100644 --- a/src/internal/goarch/goarch_riscv64.go +++ b/src/internal/goarch/goarch_riscv64.go @@ -7,7 +7,7 @@ package goarch const ( _ArchFamily = RISCV64 _DefaultPhysPageSize = 4096 - _PCQuantum = 4 + _PCQuantum = 2 _MinFrameSize = 8 _StackAlign = PtrSize ) diff --git a/src/internal/goexperiment/exp_runtimefree_off.go 
b/src/internal/goexperiment/exp_runtimefree_off.go deleted file mode 100644 index 3affe434f2..0000000000 --- a/src/internal/goexperiment/exp_runtimefree_off.go +++ /dev/null @@ -1,8 +0,0 @@ -// Code generated by mkconsts.go. DO NOT EDIT. - -//go:build !goexperiment.runtimefree - -package goexperiment - -const RuntimeFree = false -const RuntimeFreeInt = 0 diff --git a/src/internal/goexperiment/exp_runtimefree_on.go b/src/internal/goexperiment/exp_runtimefree_on.go deleted file mode 100644 index 176278b542..0000000000 --- a/src/internal/goexperiment/exp_runtimefree_on.go +++ /dev/null @@ -1,8 +0,0 @@ -// Code generated by mkconsts.go. DO NOT EDIT. - -//go:build goexperiment.runtimefree - -package goexperiment - -const RuntimeFree = true -const RuntimeFreeInt = 1 diff --git a/src/internal/goexperiment/exp_runtimefreegc_off.go b/src/internal/goexperiment/exp_runtimefreegc_off.go new file mode 100644 index 0000000000..195f031b0b --- /dev/null +++ b/src/internal/goexperiment/exp_runtimefreegc_off.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build !goexperiment.runtimefreegc + +package goexperiment + +const RuntimeFreegc = false +const RuntimeFreegcInt = 0 diff --git a/src/internal/goexperiment/exp_runtimefreegc_on.go b/src/internal/goexperiment/exp_runtimefreegc_on.go new file mode 100644 index 0000000000..2afe0558ec --- /dev/null +++ b/src/internal/goexperiment/exp_runtimefreegc_on.go @@ -0,0 +1,8 @@ +// Code generated by mkconsts.go. DO NOT EDIT. + +//go:build goexperiment.runtimefreegc + +package goexperiment + +const RuntimeFreegc = true +const RuntimeFreegcInt = 1 diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go index da6a6b53ad..2e14d4298a 100644 --- a/src/internal/goexperiment/flags.go +++ b/src/internal/goexperiment/flags.go @@ -113,8 +113,8 @@ type Flags struct { // platforms. 
RandomizedHeapBase64 bool - // RuntimeFree enables the runtime to free and reuse memory more eagerly in some circumstances with compiler help. - RuntimeFree bool + // RuntimeFreegc enables the runtime to free and reuse memory more eagerly in some circumstances with compiler help. + RuntimeFreegc bool // SizeSpecializedMalloc enables malloc implementations that are specialized per size class. SizeSpecializedMalloc bool diff --git a/src/internal/pkgbits/pkgbits_test.go b/src/internal/pkgbits/pkgbits_test.go index a4755bd35a..f67267189f 100644 --- a/src/internal/pkgbits/pkgbits_test.go +++ b/src/internal/pkgbits/pkgbits_test.go @@ -28,7 +28,7 @@ func TestRoundTrip(t *testing.T) { r := pr.NewDecoder(pkgbits.SectionMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) if r.Version() != w.Version() { - t.Errorf("Expected reader version %q to be the writer version %q", r.Version(), w.Version()) + t.Errorf("Expected reader version %d to be the writer version %d", r.Version(), w.Version()) } } } diff --git a/src/internal/runtime/cgobench/bench_test.go b/src/internal/runtime/cgobench/bench_test.go index 3b8f9a8ca3..0348ee0f41 100644 --- a/src/internal/runtime/cgobench/bench_test.go +++ b/src/internal/runtime/cgobench/bench_test.go @@ -11,13 +11,13 @@ import ( "testing" ) -func BenchmarkCgoCall(b *testing.B) { +func BenchmarkCall(b *testing.B) { for b.Loop() { cgobench.Empty() } } -func BenchmarkCgoCallParallel(b *testing.B) { +func BenchmarkCallParallel(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { cgobench.Empty() @@ -25,16 +25,30 @@ func BenchmarkCgoCallParallel(b *testing.B) { }) } +func BenchmarkCgoCall(b *testing.B) { + for b.Loop() { + cgobench.EmptyC() + } +} + +func BenchmarkCgoCallParallel(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + cgobench.EmptyC() + } + }) +} + func BenchmarkCgoCallWithCallback(b *testing.B) { for b.Loop() { - cgobench.Callback() + cgobench.CallbackC() } } func 
BenchmarkCgoCallParallelWithCallback(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { - cgobench.Callback() + cgobench.CallbackC() } }) } diff --git a/src/internal/runtime/cgobench/funcs.go b/src/internal/runtime/cgobench/funcs.go index 91efa51278..b60f6f58fd 100644 --- a/src/internal/runtime/cgobench/funcs.go +++ b/src/internal/runtime/cgobench/funcs.go @@ -19,14 +19,18 @@ static void callback() { */ import "C" -func Empty() { +func EmptyC() { C.empty() } -func Callback() { +func CallbackC() { C.callback() } //export go_empty_callback func go_empty_callback() { } + +//go:noinline +func Empty() { +} diff --git a/src/internal/runtime/maps/table.go b/src/internal/runtime/maps/table.go index fbce099655..8a1932e453 100644 --- a/src/internal/runtime/maps/table.go +++ b/src/internal/runtime/maps/table.go @@ -596,7 +596,7 @@ func (t *table) tombstones() uint16 { return (t.capacity*maxAvgGroupLoad)/abi.MapGroupSlots - t.used - t.growthLeft } -// Clear deletes all entries from the map resulting in an empty map. +// Clear deletes all entries from the table resulting in an empty table. 
func (t *table) Clear(typ *abi.MapType) { mgl := t.maxGrowthLeft() if t.used == 0 && t.growthLeft == mgl { // no current entries and no tombstones diff --git a/src/internal/trace/traceviewer/format/format.go b/src/internal/trace/traceviewer/format/format.go index 83f3276704..2ec4dd4bdc 100644 --- a/src/internal/trace/traceviewer/format/format.go +++ b/src/internal/trace/traceviewer/format/format.go @@ -74,6 +74,7 @@ type ThreadCountersArg struct { InSyscall int64 } -type ThreadIDArg struct { - ThreadID uint64 +type SchedCtxArg struct { + ThreadID uint64 `json:"thread,omitempty"` + ProcID uint64 `json:"proc,omitempty"` } diff --git a/src/internal/types/testdata/fixedbugs/issue75885.go b/src/internal/types/testdata/fixedbugs/issue75885.go new file mode 100644 index 0000000000..f0cf4a65ed --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue75885.go @@ -0,0 +1,15 @@ +// -gotypesalias=1 + +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type A[P any] = P // ERROR "cannot use type parameter declared in alias declaration as RHS" + +func _[P any]() { + type A[P any] = P // ERROR "cannot use type parameter declared in alias declaration as RHS" + type B = P + type C[Q any] = P +} diff --git a/src/internal/types/testdata/fixedbugs/issue76366.go b/src/internal/types/testdata/fixedbugs/issue76366.go new file mode 100644 index 0000000000..b78aa4463f --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue76366.go @@ -0,0 +1,12 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +func _() { + type ( + A = int + B = []A + ) +} diff --git a/src/math/arith_s390x.go b/src/math/arith_s390x.go index 129156a9f6..2fda82fff4 100644 --- a/src/math/arith_s390x.go +++ b/src/math/arith_s390x.go @@ -129,7 +129,7 @@ func archExpm1(x float64) float64 func expm1TrampolineSetup(x float64) float64 func expm1Asm(x float64) float64 -const haveArchPow = true +const haveArchPow = false func archPow(x, y float64) float64 func powTrampolineSetup(x, y float64) float64 diff --git a/src/net/http/cgi/child.go b/src/net/http/cgi/child.go index e29fe20d7d..466d42c08e 100644 --- a/src/net/http/cgi/child.go +++ b/src/net/http/cgi/child.go @@ -57,8 +57,11 @@ func RequestFromMap(params map[string]string) (*http.Request, error) { r.Proto = params["SERVER_PROTOCOL"] var ok bool - r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto) - if !ok { + if r.Proto == "INCLUDED" { + // SSI (Server Side Include) use case + // CGI Specification RFC 3875 - section 4.1.16 + r.ProtoMajor, r.ProtoMinor = 1, 0 + } else if r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto); !ok { return nil, errors.New("cgi: invalid SERVER_PROTOCOL version") } diff --git a/src/net/http/cgi/child_test.go b/src/net/http/cgi/child_test.go index 18cf789bd5..f901bec1a8 100644 --- a/src/net/http/cgi/child_test.go +++ b/src/net/http/cgi/child_test.go @@ -154,6 +154,28 @@ func TestRequestWithoutRemotePort(t *testing.T) { } } +// CGI Specification RFC 3875 - section 4.1.16 +// INCLUDED value for SERVER_PROTOCOL must be treated as an HTTP/1.0 request +func TestIncludedServerProtocol(t *testing.T) { + env := map[string]string{ + "REQUEST_METHOD": "GET", + "SERVER_PROTOCOL": "INCLUDED", + } + req, err := RequestFromMap(env) + if req.Proto != "INCLUDED" { + t.Errorf("unexpected change to SERVER_PROTOCOL") + } + if major := req.ProtoMajor; major != 1 { + t.Errorf("ProtoMajor: got %d, want %d", major, 1) + } + if minor := req.ProtoMinor; minor != 0 { + t.Errorf("ProtoMinor: got %d, want 
%d", minor, 0) + } + if err != nil { + t.Fatalf("expected INCLUDED to be treated as HTTP/1.0 request") + } +} + func TestResponse(t *testing.T) { var tests = []struct { name string diff --git a/src/net/http/http.go b/src/net/http/http.go index e7959fa3b6..d346e60646 100644 --- a/src/net/http/http.go +++ b/src/net/http/http.go @@ -119,10 +119,6 @@ func removeEmptyPort(host string) string { return host } -func isNotToken(r rune) bool { - return !httpguts.IsTokenRune(r) -} - // isToken reports whether v is a valid token (https://www.rfc-editor.org/rfc/rfc2616#section-2.2). func isToken(v string) bool { // For historical reasons, this function is called ValidHeaderFieldName (see issue #67031). diff --git a/src/net/http/netconn_test.go b/src/net/http/netconn_test.go index 52b8069f8b..c5fd61289f 100644 --- a/src/net/http/netconn_test.go +++ b/src/net/http/netconn_test.go @@ -180,9 +180,10 @@ func (c *fakeNetConn) Close() error { c.loc.unlock() // Remote half of the connection reads EOF after reading any remaining data. c.rem.lock() - if c.rem.readErr != nil { + if c.rem.readErr == nil { c.rem.readErr = io.EOF } + c.rem.writeErr = net.ErrClosed c.rem.unlock() if c.autoWait { synctest.Wait() diff --git a/src/net/http/pattern.go b/src/net/http/pattern.go index 8fd120e777..a5063807c6 100644 --- a/src/net/http/pattern.go +++ b/src/net/http/pattern.go @@ -394,14 +394,6 @@ func inverseRelationship(r relationship) relationship { } } -// isLitOrSingle reports whether the segment is a non-dollar literal or a single wildcard. -func isLitOrSingle(seg segment) bool { - if seg.wild { - return !seg.multi - } - return seg.s != "/" -} - // describeConflict returns an explanation of why two patterns conflict. 
func describeConflict(p1, p2 *pattern) string { mrel := p1.compareMethods(p2) diff --git a/src/net/http/transport.go b/src/net/http/transport.go index a560765d33..4e6b07f34d 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -2110,7 +2110,6 @@ type persistConn struct { numExpectedResponses int closed error // set non-nil when conn is closed, before closech is closed canceledErr error // set non-nil if conn is canceled - broken bool // an error has happened on this connection; marked broken so it's not reused. reused bool // whether conn has had successful request/response and is being reused. // mutateHeaderFunc is an optional func to modify extra // headers on each outbound request before it's written. (the @@ -2925,7 +2924,6 @@ func (pc *persistConn) closeLocked(err error) { if err == nil { panic("nil error") } - pc.broken = true if pc.closed == nil { pc.closed = err pc.t.decConnsPerHost(pc.cacheKey) diff --git a/src/os/os_test.go b/src/os/os_test.go index 536734901b..29f2e6d3b2 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -1192,7 +1192,7 @@ func TestRenameCaseDifference(pt *testing.T) { } if dirNamesLen := len(dirNames); dirNamesLen != 1 { - t.Fatalf("unexpected dirNames len, got %q, want %q", dirNamesLen, 1) + t.Fatalf("unexpected dirNames len, got %d, want %d", dirNamesLen, 1) } if dirNames[0] != to { diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index 8509f00a5e..30ec3fad51 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -6807,7 +6807,7 @@ func TestMakeFuncStackCopy(t *testing.T) { ValueOf(&concrete).Elem().Set(fn) x := concrete(nil, 7) if x != 9 { - t.Errorf("have %#q want 9", x) + t.Errorf("have %d want 9", x) } } diff --git a/src/reflect/type.go b/src/reflect/type.go index 9b8726824e..914b5443f3 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -2528,8 +2528,7 @@ func StructOf(fields []StructField) Type { } switch { - case len(fs) == 1 && fs[0].Typ.IsDirectIface(): - // 
structs of 1 direct iface type can be direct + case typ.Size_ == goarch.PtrSize && typ.PtrBytes == goarch.PtrSize: typ.TFlag |= abi.TFlagDirectIface default: typ.TFlag &^= abi.TFlagDirectIface @@ -2698,8 +2697,7 @@ func ArrayOf(length int, elem Type) Type { } switch { - case length == 1 && typ.IsDirectIface(): - // array of 1 direct iface type can be direct + case array.Size_ == goarch.PtrSize && array.PtrBytes == goarch.PtrSize: array.TFlag |= abi.TFlagDirectIface default: array.TFlag &^= abi.TFlagDirectIface diff --git a/src/reflect/value.go b/src/reflect/value.go index b5d5aa8bf2..a82d976c47 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -1279,6 +1279,17 @@ func (v Value) Field(i int) Value { fl |= flagStickyRO } } + if fl&flagIndir == 0 && typ.Size() == 0 { + // Special case for picking a field out of a direct struct. + // A direct struct must have a pointer field and possibly a + // bunch of zero-sized fields. We must return the zero-sized + // fields indirectly, as only ptr-shaped things can be direct. + // See issue 74935. + // We use nil instead of v.ptr as it doesn't matter and + // we can avoid pinning a possibly now-unused object. + return Value{typ, nil, fl | flagIndir} + } + // Either flagIndir is set and v.ptr points at struct, // or flagIndir is not set and v.ptr is the actual struct data. // In the former case, we want v.ptr + offset. diff --git a/src/runtime/_mkmalloc/mkmalloc.go b/src/runtime/_mkmalloc/mkmalloc.go index 986b0aa9f8..1f040c8861 100644 --- a/src/runtime/_mkmalloc/mkmalloc.go +++ b/src/runtime/_mkmalloc/mkmalloc.go @@ -254,7 +254,8 @@ func inline(config generatorConfig) []byte { } // Write out the package and import declarations. 
- out.WriteString("// Code generated by mkmalloc.go; DO NOT EDIT.\n\n") + out.WriteString("// Code generated by mkmalloc.go; DO NOT EDIT.\n") + out.WriteString("// See overview in malloc_stubs.go.\n\n") out.WriteString("package " + f.Name.Name + "\n\n") for _, importDecl := range importDecls { out.Write(mustFormatNode(fset, importDecl)) diff --git a/src/runtime/arena_test.go b/src/runtime/arena_test.go index ca5223b59c..0bb1950464 100644 --- a/src/runtime/arena_test.go +++ b/src/runtime/arena_test.go @@ -36,6 +36,11 @@ type largeScalar [UserArenaChunkBytes + 1]byte type largePointer [UserArenaChunkBytes/unsafe.Sizeof(&smallPointer{}) + 1]*smallPointer func TestUserArena(t *testing.T) { + if Clobberfree() { + // This test crashes with SEGV in clobberfree in mgcsweep.go with GODEBUG=clobberfree=1. + t.Skip("triggers SEGV with GODEBUG=clobberfree=1") + } + // Set GOMAXPROCS to 2 so we don't run too many of these // tests in parallel. defer GOMAXPROCS(GOMAXPROCS(2)) @@ -228,6 +233,11 @@ func runSubTestUserArenaSlice[S comparable](t *testing.T, value []S, parallel bo } func TestUserArenaLiveness(t *testing.T) { + if Clobberfree() { + // This test crashes with SEGV in clobberfree in mgcsweep.go with GODEBUG=clobberfree=1. + t.Skip("triggers SEGV with GODEBUG=clobberfree=1") + } + t.Run("Free", func(t *testing.T) { testUserArenaLiveness(t, false) }) @@ -320,6 +330,11 @@ func testUserArenaLiveness(t *testing.T, useArenaFinalizer bool) { } func TestUserArenaClearsPointerBits(t *testing.T) { + if Clobberfree() { + // This test crashes with SEGV in clobberfree in mgcsweep.go with GODEBUG=clobberfree=1. + t.Skip("triggers SEGV with GODEBUG=clobberfree=1") + } + // This is a regression test for a serious issue wherein if pointer bits // aren't properly cleared, it's possible to allocate scalar data down // into a previously pointer-ful area, causing misinterpretation by the GC. 
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s index ea85146936..7c746803a8 100644 --- a/src/runtime/asm_amd64.s +++ b/src/runtime/asm_amd64.s @@ -181,6 +181,14 @@ TEXT runtime·rt0_go(SB),NOSPLIT|NOFRAME|TOPFRAME,$0 MOVQ AX, 24(SP) MOVQ BX, 32(SP) + // This is typically the entry point for Go programs. + // Call stack unwinding must not proceed past this frame. + // Set the frame pointer register to 0 so that frame pointer-based unwinders + // (which don't use debug info for performance reasons) + // won't attempt to unwind past this function. + // See go.dev/issue/63630 + MOVQ $0, BP + // create istack out of the given (operating system) stack. // _cgo_init may update stackguard. MOVQ $runtime·g0(SB), DI @@ -408,6 +416,13 @@ TEXT runtime·asminit(SB),NOSPLIT,$0-0 RET TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME|NOFRAME,$0 + // This is the root frame of new Go-created OS threads. + // Call stack unwinding must not proceed past this frame. + // Set the frame pointer register to 0 so that frame pointer-based unwinders + // (which don't use debug info for performance reasons) + // won't attempt to unwind past this function. + // See go.dev/issue/63630 + MOVD $0, BP CALL runtime·mstart0(SB) RET // not reached diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s index 902a7066aa..01f2690f4e 100644 --- a/src/runtime/asm_arm64.s +++ b/src/runtime/asm_arm64.s @@ -109,6 +109,14 @@ TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0 MOVW R0, 8(RSP) // argc MOVD R1, 16(RSP) // argv + // This is typically the entry point for Go programs. + // Call stack unwinding must not proceed past this frame. + // Set the frame pointer register to 0 so that frame pointer-based unwinders + // (which don't use debug info for performance reasons) + // won't attempt to unwind past this function. + // See go.dev/issue/63630 + MOVD $0, R29 + #ifdef TLS_darwin // Initialize TLS. MOVD ZR, g // clear g, make sure it's not junk. 
@@ -248,6 +256,13 @@ TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0 RET TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0 + // This is the root frame of new Go-created OS threads. + // Call stack unwinding must not proceed past this frame. + // Set the frame pointer register to 0 so that frame pointer-based unwinders + // (which don't use debug info for performance reasons) + // won't attempt to unwind past this function. + // See go.dev/issue/63630 + MOVD $0, R29 BL runtime·mstart0(SB) RET // not reached diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s index 5bd16181ee..428701a503 100644 --- a/src/runtime/asm_riscv64.s +++ b/src/runtime/asm_riscv64.s @@ -623,14 +623,14 @@ TEXT _cgo_topofstack(SB),NOSPLIT,$8 RET // func goexit(neverCallThisFunction) -// The top-most function running on a goroutine -// returns to goexit+PCQuantum. +// The top-most function running on a goroutine, returns to goexit+PCQuantum*2. +// Note that the NOPs are written in a manner that will not be compressed, +// since the offset must be known by the runtime. TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0 - MOV ZERO, ZERO // NOP + WORD $0x00000013 // NOP JMP runtime·goexit1(SB) // does not return // traceback from goexit1 must hit code range of goexit - MOV ZERO, ZERO // NOP - + WORD $0x00000013 // NOP // This is called from .init_array and follows the platform, not the Go ABI. 
TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0 diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go index 2b8ca549ad..00e67aeca0 100644 --- a/src/runtime/crash_test.go +++ b/src/runtime/crash_test.go @@ -413,6 +413,15 @@ func TestRepanickedPanicSandwich(t *testing.T) { } } +func TestDoublePanicWithSameValue(t *testing.T) { + output := runTestProg(t, "testprog", "DoublePanicWithSameValue") + want := `panic: message +` + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + func TestGoexitCrash(t *testing.T) { // External linking brings in cgo, causing deadlock detection not working. testenv.MustInternalLink(t, deadlockBuildTypes) diff --git a/src/runtime/debuglog.go b/src/runtime/debuglog.go index e993e396c1..405f2455c6 100644 --- a/src/runtime/debuglog.go +++ b/src/runtime/debuglog.go @@ -196,7 +196,8 @@ const ( debugLogPtr debugLogString debugLogConstString - debugLogStringOverflow + debugLogHexdump + debugLogOverflow debugLogPC debugLogTraceback @@ -365,7 +366,7 @@ func (l *dloggerImpl) s(x string) *dloggerImpl { l.w.uvarint(uint64(len(b))) l.w.bytes(b) if len(b) != len(x) { - l.w.byte(debugLogStringOverflow) + l.w.byte(debugLogOverflow) l.w.uvarint(uint64(len(x) - len(b))) } } @@ -373,6 +374,32 @@ func (l *dloggerImpl) s(x string) *dloggerImpl { } //go:nosplit +func (l dloggerFake) hexdump(p unsafe.Pointer, bytes uintptr) dloggerFake { return l } + +//go:nosplit +func (l *dloggerImpl) hexdump(p unsafe.Pointer, bytes uintptr) *dloggerImpl { + var b []byte + bb := (*slice)(unsafe.Pointer(&b)) + bb.array = unsafe.Pointer(p) + bb.len, bb.cap = int(bytes), int(bytes) + if len(b) > debugLogStringLimit { + b = b[:debugLogStringLimit] + } + + l.w.byte(debugLogHexdump) + l.w.uvarint(uint64(uintptr(p))) + l.w.uvarint(uint64(len(b))) + l.w.bytes(b) + + if uintptr(len(b)) != bytes { + l.w.byte(debugLogOverflow) + l.w.uvarint(uint64(bytes) - uint64(len(b))) + } + + return l +} + +//go:nosplit func (l 
dloggerFake) pc(x uintptr) dloggerFake { return l } //go:nosplit @@ -708,9 +735,30 @@ func (r *debugLogReader) printVal() bool { s := *(*string)(unsafe.Pointer(&str)) print(s) - case debugLogStringOverflow: + case debugLogOverflow: print("..(", r.uvarint(), " more bytes)..") + case debugLogHexdump: + p := uintptr(r.uvarint()) + bl := r.uvarint() + if r.begin+bl > r.end { + r.begin = r.end + print("<hexdump length corrupted>") + break + } + println() // Start on a new line + hd := hexdumper{addr: p} + for bl > 0 { + b := r.data.b[r.begin%uint64(len(r.data.b)):] + if uint64(len(b)) > bl { + b = b[:bl] + } + r.begin += uint64(len(b)) + bl -= uint64(len(b)) + hd.write(b) + } + hd.close() + case debugLogPC: printDebugLogPC(uintptr(r.uvarint()), false) diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 3a781b7551..6e0360aaca 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -238,6 +238,12 @@ func SetEnvs(e []string) { envs = e } const PtrSize = goarch.PtrSize +const ClobberdeadPtr = clobberdeadPtr + +func Clobberfree() bool { + return debug.clobberfree != 0 +} + var ForceGCPeriod = &forcegcperiod // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises @@ -633,6 +639,34 @@ func RunGetgThreadSwitchTest() { } } +// Expose freegc for testing. +func Freegc(p unsafe.Pointer, size uintptr, noscan bool) { + freegc(p, size, noscan) +} + +// Expose gcAssistBytes for the current g for testing. +func AssistCredit() int64 { + assistG := getg() + if assistG.m.curg != nil { + assistG = assistG.m.curg + } + return assistG.gcAssistBytes +} + +// Expose gcBlackenEnabled for testing. +func GcBlackenEnable() bool { + // Note we do a non-atomic load here. + // Some checks against gcBlackenEnabled (e.g., in mallocgc) + // are currently done via non-atomic load for performance reasons, + // but other checks are done via atomic load (e.g., in mgcmark.go), + // so interpreting this value in a test may be subtle. 
+ return gcBlackenEnabled != 0 +} + +const SizeSpecializedMallocEnabled = sizeSpecializedMallocEnabled + +const RuntimeFreegcEnabled = runtimeFreegcEnabled + const ( PageSize = pageSize PallocChunkPages = pallocChunkPages @@ -1472,6 +1506,15 @@ func Releasem() { releasem(getg().m) } +// GoschedIfBusy is an explicit preemption check to call back +// into the scheduler. This is useful for tests that run code +// which spend most of their time as non-preemptible, as it +// can be placed right after becoming preemptible again to ensure +// that the scheduler gets a chance to preempt the goroutine. +func GoschedIfBusy() { + goschedIfBusy() +} + type PIController struct { piController } @@ -1988,3 +2031,36 @@ func (head *ListHeadManual) Pop() unsafe.Pointer { func (head *ListHeadManual) Remove(p unsafe.Pointer) { head.l.remove(p) } + +func Hexdumper(base uintptr, wordBytes int, mark func(addr uintptr, start func()), data ...[]byte) string { + buf := make([]byte, 0, 2048) + getg().writebuf = buf + h := hexdumper{addr: base, addrBytes: 4, wordBytes: uint8(wordBytes)} + if mark != nil { + h.mark = func(addr uintptr, m hexdumpMarker) { + mark(addr, m.start) + } + } + for _, d := range data { + h.write(d) + } + h.close() + n := len(getg().writebuf) + getg().writebuf = nil + if n == cap(buf) { + panic("Hexdumper buf too small") + } + return string(buf[:n]) +} + +func HexdumpWords(p, bytes uintptr) string { + buf := make([]byte, 0, 2048) + getg().writebuf = buf + hexdumpWords(p, bytes, nil) + n := len(getg().writebuf) + getg().writebuf = nil + if n == cap(buf) { + panic("HexdumpWords buf too small") + } + return string(buf[:n]) +} diff --git a/src/runtime/hexdump.go b/src/runtime/hexdump.go new file mode 100644 index 0000000000..0d7dbb540b --- /dev/null +++ b/src/runtime/hexdump.go @@ -0,0 +1,269 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime + +import ( + "internal/goarch" + "unsafe" +) + +// hexdumpWords prints a word-oriented hex dump of [p, p+len). +// +// If mark != nil, it will be passed to hexdumper.mark. +func hexdumpWords(p, len uintptr, mark func(uintptr, hexdumpMarker)) { + printlock() + + // Provide a default annotation + symMark := func(u uintptr, hm hexdumpMarker) { + if mark != nil { + mark(u, hm) + } + + // Can we symbolize this value? + val := *(*uintptr)(unsafe.Pointer(u)) + fn := findfunc(val) + if fn.valid() { + hm.start() + print("<", funcname(fn), "+", hex(val-fn.entry()), ">\n") + } + } + + h := hexdumper{addr: p, mark: symMark} + h.write(unsafe.Slice((*byte)(unsafe.Pointer(p)), len)) + h.close() + printunlock() +} + +// hexdumper is a Swiss-army knife hex dumper. +// +// To use, optionally set addr and wordBytes, then call write repeatedly, +// followed by close. +type hexdumper struct { + // addr is the address to print for the first byte of data. + addr uintptr + + // addrBytes is the number of bytes of addr to print. If this is 0, it + // defaults to goarch.PtrSize. + addrBytes uint8 + + // wordBytes is the number of bytes in a word. If wordBytes is 1, this + // prints a byte-oriented dump. If it's > 1, this interprets the data as a + // sequence of words of the given size. If it's 0, it's treated as + // goarch.PtrSize. + wordBytes uint8 + + // mark is an optional function that can annotate values in the hex dump. + // + // If non-nil, it is called with the address of every complete, aligned word + // in the hex dump. + // + // If it decides to print an annotation, it must first call m.start(), then + // print the annotation, followed by a new line. + mark func(addr uintptr, m hexdumpMarker) + + // Below here is state + + ready int8 // 0=need to init state; 1=need to print header; 2=ready + + // dataBuf accumulates a line at a time of data, in case it's split across + // buffers. 
+ dataBuf [16]byte + dataPos uint8 + dataSkip uint8 // Skip first n bytes of buf on first line + + // toPos maps from byte offset in data to a visual offset in the printed line. + toPos [16]byte +} + +type hexdumpMarker struct { + chars int +} + +func (h *hexdumper) write(data []byte) { + if h.ready == 0 { + h.init() + } + + // Handle leading data + if h.dataPos > 0 { + n := copy(h.dataBuf[h.dataPos:], data) + h.dataPos += uint8(n) + data = data[n:] + if h.dataPos < uint8(len(h.dataBuf)) { + return + } + h.flushLine(h.dataBuf[:]) + h.dataPos = 0 + } + + // Handle full lines in data + for len(data) >= len(h.dataBuf) { + h.flushLine(data[:len(h.dataBuf)]) + data = data[len(h.dataBuf):] + } + + // Handle trailing data + h.dataPos = uint8(copy(h.dataBuf[:], data)) +} + +func (h *hexdumper) close() { + if h.dataPos > 0 { + h.flushLine(h.dataBuf[:h.dataPos]) + } +} + +func (h *hexdumper) init() { + const bytesPerLine = len(h.dataBuf) + + if h.addrBytes == 0 { + h.addrBytes = goarch.PtrSize + } else if h.addrBytes < 0 || h.addrBytes > goarch.PtrSize { + throw("invalid addrBytes") + } + + if h.wordBytes == 0 { + h.wordBytes = goarch.PtrSize + } + wb := int(h.wordBytes) + if wb < 0 || wb >= bytesPerLine || wb&(wb-1) != 0 { + throw("invalid wordBytes") + } + + // Construct position mapping. + for i := range h.toPos { + // First, calculate the "field" within the line, applying byte swizzling. + field := 0 + if goarch.BigEndian { + field = i + } else { + field = i ^ int(wb-1) + } + // Translate this field into a visual offset. + // "00112233 44556677 8899AABB CCDDEEFF" + h.toPos[i] = byte(field*2 + field/4 + field/8) + } + + // The first line may need to skip some fields to get to alignment. + // Round down the starting address. + nAddr := h.addr &^ uintptr(bytesPerLine-1) + // Skip bytes to get to alignment. + h.dataPos = uint8(h.addr - nAddr) + h.dataSkip = uint8(h.addr - nAddr) + h.addr = nAddr + + // We're ready to print the header. 
+ h.ready = 1 +} + +func (h *hexdumper) flushLine(data []byte) { + const bytesPerLine = len(h.dataBuf) + + const maxAddrChars = 2 * goarch.PtrSize + const addrSep = ": " + dataStart := int(2*h.addrBytes) + len(addrSep) + // dataChars uses the same formula to toPos above. We calculate it with the + // "last field", then add the size of the last field. + const dataChars = (bytesPerLine-1)*2 + (bytesPerLine-1)/4 + (bytesPerLine-1)/8 + 2 + const asciiSep = " " + asciiStart := dataStart + dataChars + len(asciiSep) + const asciiChars = bytesPerLine + nlPos := asciiStart + asciiChars + + var lineBuf [maxAddrChars + len(addrSep) + dataChars + len(asciiSep) + asciiChars + 1]byte + clear := func() { + for i := range lineBuf { + lineBuf[i] = ' ' + } + } + clear() + + if h.ready == 1 { + // Print column offsets header. + for offset, pos := range h.toPos { + h.fmtHex(lineBuf[dataStart+int(pos+1):][:1], uint64(offset)) + } + // Print ASCII offsets. + for offset := range asciiChars { + h.fmtHex(lineBuf[asciiStart+offset:][:1], uint64(offset)) + } + lineBuf[nlPos] = '\n' + gwrite(lineBuf[:nlPos+1]) + clear() + h.ready = 2 + } + + // Format address. + h.fmtHex(lineBuf[:2*h.addrBytes], uint64(h.addr)) + copy(lineBuf[2*h.addrBytes:], addrSep) + // Format data in hex and ASCII. + for offset, b := range data { + if offset < int(h.dataSkip) { + continue + } + + pos := h.toPos[offset] + h.fmtHex(lineBuf[dataStart+int(pos):][:2], uint64(b)) + + copy(lineBuf[dataStart+dataChars:], asciiSep) + ascii := uint8('.') + if b >= ' ' && b <= '~' { + ascii = b + } + lineBuf[asciiStart+offset] = ascii + } + // Trim buffer. + end := asciiStart + len(data) + lineBuf[end] = '\n' + buf := lineBuf[:end+1] + + // Print. + gwrite(buf) + + // Print marks. 
+ if h.mark != nil { + clear() + for offset := 0; offset+int(h.wordBytes) <= len(data); offset += int(h.wordBytes) { + if offset < int(h.dataSkip) { + continue + } + addr := h.addr + uintptr(offset) + // Find the position of the left edge of this word + caret := dataStart + int(min(h.toPos[offset], h.toPos[offset+int(h.wordBytes)-1])) + h.mark(addr, hexdumpMarker{caret}) + } + } + + h.addr += uintptr(bytesPerLine) + h.dataPos = 0 + h.dataSkip = 0 +} + +// fmtHex formats v in base 16 into buf. It fills all of buf. If buf is too +// small to represent v, it the output will start with '*'. +func (h *hexdumper) fmtHex(buf []byte, v uint64) { + const dig = "0123456789abcdef" + i := len(buf) - 1 + for ; i >= 0; i-- { + buf[i] = dig[v%16] + v /= 16 + } + if v != 0 { + // Indicate that we couldn't fit the whole number. + buf[0] = '*' + } +} + +func (m hexdumpMarker) start() { + var spaces [64]byte + for i := range spaces { + spaces[i] = ' ' + } + for m.chars > len(spaces) { + gwrite(spaces[:]) + m.chars -= len(spaces) + } + gwrite(spaces[:m.chars]) + print("^ ") +} diff --git a/src/runtime/hexdump_test.go b/src/runtime/hexdump_test.go new file mode 100644 index 0000000000..cc44e48e4b --- /dev/null +++ b/src/runtime/hexdump_test.go @@ -0,0 +1,151 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime_test + +import ( + "fmt" + "internal/abi" + "internal/goarch" + "runtime" + "slices" + "strings" + "testing" + "unsafe" +) + +func TestHexdumper(t *testing.T) { + check := func(label, got, want string) { + got = strings.TrimRight(got, "\n") + want = strings.TrimPrefix(want, "\n") + want = strings.TrimRight(want, "\n") + if got != want { + t.Errorf("%s: got\n%s\nwant\n%s", label, got, want) + } + } + + data := make([]byte, 32) + for i := range data { + data[i] = 0x10 + byte(i) + } + + check("basic", runtime.Hexdumper(0, 1, nil, data), ` + 0 1 2 3 4 5 6 7 8 9 a b c d e f 0123456789abcdef +00000000: 10111213 14151617 18191a1b 1c1d1e1f ................ +00000010: 20212223 24252627 28292a2b 2c2d2e2f !"#$%&'()*+,-./`) + + if !goarch.BigEndian { + // Different word sizes + check("word=4", runtime.Hexdumper(0, 4, nil, data), ` + 3 2 1 0 7 6 5 4 b a 9 8 f e d c 0123456789abcdef +00000000: 13121110 17161514 1b1a1918 1f1e1d1c ................ +00000010: 23222120 27262524 2b2a2928 2f2e2d2c !"#$%&'()*+,-./`) + check("word=8", runtime.Hexdumper(0, 8, nil, data), ` + 7 6 5 4 3 2 1 0 f e d c b a 9 8 0123456789abcdef +00000000: 17161514 13121110 1f1e1d1c 1b1a1918 ................ +00000010: 27262524 23222120 2f2e2d2c 2b2a2928 !"#$%&'()*+,-./`) + } + + // Starting offset + check("offset=1", runtime.Hexdumper(1, 1, nil, data), ` + 0 1 2 3 4 5 6 7 8 9 a b c d e f 0123456789abcdef +00000000: 101112 13141516 1718191a 1b1c1d1e ............... +00000010: 1f202122 23242526 2728292a 2b2c2d2e . !"#$%&'()*+,-. +00000020: 2f /`) + if !goarch.BigEndian { + // ... combined with a word size + check("offset=1 and word=4", runtime.Hexdumper(1, 4, nil, data), ` + 3 2 1 0 7 6 5 4 b a 9 8 f e d c 0123456789abcdef +00000000: 121110 16151413 1a191817 1e1d1c1b ............... +00000010: 2221201f 26252423 2a292827 2e2d2c2b . !"#$%&'()*+,-. +00000020: 2f /`) + } + + // Partial data full of annoying boundaries. 
+ partials := make([][]byte, 0) + for i := 0; i < len(data); i += 2 { + partials = append(partials, data[i:i+2]) + } + check("partials", runtime.Hexdumper(1, 1, nil, partials...), ` + 0 1 2 3 4 5 6 7 8 9 a b c d e f 0123456789abcdef +00000000: 101112 13141516 1718191a 1b1c1d1e ............... +00000010: 1f202122 23242526 2728292a 2b2c2d2e . !"#$%&'()*+,-. +00000020: 2f /`) + + // Marks. + check("marks", runtime.Hexdumper(0, 1, func(addr uintptr, start func()) { + if addr%7 == 0 { + start() + println("mark") + } + }, data), ` + 0 1 2 3 4 5 6 7 8 9 a b c d e f 0123456789abcdef +00000000: 10111213 14151617 18191a1b 1c1d1e1f ................ + ^ mark + ^ mark + ^ mark +00000010: 20212223 24252627 28292a2b 2c2d2e2f !"#$%&'()*+,-./ + ^ mark + ^ mark`) + if !goarch.BigEndian { + check("marks and word=4", runtime.Hexdumper(0, 4, func(addr uintptr, start func()) { + if addr%7 == 0 { + start() + println("mark") + } + }, data), ` + 3 2 1 0 7 6 5 4 b a 9 8 f e d c 0123456789abcdef +00000000: 13121110 17161514 1b1a1918 1f1e1d1c ................ + ^ mark +00000010: 23222120 27262524 2b2a2928 2f2e2d2c !"#$%&'()*+,-./ + ^ mark`) + } +} + +func TestHexdumpWords(t *testing.T) { + if goarch.BigEndian || goarch.PtrSize != 8 { + // We could support these, but it's kind of a pain. + t.Skip("requires 64-bit little endian") + } + + // Most of this is in hexdumper. Here we just test the symbolizer. + + pc := abi.FuncPCABIInternal(TestHexdumpWords) + pcs := slices.Repeat([]uintptr{pc}, 3) + + // Make sure pcs doesn't move around on us. + var p runtime.Pinner + defer p.Unpin() + p.Pin(&pcs[0]) + // Get a 16 byte, 16-byte-aligned chunk of pcs so the hexdump is simple. + start := uintptr(unsafe.Pointer(&pcs[0])) + start = (start + 15) &^ uintptr(15) + + // Do the hex dump. + got := runtime.HexdumpWords(start, 16) + + // Construct the expected output. 
+ pcStr := fmt.Sprintf("%016x", pc) + pcStr = pcStr[:8] + " " + pcStr[8:] // Add middle space + ascii := make([]byte, 8) + for i := range ascii { + b := byte(pc >> (8 * i)) + if b >= ' ' && b <= '~' { + ascii[i] = b + } else { + ascii[i] = '.' + } + } + want := fmt.Sprintf(` + 7 6 5 4 3 2 1 0 f e d c b a 9 8 0123456789abcdef +%016x: %s %s %s%s + ^ <runtime_test.TestHexdumpWords+0x0> + ^ <runtime_test.TestHexdumpWords+0x0> +`, start, pcStr, pcStr, ascii, ascii) + want = strings.TrimPrefix(want, "\n") + + if got != want { + t.Errorf("got\n%s\nwant\n%s", got, want) + } +} diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index fc4f21b532..d49dacaf68 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -1080,7 +1080,8 @@ func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger // // We might consider turning these on by default; many of them previously were. // They account for a few % of mallocgc's cost though, which does matter somewhat -// at scale. +// at scale. (When testing changes to malloc, consider enabling this, and also +// some function-local 'doubleCheck' consts such as in mbitmap.go currently.) const doubleCheckMalloc = false // sizeSpecializedMallocEnabled is the set of conditions where we enable the size-specialized @@ -1089,6 +1090,14 @@ const doubleCheckMalloc = false // properly on plan9, so size-specialized malloc is also disabled on plan9. const sizeSpecializedMallocEnabled = goexperiment.SizeSpecializedMalloc && GOOS != "plan9" && !asanenabled && !raceenabled && !msanenabled && !valgrindenabled +// runtimeFreegcEnabled is the set of conditions where we enable the runtime.freegc +// implementation and the corresponding allocation-related changes: the experiment must be +// enabled, and none of the memory sanitizers should be enabled. We allow the race detector, +// in contrast to sizeSpecializedMallocEnabled. 
+// TODO(thepudds): it would be nice to check Valgrind integration, though there are some hints +// there might not be any canned tests in tree for Go's integration with Valgrind. +const runtimeFreegcEnabled = goexperiment.RuntimeFreegc && !asanenabled && !msanenabled && !valgrindenabled + // Allocate an object of size bytes. // Small objects are allocated from the per-P cache's free lists. // Large objects (> 32 kB) are allocated straight from the heap. @@ -1150,7 +1159,8 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { size += asanRZ } - // Assist the GC if needed. + // Assist the GC if needed. (On the reuse path, we currently compensate for this; + // changes here might require changes there.) if gcBlackenEnabled != 0 { deductAssistCredit(size) } @@ -1413,6 +1423,16 @@ func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointe size = uintptr(gc.SizeClassToSize[sizeclass]) spc := makeSpanClass(sizeclass, true) span := c.alloc[spc] + + // First, check for a reusable object. + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + // We have a reusable object, use it. + x := mallocgcSmallNoscanReuse(c, span, spc, size, needzero) + mp.mallocing = 0 + releasem(mp) + return x, size + } + v := nextFreeFast(span) if v == 0 { v, span, checkGCTrigger = c.nextFree(spc) @@ -1472,6 +1492,55 @@ func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointe return x, size } +// mallocgcSmallNoscanReuse returns a previously freed noscan object after preparing it for reuse. +// It must only be called if hasReusableNoscan returned true. +func mallocgcSmallNoscanReuse(c *mcache, span *mspan, spc spanClass, size uintptr, needzero bool) unsafe.Pointer { + // TODO(thepudds): could nextFreeFast, nextFree and nextReusable return unsafe.Pointer? + // Maybe doesn't matter. gclinkptr might be for historical reasons. 
+ v, span := c.nextReusableNoScan(span, spc) + x := unsafe.Pointer(v) + + // Compensate for the GC assist credit deducted in mallocgc (before calling us and + // after we return) because this is not a newly allocated object. We use the full slot + // size (elemsize) here because that's what mallocgc deducts overall. Note we only + // adjust this when gcBlackenEnabled is true, which follows mallocgc behavior. + // TODO(thepudds): a follow-up CL adds a more specific test of our assist credit + // handling, including for validating internal fragmentation handling. + if gcBlackenEnabled != 0 { + addAssistCredit(size) + } + + // This is a previously used object, so only check needzero (and not span.needzero) + // for clearing. + if needzero { + memclrNoHeapPointers(x, size) + } + + // See publicationBarrier comment in mallocgcSmallNoscan. + publicationBarrier() + + // Finish and return. Note that we do not update span.freeIndexForScan, profiling info, + // nor do we check gcTrigger. + // TODO(thepudds): the current approach is viable for a GOEXPERIMENT, but + // means we do not profile reused heap objects. Ultimately, we will need a better + // approach for profiling, or at least ensure we are not introducing bias in the + // profiled allocations. + // TODO(thepudds): related, we probably want to adjust how allocs and frees are counted + // in the existing stats. Currently, reused objects are not counted as allocs nor + // frees, but instead roughly appear as if the original heap object lived on. We + // probably will also want some additional runtime/metrics, and generally think about + // user-facing observability & diagnostics, though all this likely can wait for an + // official proposal. + if writeBarrier.enabled { + // Allocate black during GC. + // All slots hold nil so no scanning is needed. + // This may be racing with GC so do it atomically if there can be + // a race marking the bit. 
+ gcmarknewobject(span, uintptr(x)) + } + return x +} + func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) { // Set mp.mallocing to keep from being preempted by GC. mp := acquirem() @@ -1816,8 +1885,6 @@ func postMallocgcDebug(x unsafe.Pointer, elemsize uintptr, typ *_type) { // by size bytes, and assists the GC if necessary. // // Caller must be preemptible. -// -// Returns the G for which the assist credit was accounted. func deductAssistCredit(size uintptr) { // Charge the current user G for this allocation. assistG := getg() @@ -1836,6 +1903,267 @@ func deductAssistCredit(size uintptr) { } } +// addAssistCredit is like deductAssistCredit, +// but adds credit rather than removes, +// and never calls gcAssistAlloc. +func addAssistCredit(size uintptr) { + // Credit the current user G. + assistG := getg() + if assistG.m.curg != nil { // TODO(thepudds): do we need to do this? + assistG = assistG.m.curg + } + // Credit the size against the G. + assistG.gcAssistBytes += int64(size) +} + +const ( + // doubleCheckReusable enables some additional invariant checks for the + // runtime.freegc and reusable objects. Note that some of these checks alter timing, + // and it is good to test changes with and without this enabled. + doubleCheckReusable = false + + // debugReusableLog enables some printlns for runtime.freegc and reusable objects. + debugReusableLog = false +) + +// freegc records that a heap object is reusable and available for +// immediate reuse in a subsequent mallocgc allocation, without +// needing to wait for the GC cycle to progress. +// +// The information is recorded in a free list stored in the +// current P's mcache. The caller must pass in the user size +// and whether the object has pointers, which allows a faster free +// operation. +// +// freegc must be called by the effective owner of ptr who knows +// the pointer is logically dead, with no possible aliases that might +// be used past that moment. 
In other words, ptr must be the +// last and only pointer to its referent. +// +// The intended caller is the compiler. +// +// Note: please do not send changes that attempt to add freegc calls +// to the standard library. +// +// ptr must point to a heap object or into the current g's stack, +// in which case freegc is a no-op. In particular, ptr must not point +// to memory in the data or bss sections, which is partially enforced. +// For objects with a malloc header, ptr should point mallocHeaderSize bytes +// past the base; otherwise, ptr should point to the base of the heap object. +// In other words, ptr should be the same pointer that was returned by mallocgc. +// +// In addition, the caller must know that ptr's object has no specials, such +// as might have been created by a call to SetFinalizer or AddCleanup. +// (Internally, the runtime deals appropriately with internally-created +// specials, such as specials for memory profiling). +// +// If the size of ptr's object is less than 16 bytes or greater than +// 32KiB - gc.MallocHeaderSize bytes, freegc is currently a no-op. It must only +// be called in alloc-safe places. It currently throws if noscan is false +// (support for which is implemented in a later CL in our stack). +// +// Note that freegc accepts an unsafe.Pointer and hence keeps the pointer +// alive. It therefore could be a pessimization in some cases (such +// as a long-lived function) if the caller does not call freegc before +// or roughly when the liveness analysis of the compiler +// would otherwise have determined ptr's object is reclaimable by the GC. +func freegc(ptr unsafe.Pointer, size uintptr, noscan bool) bool { + if !runtimeFreegcEnabled || !reusableSize(size) { + return false + } + if sizeSpecializedMallocEnabled && !noscan { + // TODO(thepudds): temporarily disable freegc with SizeSpecializedMalloc for pointer types + // until we finish integrating. 
+ return false + } + + if ptr == nil { + throw("freegc nil") + } + + // Set mp.mallocing to keep from being preempted by GC. + // Otherwise, the GC could flush our mcache or otherwise cause problems. + mp := acquirem() + if mp.mallocing != 0 { + throw("freegc deadlock") + } + if mp.gsignal == getg() { + throw("freegc during signal") + } + mp.mallocing = 1 + + if mp.curg.stack.lo <= uintptr(ptr) && uintptr(ptr) < mp.curg.stack.hi { + // This points into our stack, so free is a no-op. + mp.mallocing = 0 + releasem(mp) + return false + } + + if doubleCheckReusable { + // TODO(thepudds): we could enforce no free on globals in bss or data. Maybe by + // checking span via spanOf or spanOfHeap, or maybe walk from firstmoduledata + // like isGoPointerWithoutSpan, or activeModules, or something. If so, we might + // be able to delay checking until reuse (e.g., check span just before reusing, + // though currently we don't always need to lookup a span on reuse). If we think + // no usage patterns could result in globals, maybe enforcement for globals could + // be behind -d=checkptr=1 or similar. The compiler can have knowledge of where + // a variable is allocated, but stdlib does not, although there are certain + // usage patterns that cannot result in a global. + // TODO(thepudds): separately, consider a local debugReusableMcacheOnly here + // to ignore freed objects if not in mspan in mcache, maybe when freeing and reading, + // by checking something like s.base() <= uintptr(v) && uintptr(v) < s.limit. Or + // maybe a GODEBUG or compiler debug flag. + span := spanOf(uintptr(ptr)) + if span == nil { + throw("nextReusable: nil span for pointer in free list") + } + if state := span.state.get(); state != mSpanInUse { + throw("nextReusable: span is not in use") + } + } + + if debug.clobberfree != 0 { + clobberfree(ptr, size) + } + + // We first check if p is still in our per-P cache. + // Get our per-P cache for small objects. 
+ c := getMCache(mp) + if c == nil { + throw("freegc called without a P or outside bootstrapping") + } + + v := uintptr(ptr) + if !noscan && !heapBitsInSpan(size) { + // mallocgcSmallScanHeader expects to get the base address of the object back + // from the findReusable funcs (as well as from nextFreeFast and nextFree), and + // not mallocHeaderSize bytes into a object, so adjust that here. + v -= mallocHeaderSize + + // The size class lookup wants size to be adjusted by mallocHeaderSize. + size += mallocHeaderSize + } + + // TODO(thepudds): should verify (behind doubleCheckReusable constant) that our calculated + // sizeclass here matches what's in span found via spanOf(ptr) or findObject(ptr). + var sizeclass uint8 + if size <= gc.SmallSizeMax-8 { + sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] + } else { + sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)] + } + + spc := makeSpanClass(sizeclass, noscan) + s := c.alloc[spc] + + if debugReusableLog { + if s.base() <= uintptr(v) && uintptr(v) < s.limit { + println("freegc [in mcache]:", hex(uintptr(v)), "sweepgen:", mheap_.sweepgen, "writeBarrier.enabled:", writeBarrier.enabled) + } else { + println("freegc [NOT in mcache]:", hex(uintptr(v)), "sweepgen:", mheap_.sweepgen, "writeBarrier.enabled:", writeBarrier.enabled) + } + } + + if noscan { + c.addReusableNoscan(spc, uintptr(v)) + } else { + // TODO(thepudds): implemented in later CL in our stack. + throw("freegc called for object with pointers, not yet implemented") + } + + // For stats, for now we leave allocCount alone, roughly pretending to the rest + // of the system that this potential reuse never happened. + + mp.mallocing = 0 + releasem(mp) + + return true +} + +// nextReusableNoScan returns the next reusable object for a noscan span, +// or 0 if no reusable object is found. 
+func (c *mcache) nextReusableNoScan(s *mspan, spc spanClass) (gclinkptr, *mspan) { + if !runtimeFreegcEnabled { + return 0, s + } + + // Pop a reusable pointer from the free list for this span class. + v := c.reusableNoscan[spc] + if v == 0 { + return 0, s + } + c.reusableNoscan[spc] = v.ptr().next + + if debugReusableLog { + println("reusing from ptr free list:", hex(v), "sweepgen:", mheap_.sweepgen, "writeBarrier.enabled:", writeBarrier.enabled) + } + if doubleCheckReusable { + doubleCheckNextReusable(v) // debug only sanity check + } + + // For noscan spans, we only need the span if the write barrier is enabled (so that our caller + // can call gcmarknewobject to allocate black). If the write barrier is enabled, we can skip + // looking up the span when the pointer is in a span in the mcache. + if !writeBarrier.enabled { + return v, nil + } + if s.base() <= uintptr(v) && uintptr(v) < s.limit { + // Return the original span. + return v, s + } + + // We must find and return the span. + span := spanOf(uintptr(v)) + if span == nil { + // TODO(thepudds): construct a test that triggers this throw. + throw("nextReusableNoScan: nil span for pointer in reusable object free list") + } + + return v, span +} + +// doubleCheckNextReusable checks some invariants. +// TODO(thepudds): will probably delete some of this. Can mostly be ignored for review. +func doubleCheckNextReusable(v gclinkptr) { + // TODO(thepudds): should probably take the spanClass as well to confirm expected + // sizeclass match. 
+ _, span, objIndex := findObject(uintptr(v), 0, 0) + if span == nil { + throw("nextReusable: nil span for pointer in free list") + } + if state := span.state.get(); state != mSpanInUse { + throw("nextReusable: span is not in use") + } + if uintptr(v) < span.base() || uintptr(v) >= span.limit { + throw("nextReusable: span is not in range") + } + if span.objBase(uintptr(v)) != uintptr(v) { + print("nextReusable: v=", hex(v), " base=", hex(span.objBase(uintptr(v))), "\n") + throw("nextReusable: v is non-base-address for object found on pointer free list") + } + if span.isFree(objIndex) { + throw("nextReusable: pointer on free list is free") + } + + const debugReusableEnsureSwept = false + if debugReusableEnsureSwept { + // Currently disabled. + // Note: ensureSwept here alters behavior (not just an invariant check). + span.ensureSwept() + if span.isFree(objIndex) { + throw("nextReusable: pointer on free list is free after ensureSwept") + } + } +} + +// reusableSize reports if size is a currently supported size for a reusable object. +func reusableSize(size uintptr) bool { + if size < maxTinySize || size > maxSmallSize-mallocHeaderSize { + return false + } + return true +} + // memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers // on chunks of the buffer to be zeroed, with opportunities for preemption // along the way. memclrNoHeapPointers contains no safepoints and also diff --git a/src/runtime/malloc_generated.go b/src/runtime/malloc_generated.go index 2215dbaddb..5abb61257a 100644 --- a/src/runtime/malloc_generated.go +++ b/src/runtime/malloc_generated.go @@ -1,4 +1,5 @@ // Code generated by mkmalloc.go; DO NOT EDIT. +// See overview in malloc_stubs.go. 
package runtime @@ -6400,6 +6401,32 @@ func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Poin const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -6497,6 +6524,32 @@ func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Poin const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -6594,6 +6647,32 @@ func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Poin const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; 
assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -6691,6 +6770,32 @@ func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Poin const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -6788,6 +6893,32 @@ func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Poin const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -6885,6 +7016,32 @@ func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Poin const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + 
+ v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -6982,6 +7139,32 @@ func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Poin const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -7079,6 +7262,32 @@ func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Poin const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := 
sys.TrailingZeros64(span.allocCache) @@ -7176,6 +7385,32 @@ func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -7273,6 +7508,32 @@ func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -7370,6 +7631,32 @@ func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG 
:= getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -7467,6 +7754,32 @@ func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -7564,6 +7877,32 @@ func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -7661,6 +8000,32 @@ func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && 
c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -7758,6 +8123,32 @@ func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -7855,6 +8246,32 @@ func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { 
theBit := sys.TrailingZeros64(span.allocCache) @@ -7952,6 +8369,32 @@ func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -8049,6 +8492,32 @@ func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -8146,6 +8615,32 @@ func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if 
assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -8243,6 +8738,32 @@ func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -8340,6 +8861,32 @@ func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -8437,6 +8984,32 @@ func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && 
c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -8534,6 +9107,32 @@ func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) @@ -8631,6 +9230,32 @@ func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { 
theBit := sys.TrailingZeros64(span.allocCache) @@ -8728,6 +9353,32 @@ func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Poi const spc = spanClass(sizeclass<<1) | spanClass(1) span := c.alloc[spc] + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + x := v + { + + if valgrindenabled { + valgrindMalloc(x, size) + } + + if gcBlackenEnabled != 0 && elemsize != 0 { + if assistG := getg().m.curg; assistG != nil { + assistG.gcAssistBytes -= int64(elemsize - size) + } + } + + if debug.malloc { + postMallocgcDebug(x, elemsize, typ) + } + return x + } + + } + var nextFreeFastResult gclinkptr if span.allocCache != 0 { theBit := sys.TrailingZeros64(span.allocCache) diff --git a/src/runtime/malloc_stubs.go b/src/runtime/malloc_stubs.go index 224746f3d4..e9752956b8 100644 --- a/src/runtime/malloc_stubs.go +++ b/src/runtime/malloc_stubs.go @@ -7,6 +7,8 @@ // to produce a full mallocgc function that's specialized for a span class // or specific size in the case of the tiny allocator. // +// To generate the specialized mallocgc functions, do 'go run .' inside runtime/_mkmalloc. +// // To assemble a mallocgc function, the mallocStub function is cloned, and the call to // inlinedMalloc is replaced with the inlined body of smallScanNoHeaderStub, // smallNoScanStub or tinyStub, depending on the parameters being specialized. @@ -71,7 +73,8 @@ func mallocStub(size uintptr, typ *_type, needzero bool) unsafe.Pointer { } } - // Assist the GC if needed. + // Assist the GC if needed. (On the reuse path, we currently compensate for this; + // changes here might require changes there.) 
if gcBlackenEnabled != 0 { deductAssistCredit(size) } @@ -242,6 +245,23 @@ func smallNoScanStub(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, u c := getMCache(mp) const spc = spanClass(sizeclass<<1) | spanClass(noscanint_) span := c.alloc[spc] + + // First, check for a reusable object. + if runtimeFreegcEnabled && c.hasReusableNoscan(spc) { + // We have a reusable object, use it. + v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero) + mp.mallocing = 0 + releasem(mp) + + // TODO(thepudds): note that the generated return path is essentially duplicated + // by the generator. For example, see the two postMallocgcDebug calls and + // related duplicated code on the return path currently in the generated + // mallocgcSmallNoScanSC2 function. One set of those correspond to this + // return here. We might be able to de-duplicate the generated return path + // by updating the generator, perhaps by jumping to a shared return or similar. + return v, elemsize + } + v := nextFreeFastStub(span) if v == 0 { v, span, checkGCTrigger = c.nextFree(spc) diff --git a/src/runtime/malloc_test.go b/src/runtime/malloc_test.go index bf58947bbc..97cf0eed54 100644 --- a/src/runtime/malloc_test.go +++ b/src/runtime/malloc_test.go @@ -16,6 +16,7 @@ import ( "runtime" . "runtime" "strings" + "sync" "sync/atomic" "testing" "time" @@ -234,6 +235,364 @@ func TestTinyAllocIssue37262(t *testing.T) { runtime.Releasem() } +// TestFreegc does basic testing of explicit frees. +func TestFreegc(t *testing.T) { + tests := []struct { + size string + f func(noscan bool) func(*testing.T) + noscan bool + }{ + // Types without pointers. 
+ {"size=16", testFreegc[[16]byte], true}, // smallest we support currently + {"size=17", testFreegc[[17]byte], true}, + {"size=64", testFreegc[[64]byte], true}, + {"size=500", testFreegc[[500]byte], true}, + {"size=512", testFreegc[[512]byte], true}, + {"size=4096", testFreegc[[4096]byte], true}, + {"size=20000", testFreegc[[20000]byte], true}, // not power of 2 or spc boundary + {"size=32KiB-8", testFreegc[[1<<15 - 8]byte], true}, // max noscan small object for 64-bit + } + + // Run the tests twice if not in -short mode or not otherwise saving test time. + // First while manually calling runtime.GC to slightly increase isolation (perhaps making + // problems more reproducible). + for _, tt := range tests { + runtime.GC() + t.Run(fmt.Sprintf("gc=yes/ptrs=%v/%s", !tt.noscan, tt.size), tt.f(tt.noscan)) + } + runtime.GC() + + if testing.Short() || !RuntimeFreegcEnabled || runtime.Raceenabled { + return + } + + // Again, but without manually calling runtime.GC in the loop (perhaps less isolation might + // trigger problems). + for _, tt := range tests { + t.Run(fmt.Sprintf("gc=no/ptrs=%v/%s", !tt.noscan, tt.size), tt.f(tt.noscan)) + } + runtime.GC() +} + +func testFreegc[T comparable](noscan bool) func(*testing.T) { + // We use stressMultiple to influence the duration of the tests. + // When testing freegc changes, stressMultiple can be increased locally + // to test longer or in some cases with more goroutines. + // It can also be helpful to test with GODEBUG=clobberfree=1 and + // with and without doubleCheckMalloc and doubleCheckReusable enabled. + stressMultiple := 10 + if testing.Short() || !RuntimeFreegcEnabled || runtime.Raceenabled { + stressMultiple = 1 + } + + return func(t *testing.T) { + alloc := func() *T { + // Force heap alloc, plus some light validation of zeroed memory. 
+ t.Helper() + p := Escape(new(T)) + var zero T + if *p != zero { + t.Fatalf("allocator returned non-zero memory: %v", *p) + } + return p + } + + free := func(p *T) { + t.Helper() + var zero T + if *p != zero { + t.Fatalf("found non-zero memory before freegc (tests do not modify memory): %v", *p) + } + runtime.Freegc(unsafe.Pointer(p), unsafe.Sizeof(*p), noscan) + } + + t.Run("basic-free", func(t *testing.T) { + // Test that freeing a live heap object doesn't crash. + for range 100 { + p := alloc() + free(p) + } + }) + + t.Run("stack-free", func(t *testing.T) { + // Test that freeing a stack object doesn't crash. + for range 100 { + var x [32]byte + var y [32]*int + runtime.Freegc(unsafe.Pointer(&x), unsafe.Sizeof(x), true) // noscan + runtime.Freegc(unsafe.Pointer(&y), unsafe.Sizeof(y), false) // !noscan + } + }) + + // Check our allocations. These tests rely on the + // current implementation treating a re-used object + // as not adding to the allocation counts seen + // by testing.AllocsPerRun. (This is not the desired + // long-term behavior, but it is the current behavior and + // makes these tests convenient). + + t.Run("allocs-baseline", func(t *testing.T) { + // Baseline result without any explicit free. + allocs := testing.AllocsPerRun(100, func() { + for range 100 { + p := alloc() + _ = p + } + }) + if allocs < 100 { + // TODO(thepudds): we get exactly 100 for almost all the tests, but investigate why + // ~101 allocs for TestFreegc/ptrs=true/size=32KiB-8. + t.Fatalf("expected >=100 allocations, got %v", allocs) + } + }) + + t.Run("allocs-with-free", func(t *testing.T) { + // Same allocations, but now using explicit free so that + // no allocs get reported. (Again, not the desired long-term behavior). + if SizeSpecializedMallocEnabled && !noscan { + // TODO(thepudds): skip at this point in the stack for size-specialized malloc + // with !noscan. Additional integration with sizespecializedmalloc is in a later CL. 
+ t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc for pointer types") + } + if !RuntimeFreegcEnabled { + t.Skip("skipping alloc tests with runtime.freegc disabled") + } + allocs := testing.AllocsPerRun(100, func() { + for range 100 { + p := alloc() + free(p) + } + }) + if allocs != 0 { + t.Fatalf("expected 0 allocations, got %v", allocs) + } + }) + + t.Run("free-multiple", func(t *testing.T) { + // Multiple allocations outstanding before explicitly freeing, + // but still within the limit of our smallest free list size + // so that no allocs are reported. (Again, not long-term behavior). + if SizeSpecializedMallocEnabled && !noscan { + // TODO(thepudds): skip at this point in the stack for size-specialized malloc + // with !noscan. Additional integration with sizespecializedmalloc is in a later CL. + t.Skip("temporarily skipping alloc tests for GOEXPERIMENT=sizespecializedmalloc for pointer types") + } + if !RuntimeFreegcEnabled { + t.Skip("skipping alloc tests with runtime.freegc disabled") + } + const maxOutstanding = 20 + s := make([]*T, 0, maxOutstanding) + allocs := testing.AllocsPerRun(100*stressMultiple, func() { + s = s[:0] + for range maxOutstanding { + p := alloc() + s = append(s, p) + } + for _, p := range s { + free(p) + } + }) + if allocs != 0 { + t.Fatalf("expected 0 allocations, got %v", allocs) + } + }) + + if runtime.GOARCH == "wasm" { + // TODO(thepudds): for wasm, double-check if just slow, vs. some test logic problem, + // vs. something else. It might have been wasm was slowest with tests that spawn + // many goroutines, which might be expected for wasm. This skip might no longer be + // needed now that we have tuned test execution time more, or perhaps wasm should just + // always run in short mode, which might also let us remove this skip. 
+ t.Skip("skipping remaining freegc tests, was timing out on wasm") + } + + t.Run("free-many", func(t *testing.T) { + // Confirm we are graceful if we have more freed elements at once + // than the max free list size. + s := make([]*T, 0, 1000) + iterations := stressMultiple * stressMultiple // currently 1 (-short) or 100 + for range iterations { + s = s[:0] + for range 1000 { + p := alloc() + s = append(s, p) + } + for _, p := range s { + free(p) + } + } + }) + + t.Run("duplicate-check", func(t *testing.T) { + // A simple duplicate allocation test. We track what should be the set + // of live pointers in a map across a series of allocs and frees, + // and fail if a live pointer value is returned by an allocation. + // TODO: maybe add randomness? allow more live pointers? do across goroutines? + live := make(map[uintptr]bool) + for i := range 100 * stressMultiple { + var s []*T + // Alloc 10 times, tracking the live pointer values. + for j := range 10 { + p := alloc() + uptr := uintptr(unsafe.Pointer(p)) + if live[uptr] { + t.Fatalf("found duplicate pointer (0x%x). i: %d j: %d", uptr, i, j) + } + live[uptr] = true + s = append(s, p) + } + // Explicitly free those pointers, removing them from the live map. + for k := range s { + p := s[k] + s[k] = nil + uptr := uintptr(unsafe.Pointer(p)) + free(p) + delete(live, uptr) + } + } + }) + + t.Run("free-other-goroutine", func(t *testing.T) { + // Use explicit free, but the free happens on a different goroutine than the alloc. + // This also lightly simulates how the free code sees P migration or flushing + // the mcache, assuming we have > 1 P. (Not using testing.AllocsPerRun here). 
+ iterations := 10 * stressMultiple * stressMultiple // currently 10 (-short) or 1000 + for _, capacity := range []int{2} { + for range iterations { + ch := make(chan *T, capacity) + var wg sync.WaitGroup + for range 2 { + wg.Add(1) + go func() { + defer wg.Done() + for p := range ch { + free(p) + } + }() + } + for range 100 { + p := alloc() + ch <- p + } + close(ch) + wg.Wait() + } + } + }) + + t.Run("many-goroutines", func(t *testing.T) { + // Allocate across multiple goroutines, freeing on the same goroutine. + // TODO: probably remove the duplicate checking here; not that useful. + counts := []int{1, 2, 4, 8, 10 * stressMultiple} + for _, goroutines := range counts { + var wg sync.WaitGroup + for range goroutines { + wg.Add(1) + go func() { + defer wg.Done() + live := make(map[uintptr]bool) + for range 100 * stressMultiple { + p := alloc() + uptr := uintptr(unsafe.Pointer(p)) + if live[uptr] { + panic("TestFreeLive: found duplicate pointer") + } + live[uptr] = true + free(p) + delete(live, uptr) + } + }() + } + wg.Wait() + } + }) + + t.Run("assist-credit", func(t *testing.T) { + // Allocate and free using the same span class repeatedly while + // verifying it results in a net zero change in assist credit. + // This helps double-check our manipulation of the assist credit + // during mallocgc/freegc, including in cases when there is + // internal fragmentation when the requested mallocgc size is + // smaller than the size class. + // + // See https://go.dev/cl/717520 for some additional discussion, + // including how we can deliberately cause the test to fail currently + // if we purposefully introduce some assist credit bugs. + if SizeSpecializedMallocEnabled && !noscan { + // TODO(thepudds): skip this test at this point in the stack; later CL has + // integration with sizespecializedmalloc. 
+ t.Skip("temporarily skip assist credit tests for GOEXPERIMENT=sizespecializedmalloc for pointer types") + } + if !RuntimeFreegcEnabled { + t.Skip("skipping assist credit test with runtime.freegc disabled") + } + + // Use a background goroutine to continuously run the GC. + done := make(chan struct{}) + defer close(done) + go func() { + for { + select { + case <-done: + return + default: + runtime.GC() + } + } + }() + + // If making changes related to this test, consider testing locally with + // larger counts, like 100K or 1M. + counts := []int{1, 2, 10, 100 * stressMultiple} + // Dropping down to GOMAXPROCS=1 might help reduce noise. + defer GOMAXPROCS(GOMAXPROCS(1)) + size := int64(unsafe.Sizeof(*new(T))) + for _, count := range counts { + // Start by forcing a GC to reset this g's assist credit + // and perhaps help us get a cleaner measurement of GC cycle count. + runtime.GC() + for i := range count { + // We disable preemption to reduce other code's ability to adjust this g's + // assist credit or otherwise change things while we are measuring. + Acquirem() + + // We do two allocations per loop, with the second allocation being + // the one we measure. The first allocation tries to ensure at least one + // reusable object on the mspan's free list when we do our measured allocation. + p := alloc() + free(p) + + // Now do our primary allocation of interest, bracketed by measurements. + // We measure more than we strictly need (to log details in case of a failure). 
+ creditStart := AssistCredit() + blackenStart := GcBlackenEnable() + p = alloc() + blackenAfterAlloc := GcBlackenEnable() + creditAfterAlloc := AssistCredit() + free(p) + blackenEnd := GcBlackenEnable() + creditEnd := AssistCredit() + + Releasem() + GoschedIfBusy() + + delta := creditEnd - creditStart + if delta != 0 { + t.Logf("assist credit non-zero delta: %d", delta) + t.Logf("\t| size: %d i: %d count: %d", size, i, count) + t.Logf("\t| credit before: %d credit after: %d", creditStart, creditEnd) + t.Logf("\t| alloc delta: %d free delta: %d", + creditAfterAlloc-creditStart, creditEnd-creditAfterAlloc) + t.Logf("\t| gcBlackenEnable (start / after alloc / end): %v/%v/%v", + blackenStart, blackenAfterAlloc, blackenEnd) + t.FailNow() + } + } + } + }) + } +} + func TestPageCacheLeak(t *testing.T) { defer GOMAXPROCS(GOMAXPROCS(1)) leaked := PageCachePagesLeaked() @@ -337,6 +696,13 @@ func BenchmarkMalloc16(b *testing.B) { } } +func BenchmarkMalloc32(b *testing.B) { + for i := 0; i < b.N; i++ { + p := new([4]int64) + Escape(p) + } +} + func BenchmarkMallocTypeInfo8(b *testing.B) { for i := 0; i < b.N; i++ { p := new(struct { @@ -355,6 +721,15 @@ func BenchmarkMallocTypeInfo16(b *testing.B) { } } +func BenchmarkMallocTypeInfo32(b *testing.B) { + for i := 0; i < b.N; i++ { + p := new(struct { + p [32 / unsafe.Sizeof(uintptr(0))]*int + }) + Escape(p) + } +} + type LargeStruct struct { x [16][]byte } diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go index cade81031d..82872f1454 100644 --- a/src/runtime/mcache.go +++ b/src/runtime/mcache.go @@ -44,7 +44,17 @@ type mcache struct { // The rest is not accessed on every malloc. - alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass + // alloc contains spans to allocate from, indexed by spanClass. 
+ alloc [numSpanClasses]*mspan + + // TODO(thepudds): better to interleave alloc and reusableScan/reusableNoscan so that + // a single malloc call can often access both in the same cache line for a given spanClass. + // It's not interleaved right now in part to have slightly smaller diff, and might be + // negligible effect on current microbenchmarks. + + // reusableNoscan contains linked lists of reusable noscan heap objects, indexed by spanClass. + // The next pointers are stored in the first word of the heap objects. + reusableNoscan [numSpanClasses]gclinkptr stackcache [_NumStackOrders]stackfreelist @@ -96,6 +106,7 @@ func allocmcache() *mcache { c.alloc[i] = &emptymspan } c.nextSample = nextSample() + return c } @@ -153,6 +164,16 @@ func (c *mcache) refill(spc spanClass) { if s.allocCount != s.nelems { throw("refill of span with free space remaining") } + + // TODO(thepudds): we might be able to allow mallocgcTiny to reuse 16 byte objects from spc==5, + // but for now, just clear our reusable objects for tinySpanClass. + if spc == tinySpanClass { + c.reusableNoscan[spc] = 0 + } + if c.reusableNoscan[spc] != 0 { + throw("refill of span with reusable pointers remaining on pointer free list") + } + if s != &emptymspan { // Mark this span as no longer cached. if s.sweepgen != mheap_.sweepgen+3 { @@ -312,6 +333,13 @@ func (c *mcache) releaseAll() { c.tinyAllocs = 0 memstats.heapStats.release() + // Clear the reusable linked lists. + // For noscan objects, the nodes of the linked lists are the reusable heap objects themselves, + // so we can simply clear the linked list head pointers. + // TODO(thepudds): consider having debug logging of a non-empty reusable lists getting cleared, + // maybe based on the existing debugReusableLog. + clear(c.reusableNoscan[:]) + // Update heapLive and heapScan. 
gcController.update(dHeapLive, scanAlloc) } @@ -339,3 +367,25 @@ func (c *mcache) prepareForSweep() { stackcache_clear(c) c.flushGen.Store(mheap_.sweepgen) // Synchronizes with gcStart } + +// addReusableNoscan adds a noscan object pointer to the reusable pointer free list +// for a span class. +func (c *mcache) addReusableNoscan(spc spanClass, ptr uintptr) { + if !runtimeFreegcEnabled { + return + } + + // Add to the reusable pointers free list. + v := gclinkptr(ptr) + v.ptr().next = c.reusableNoscan[spc] + c.reusableNoscan[spc] = v +} + +// hasReusableNoscan reports whether there is a reusable object available for +// a noscan spc. +func (c *mcache) hasReusableNoscan(spc spanClass) bool { + if !runtimeFreegcEnabled { + return false + } + return c.reusableNoscan[spc] != 0 +} diff --git a/src/runtime/mcleanup.go b/src/runtime/mcleanup.go index 383217aa05..fc71af9f3f 100644 --- a/src/runtime/mcleanup.go +++ b/src/runtime/mcleanup.go @@ -72,8 +72,9 @@ import ( // pass the object to the [KeepAlive] function after the last point // where the object must remain reachable. func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup { - // Explicitly force ptr to escape to the heap. + // Explicitly force ptr and cleanup to escape to the heap. ptr = abi.Escape(ptr) + cleanup = abi.Escape(cleanup) // The pointer to the object must be valid. if ptr == nil { @@ -82,7 +83,8 @@ func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup { usptr := uintptr(unsafe.Pointer(ptr)) // Check that arg is not equal to ptr. 
- if kind := abi.TypeOf(arg).Kind(); kind == abi.Pointer || kind == abi.UnsafePointer { + argType := abi.TypeOf(arg) + if kind := argType.Kind(); kind == abi.Pointer || kind == abi.UnsafePointer { if unsafe.Pointer(ptr) == *((*unsafe.Pointer)(unsafe.Pointer(&arg))) { panic("runtime.AddCleanup: ptr is equal to arg, cleanup will never run") } @@ -98,12 +100,23 @@ func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup { return Cleanup{} } - fn := func() { - cleanup(arg) + // Create new storage for the argument. + var argv *S + if size := unsafe.Sizeof(arg); size < maxTinySize && argType.PtrBytes == 0 { + // Side-step the tiny allocator to avoid liveness issues, since this box + // will be treated like a root by the GC. We model the box as an array of + // uintptrs to guarantee maximum allocator alignment. + // + // TODO(mknyszek): Consider just making space in cleanupFn for this. The + // unfortunate part of this is it would grow specialCleanup by 16 bytes, so + // while there wouldn't be an allocation, *every* cleanup would take the + // memory overhead hit. + box := new([maxTinySize / goarch.PtrSize]uintptr) + argv = (*S)(unsafe.Pointer(box)) + } else { + argv = new(S) } - // Closure must escape. - fv := *(**funcval)(unsafe.Pointer(&fn)) - fv = abi.Escape(fv) + *argv = arg // Find the containing object. base, _, _ := findObject(usptr, 0, 0) @@ -120,7 +133,16 @@ func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup { gcCleanups.createGs() } - id := addCleanup(unsafe.Pointer(ptr), fv) + id := addCleanup(unsafe.Pointer(ptr), cleanupFn{ + // Instantiate a caller function to call the cleanup, that is cleanup(*argv). + // + // TODO(mknyszek): This allocates because the generic dictionary argument + // gets closed over, but callCleanup doesn't even use the dictionary argument, + // so theoretically that could be removed, eliminating an allocation. 
+ call: callCleanup[S], + fn: *(**funcval)(unsafe.Pointer(&cleanup)), + arg: unsafe.Pointer(argv), + }) if debug.checkfinalizers != 0 { cleanupFn := *(**funcval)(unsafe.Pointer(&cleanup)) setCleanupContext(unsafe.Pointer(ptr), abi.TypeFor[T](), sys.GetCallerPC(), cleanupFn.fn, id) @@ -131,6 +153,16 @@ func AddCleanup[T, S any](ptr *T, cleanup func(S), arg S) Cleanup { } } +// callCleanup is a helper for calling cleanups in a polymorphic way. +// +// In practice, all it does is call fn(*arg). arg must be a *T. +// +//go:noinline +func callCleanup[T any](fn *funcval, arg unsafe.Pointer) { + cleanup := *(*func(T))(unsafe.Pointer(&fn)) + cleanup(*(*T)(arg)) +} + // Cleanup is a handle to a cleanup call for a specific object. type Cleanup struct { // id is the unique identifier for the cleanup within the arena. @@ -216,7 +248,17 @@ const cleanupBlockSize = 512 // that the cleanup queue does not grow during marking (but it can shrink). type cleanupBlock struct { cleanupBlockHeader - cleanups [(cleanupBlockSize - unsafe.Sizeof(cleanupBlockHeader{})) / goarch.PtrSize]*funcval + cleanups [(cleanupBlockSize - unsafe.Sizeof(cleanupBlockHeader{})) / unsafe.Sizeof(cleanupFn{})]cleanupFn +} + +var cleanupFnPtrMask = [...]uint8{0b111} + +// cleanupFn represents a cleanup function with its argument, yet to be called. +type cleanupFn struct { + // call is an adapter function that understands how to safely call fn(*arg). + call func(*funcval, unsafe.Pointer) + fn *funcval // cleanup function passed to AddCleanup. + arg unsafe.Pointer // pointer to argument to pass to cleanup function. } var cleanupBlockPtrMask [cleanupBlockSize / goarch.PtrSize / 8]byte @@ -245,8 +287,8 @@ type cleanupBlockHeader struct { // // Must only be called if the GC is in the sweep phase (gcphase == _GCoff), // because it does not synchronize with the garbage collector. 
-func (b *cleanupBlock) enqueue(fn *funcval) bool { - b.cleanups[b.n] = fn +func (b *cleanupBlock) enqueue(c cleanupFn) bool { + b.cleanups[b.n] = c b.n++ return b.full() } @@ -375,7 +417,7 @@ func (q *cleanupQueue) tryTakeWork() bool { // enqueue queues a single cleanup for execution. // // Called by the sweeper, and only the sweeper. -func (q *cleanupQueue) enqueue(fn *funcval) { +func (q *cleanupQueue) enqueue(c cleanupFn) { mp := acquirem() pp := mp.p.ptr() b := pp.cleanups @@ -396,7 +438,7 @@ func (q *cleanupQueue) enqueue(fn *funcval) { } pp.cleanups = b } - if full := b.enqueue(fn); full { + if full := b.enqueue(c); full { q.full.push(&b.lfnode) pp.cleanups = nil q.addWork(1) @@ -641,7 +683,8 @@ func runCleanups() { gcCleanups.beginRunningCleanups() for i := 0; i < int(b.n); i++ { - fn := b.cleanups[i] + c := b.cleanups[i] + b.cleanups[i] = cleanupFn{} var racectx uintptr if raceenabled { @@ -650,20 +693,15 @@ func runCleanups() { // the same goroutine. // // Synchronize on fn. This would fail to find races on the - // closed-over values in fn (suppose fn is passed to multiple - // AddCleanup calls) if fn was not unique, but it is. Update - // the synchronization on fn if you intend to optimize it - // and store the cleanup function and cleanup argument on the - // queue directly. - racerelease(unsafe.Pointer(fn)) + // closed-over values in fn (suppose arg is passed to multiple + // AddCleanup calls) if arg was not unique, but it is. + racerelease(unsafe.Pointer(c.arg)) racectx = raceEnterNewCtx() - raceacquire(unsafe.Pointer(fn)) + raceacquire(unsafe.Pointer(c.arg)) } // Execute the next cleanup. - cleanup := *(*func())(unsafe.Pointer(&fn)) - cleanup() - b.cleanups[i] = nil + c.call(c.fn, c.arg) if raceenabled { // Restore the old context. 
diff --git a/src/runtime/mcleanup_test.go b/src/runtime/mcleanup_test.go index 22b9eccd20..341d30afa7 100644 --- a/src/runtime/mcleanup_test.go +++ b/src/runtime/mcleanup_test.go @@ -336,3 +336,31 @@ func TestCleanupLost(t *testing.T) { t.Errorf("expected %d cleanups to be executed, got %d", got, want) } } + +// BenchmarkAddCleanupAndStop benchmarks adding and removing a cleanup +// from the same allocation. +// +// At face value, this benchmark is unrealistic, since no program would +// do this in practice. However, adding cleanups to new allocations in a +// loop is also unrealistic. It adds additional unused allocations, +// exercises uncommon performance pitfalls in AddCleanup (traversing the +// specials list, which should just be its own benchmark), and executing +// cleanups at a frequency that is unlikely to appear in real programs. +// +// This benchmark is still useful however, since we can get a low-noise +// measurement of the cost of AddCleanup and Stop all in one without the +// above pitfalls: we can measure the pure overhead. We can then separate +// out the cost of each in CPU profiles if we so choose (they're not so +// inexpensive as to make this infeasible). +func BenchmarkAddCleanupAndStop(b *testing.B) { + b.ReportAllocs() + + type T struct { + v int + p unsafe.Pointer + } + x := new(T) + for b.Loop() { + runtime.AddCleanup(x, func(int) {}, 14).Stop() + } +} diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index 43afbc330b..febcd9558c 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -1727,7 +1727,13 @@ func gcBgMarkWorker(ready chan struct{}) { // the stack (see gopark). Prevent deadlock from recursively // starting GC by disabling preemption. gp.m.preemptoff = "GC worker init" - node := &new(gcBgMarkWorkerNodePadded).gcBgMarkWorkerNode // TODO: technically not allowed in the heap. See comment in tagptr.go. + // TODO: This is technically not allowed in the heap. See comment in tagptr.go. 
+ // + // It is kept alive simply by virtue of being used in the infinite loop + // below. gcBgMarkWorkerPool keeps pointers to nodes that are not + // GC-visible, so this must be kept alive indefinitely (even if + // GOMAXPROCS decreases). + node := &new(gcBgMarkWorkerNodePadded).gcBgMarkWorkerNode gp.m.preemptoff = "" node.gp.set(gp) diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index dd76973c62..714b9a51df 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -204,7 +204,7 @@ func gcMarkRootCheck() { }) } -// ptrmask for an allocation containing a single pointer. +// oneptrmask for an allocation containing a single pointer. var oneptrmask = [...]uint8{1} // markroot scans the i'th root. @@ -251,7 +251,7 @@ func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64 { // N.B. This only needs to synchronize with cleanup execution, which only resets these blocks. // All cleanup queueing happens during sweep. n := uintptr(atomic.Load(&cb.n)) - scanblock(uintptr(unsafe.Pointer(&cb.cleanups[0])), n*goarch.PtrSize, &cleanupBlockPtrMask[0], gcw, nil) + scanblock(uintptr(unsafe.Pointer(&cb.cleanups[0])), n*unsafe.Sizeof(cleanupFn{}), &cleanupBlockPtrMask[0], gcw, nil) } case work.baseSpans <= i && i < work.baseStacks: @@ -489,7 +489,7 @@ func gcScanFinalizer(spf *specialfinalizer, s *mspan, gcw *gcWork) { // gcScanCleanup scans the relevant parts of a cleanup special as a root. func gcScanCleanup(spc *specialCleanup, gcw *gcWork) { // The special itself is a root. - scanblock(uintptr(unsafe.Pointer(&spc.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil) + scanblock(uintptr(unsafe.Pointer(&spc.cleanup)), unsafe.Sizeof(cleanupFn{}), &cleanupFnPtrMask[0], gcw, nil) } // gcAssistAlloc performs GC work to make gp's assist debt positive. 
@@ -1524,29 +1524,32 @@ func scanConservative(b, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackSca if debugScanConservative { printlock() print("conservatively scanning [", hex(b), ",", hex(b+n), ")\n") - hexdumpWords(b, b+n, func(p uintptr) byte { + hexdumpWords(b, n, func(p uintptr, m hexdumpMarker) { if ptrmask != nil { word := (p - b) / goarch.PtrSize bits := *addb(ptrmask, word/8) if (bits>>(word%8))&1 == 0 { - return '$' + return } } val := *(*uintptr)(unsafe.Pointer(p)) if state != nil && state.stack.lo <= val && val < state.stack.hi { - return '@' + m.start() + println("ptr to stack") + return } span := spanOfHeap(val) if span == nil { - return ' ' + return } idx := span.objIndex(val) if span.isFreeOrNewlyAllocated(idx) { - return ' ' + return } - return '*' + m.start() + println("ptr to heap") }) printunlock() } diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go index 3594b33cfd..fa560f9966 100644 --- a/src/runtime/mgcmark_greenteagc.go +++ b/src/runtime/mgcmark_greenteagc.go @@ -978,7 +978,9 @@ func spanSetScans(spanBase uintptr, nelems uint16, imb *spanInlineMarkBits, toSc } func scanObjectSmall(spanBase, b, objSize uintptr, gcw *gcWork) { - ptrBits := heapBitsSmallForAddrInline(spanBase, b, objSize) + hbitsBase, _ := spanHeapBitsRange(spanBase, gc.PageSize, objSize) + hbits := (*byte)(unsafe.Pointer(hbitsBase)) + ptrBits := extractHeapBitsSmall(hbits, spanBase, b, objSize) gcw.heapScanWork += int64(sys.Len64(uint64(ptrBits)) * goarch.PtrSize) nptrs := 0 n := sys.OnesCount64(uint64(ptrBits)) @@ -1017,12 +1019,14 @@ func scanObjectsSmall(base, objSize uintptr, elems uint16, gcw *gcWork, scans *g break } n := sys.OnesCount64(uint64(bits)) + hbitsBase, _ := spanHeapBitsRange(base, gc.PageSize, objSize) + hbits := (*byte)(unsafe.Pointer(hbitsBase)) for range n { j := sys.TrailingZeros64(uint64(bits)) bits &^= 1 << j b := base + uintptr(i*(goarch.PtrSize*8)+j)*objSize - ptrBits := heapBitsSmallForAddrInline(base, b, 
objSize) + ptrBits := extractHeapBitsSmall(hbits, base, b, objSize) gcw.heapScanWork += int64(sys.Len64(uint64(ptrBits)) * goarch.PtrSize) n := sys.OnesCount64(uint64(ptrBits)) @@ -1056,10 +1060,7 @@ func scanObjectsSmall(base, objSize uintptr, elems uint16, gcw *gcWork, scans *g } } -func heapBitsSmallForAddrInline(spanBase, addr, elemsize uintptr) uintptr { - hbitsBase, _ := spanHeapBitsRange(spanBase, gc.PageSize, elemsize) - hbits := (*byte)(unsafe.Pointer(hbitsBase)) - +func extractHeapBitsSmall(hbits *byte, spanBase, addr, elemsize uintptr) uintptr { // These objects are always small enough that their bitmaps // fit in a single word, so just load the word or two we need. // diff --git a/src/runtime/mgcpacer.go b/src/runtime/mgcpacer.go index 32c1b941e5..388cce83cd 100644 --- a/src/runtime/mgcpacer.go +++ b/src/runtime/mgcpacer.go @@ -10,7 +10,7 @@ import ( "internal/runtime/atomic" "internal/runtime/math" "internal/strconv" - _ "unsafe" // for go:linkname + _ "unsafe" ) const ( @@ -749,30 +749,33 @@ func (c *gcControllerState) enlistWorker() { } } -// findRunnableGCWorker returns a background mark worker for pp if it -// should be run. This must only be called when gcBlackenEnabled != 0. -func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) { +// assignWaitingGCWorker assigns a background mark worker to pp if one should +// be run. +// +// If a worker is selected, it is assigned to pp.nextGCMarkWorker and the P is +// wired as a GC mark worker. The G is still in _Gwaiting. If no worker is +// selected, ok returns false. +// +// If assignWaitingGCWorker returns true, this P must either: +// - Mark the G as runnable and run it, clearing pp.nextGCMarkWorker. +// - Or, call c.releaseNextGCMarkWorker. +// +// This must only be called when gcBlackenEnabled != 0. 
+func (c *gcControllerState) assignWaitingGCWorker(pp *p, now int64) (bool, int64) { if gcBlackenEnabled == 0 { throw("gcControllerState.findRunnable: blackening not enabled") } - // Since we have the current time, check if the GC CPU limiter - // hasn't had an update in a while. This check is necessary in - // case the limiter is on but hasn't been checked in a while and - // so may have left sufficient headroom to turn off again. if now == 0 { now = nanotime() } - if gcCPULimiter.needUpdate(now) { - gcCPULimiter.update(now) - } if !gcShouldScheduleWorker(pp) { // No good reason to schedule a worker. This can happen at // the end of the mark phase when there are still // assists tapering off. Don't bother running a worker // now because it'll just return immediately. - return nil, now + return false, now } if c.dedicatedMarkWorkersNeeded.Load() <= 0 && c.fractionalUtilizationGoal == 0 { @@ -783,7 +786,7 @@ func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) { // When a dedicated worker stops running, the gcBgMarkWorker loop notes // the need for the worker before returning it to the pool. If we don't // see the need now, we wouldn't have found it in the pool anyway. - return nil, now + return false, now } // Grab a worker before we commit to running below. @@ -800,7 +803,7 @@ func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) { // it will always do so with queued global work. Thus, that P // will be immediately eligible to re-run the worker G it was // just using, ensuring work can complete. - return nil, now + return false, now } decIfPositive := func(val *atomic.Int64) bool { @@ -823,7 +826,7 @@ func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) { } else if c.fractionalUtilizationGoal == 0 { // No need for fractional workers. gcBgMarkWorkerPool.push(&node.node) - return nil, now + return false, now } else { // Is this P behind on the fractional utilization // goal? 
@@ -833,12 +836,51 @@ func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) { if delta > 0 && float64(pp.gcFractionalMarkTime.Load())/float64(delta) > c.fractionalUtilizationGoal { // Nope. No need to run a fractional worker. gcBgMarkWorkerPool.push(&node.node) - return nil, now + return false, now } // Run a fractional worker. pp.gcMarkWorkerMode = gcMarkWorkerFractionalMode } + pp.nextGCMarkWorker = node + return true, now +} + +// findRunnableGCWorker returns a background mark worker for pp if it +// should be run. +// +// If findRunnableGCWorker returns a G, this P is wired as a GC mark worker and +// must run the G. +// +// This must only be called when gcBlackenEnabled != 0. +// +// This function is allowed to have write barriers because it is called from +// the portion of findRunnable that always has a P. +// +//go:yeswritebarrierrec +func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) { + // Since we have the current time, check if the GC CPU limiter + // hasn't had an update in a while. This check is necessary in + // case the limiter is on but hasn't been checked in a while and + // so may have left sufficient headroom to turn off again. + if now == 0 { + now = nanotime() + } + if gcCPULimiter.needUpdate(now) { + gcCPULimiter.update(now) + } + + // If a worker wasn't already assigned by procresize, assign one now. + if pp.nextGCMarkWorker == nil { + ok, now := c.assignWaitingGCWorker(pp, now) + if !ok { + return nil, now + } + } + + node := pp.nextGCMarkWorker + pp.nextGCMarkWorker = nil + // Run the background mark worker. gp := node.gp.ptr() trace := traceAcquire() @@ -850,6 +892,23 @@ func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) { return gp, now } +// Release an unused pp.nextGCMarkWorker, if any. +// +// This function is allowed to have write barriers because it is called from +// the portion of schedule. 
+// +//go:yeswritebarrierrec +func (c *gcControllerState) releaseNextGCMarkWorker(pp *p) { + node := pp.nextGCMarkWorker + if node == nil { + return + } + + c.markWorkerStop(pp.gcMarkWorkerMode, 0) + gcBgMarkWorkerPool.push(&node.node) + pp.nextGCMarkWorker = nil +} + // resetLive sets up the controller state for the next mark phase after the end // of the previous one. Must be called after endCycle and before commit, before // the world is started. diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go index c3d6afb90a..4eecb1cfd9 100644 --- a/src/runtime/mgcsweep.go +++ b/src/runtime/mgcsweep.go @@ -885,7 +885,7 @@ func (s *mspan) reportZombies() { if length > 1024 { length = 1024 } - hexdumpWords(addr, addr+length, nil) + hexdumpWords(addr, length, nil) } mbits.advance() abits.advance() diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index 711c7790eb..d2ff063b00 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -435,7 +435,7 @@ type mspan struct { // indicating a free object. freeindex is then adjusted so that subsequent scans begin // just past the newly discovered free object. // - // If freeindex == nelems, this span has no free objects. + // If freeindex == nelems, this span has no free objects, though might have reusable objects. // // allocBits is a bitmap of objects in this span. // If n >= freeindex and allocBits[n/8] & (1<<(n%8)) is 0 @@ -2161,7 +2161,7 @@ func removefinalizer(p unsafe.Pointer) { type specialCleanup struct { _ sys.NotInHeap special special - fn *funcval + cleanup cleanupFn // Globally unique ID for the cleanup, obtained from mheap_.cleanupID. id uint64 } @@ -2170,14 +2170,18 @@ type specialCleanup struct { // cleanups are allowed on an object, and even the same pointer. // A cleanup id is returned which can be used to uniquely identify // the cleanup. 
-func addCleanup(p unsafe.Pointer, f *funcval) uint64 { +func addCleanup(p unsafe.Pointer, c cleanupFn) uint64 { + // TODO(mknyszek): Consider pooling specialCleanups on the P + // so we don't have to take the lock every time. Just locking + // is a considerable part of the cost of AddCleanup. This + // would also require reserving some cleanup IDs on the P. lock(&mheap_.speciallock) s := (*specialCleanup)(mheap_.specialCleanupAlloc.alloc()) mheap_.cleanupID++ // Increment first. ID 0 is reserved. id := mheap_.cleanupID unlock(&mheap_.speciallock) s.special.kind = _KindSpecialCleanup - s.fn = f + s.cleanup = c s.id = id mp := acquirem() @@ -2187,17 +2191,16 @@ func addCleanup(p unsafe.Pointer, f *funcval) uint64 { // situation where it's possible that markrootSpans // has already run but mark termination hasn't yet. if gcphase != _GCoff { - gcw := &mp.p.ptr().gcw // Mark the cleanup itself, since the // special isn't part of the GC'd heap. - scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil) + gcScanCleanup(s, &mp.p.ptr().gcw) } releasem(mp) - // Keep f alive. There's a window in this function where it's - // only reachable via the special while the special hasn't been - // added to the specials list yet. This is similar to a bug + // Keep c and its referents alive. There's a window in this function + // where it's only reachable via the special while the special hasn't + // been added to the specials list yet. This is similar to a bug // discovered for weak handles, see #70455. - KeepAlive(f) + KeepAlive(c) return id } @@ -2534,7 +2537,15 @@ func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr { s := (*specialWeakHandle)(mheap_.specialWeakHandleAlloc.alloc()) unlock(&mheap_.speciallock) - handle := new(atomic.Uintptr) + // N.B. Pad the weak handle to ensure it doesn't share a tiny + // block with any other allocations. This can lead to leaks, such + // as in go.dev/issue/76007. 
As an alternative, we could consider + // using the currently-unused 8-byte noscan size class. + type weakHandleBox struct { + h atomic.Uintptr + _ [maxTinySize - unsafe.Sizeof(atomic.Uintptr{})]byte + } + handle := &(new(weakHandleBox).h) s.special.kind = _KindSpecialWeakHandle s.handle = handle handle.Store(uintptr(p)) @@ -2792,7 +2803,7 @@ func freeSpecial(s *special, p unsafe.Pointer, size uintptr) { // Cleanups, unlike finalizers, do not resurrect the objects // they're attached to, so we only need to pass the cleanup // function, not the object. - gcCleanups.enqueue(sc.fn) + gcCleanups.enqueue(sc.cleanup) lock(&mheap_.speciallock) mheap_.specialCleanupAlloc.free(unsafe.Pointer(sc)) unlock(&mheap_.speciallock) diff --git a/src/runtime/panic.go b/src/runtime/panic.go index e1105afd0f..ff2dec386f 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -746,7 +746,7 @@ func printpanics(p *_panic) { } print("panic: ") printpanicval(p.arg) - if p.repanicked { + if p.recovered && p.repanicked { print(" [recovered, repanicked]") } else if p.recovered { print(" [recovered]") diff --git a/src/runtime/print.go b/src/runtime/print.go index c01db9d7f9..d2733fb266 100644 --- a/src/runtime/print.go +++ b/src/runtime/print.go @@ -5,7 +5,6 @@ package runtime import ( - "internal/goarch" "internal/strconv" "unsafe" ) @@ -212,43 +211,3 @@ func printeface(e eface) { func printiface(i iface) { print("(", i.tab, ",", i.data, ")") } - -// hexdumpWords prints a word-oriented hex dump of [p, end). -// -// If mark != nil, it will be called with each printed word's address -// and should return a character mark to appear just before that -// word's value. It can return 0 to indicate no mark. 
-func hexdumpWords(p, end uintptr, mark func(uintptr) byte) { - printlock() - var markbuf [1]byte - markbuf[0] = ' ' - minhexdigits = int(unsafe.Sizeof(uintptr(0)) * 2) - for i := uintptr(0); p+i < end; i += goarch.PtrSize { - if i%16 == 0 { - if i != 0 { - println() - } - print(hex(p+i), ": ") - } - - if mark != nil { - markbuf[0] = mark(p + i) - if markbuf[0] == 0 { - markbuf[0] = ' ' - } - } - gwrite(markbuf[:]) - val := *(*uintptr)(unsafe.Pointer(p + i)) - print(hex(val)) - print(" ") - - // Can we symbolize val? - fn := findfunc(val) - if fn.valid() { - print("<", funcname(fn), "+", hex(val-fn.entry()), "> ") - } - } - minhexdigits = 0 - println() - printunlock() -} diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 21b276cabf..58fb4bd681 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -3120,7 +3120,7 @@ func startm(pp *p, spinning, lockheld bool) { //go:nowritebarrierrec func handoffp(pp *p) { // handoffp must start an M in any situation where - // findrunnable would return a G to run on pp. + // findRunnable would return a G to run on pp. // if it has local work, start it straight away if !runqempty(pp) || !sched.runq.empty() { @@ -3363,7 +3363,7 @@ func findRunnable() (gp *g, inheritTime, tryWakeP bool) { mp := getg().m // The conditions here and in handoffp must agree: if - // findrunnable would return a G to run, handoffp must start + // findRunnable would return a G to run, handoffp must start // an M. 
top: @@ -3587,7 +3587,7 @@ top: goto top } if releasep() != pp { - throw("findrunnable: wrong p") + throw("findRunnable: wrong p") } now = pidleput(pp, now) unlock(&sched.lock) @@ -3632,7 +3632,7 @@ top: if mp.spinning { mp.spinning = false if sched.nmspinning.Add(-1) < 0 { - throw("findrunnable: negative nmspinning") + throw("findRunnable: negative nmspinning") } // Note the for correctness, only the last M transitioning from @@ -3705,10 +3705,10 @@ top: if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 { sched.pollUntil.Store(pollUntil) if mp.p != 0 { - throw("findrunnable: netpoll with p") + throw("findRunnable: netpoll with p") } if mp.spinning { - throw("findrunnable: netpoll with spinning") + throw("findRunnable: netpoll with spinning") } delay := int64(-1) if pollUntil != 0 { @@ -3974,7 +3974,7 @@ func checkIdleGCNoP() (*p, *g) { // timers and the network poller if there isn't one already. func wakeNetPoller(when int64) { if sched.lastpoll.Load() == 0 { - // In findrunnable we ensure that when polling the pollUntil + // In findRunnable we ensure that when polling the pollUntil // field is either zero or the time to which the current // poll is expected to run. This can have a spurious wakeup // but should never miss a wakeup. @@ -3999,7 +3999,7 @@ func resetspinning() { gp.m.spinning = false nmspinning := sched.nmspinning.Add(-1) if nmspinning < 0 { - throw("findrunnable: negative nmspinning") + throw("findRunnable: negative nmspinning") } // M wakeup policy is deliberately somewhat conservative, so check if we // need to wakeup another P here. See "Worker thread parking/unparking" @@ -4136,11 +4136,23 @@ top: gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available + // May be on a new P. + pp = mp.p.ptr() + // findRunnable may have collected an allp snapshot. The snapshot is // only required within findRunnable. Clear it to all GC to collect the // slice. 
mp.clearAllpSnapshot() + // If the P was assigned a next GC mark worker but findRunnable + // selected anything else, release the worker so another P may run it. + // + // N.B. If this occurs because a higher-priority goroutine was selected + // (trace reader), then tryWakeP is set, which will wake another P to + // run the worker. If this occurs because the GC is no longer active, + // there is no need to wakep. + gcController.releaseNextGCMarkWorker(pp) + if debug.dontfreezetheworld > 0 && freezing.Load() { // See comment in freezetheworld. We don't want to perturb // scheduler state, so we didn't gcstopm in findRunnable, but @@ -4659,6 +4671,11 @@ func reentersyscall(pc, sp, bp uintptr) { gp.m.locks-- } +// debugExtendGrunningNoP is a debug mode that extends the windows in which +// we're _Grunning without a P in order to try to shake out bugs with code +// assuming this state is impossible. +const debugExtendGrunningNoP = false + // Standard syscall entry used by the go syscall library and normal cgo calls. // // This is exported via linkname to assembly in the syscall package and x/sys. @@ -4771,6 +4788,9 @@ func entersyscallblock() { // <-- // Caution: we're in a small window where we are in _Grunning without a P. // --> + if debugExtendGrunningNoP { + usleep(10) + } casgstatus(gp, _Grunning, _Gsyscall) if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp { systemstack(func() { @@ -4853,6 +4873,9 @@ func exitsyscall() { // Caution: we're in a window where we may be in _Grunning without a P. // Either we will grab a P or call exitsyscall0, where we'll switch to // _Grunnable. + if debugExtendGrunningNoP { + usleep(10) + } // Grab and clear our old P. oldp := gp.m.oldp.ptr() @@ -6026,8 +6049,10 @@ func procresize(nprocs int32) *p { unlock(&allpLock) } + // Assign Ms to Ps with runnable goroutines. 
var runnablePs *p var runnablePsNeedM *p + var idlePs *p for i := nprocs - 1; i >= 0; i-- { pp := allp[i] if gp.m.p.ptr() == pp { @@ -6035,7 +6060,8 @@ func procresize(nprocs int32) *p { } pp.status = _Pidle if runqempty(pp) { - pidleput(pp, now) + pp.link.set(idlePs) + idlePs = pp continue } @@ -6061,6 +6087,8 @@ func procresize(nprocs int32) *p { pp.link.set(runnablePs) runnablePs = pp } + // Assign Ms to remaining runnable Ps without usable oldm. See comment + // above. for runnablePsNeedM != nil { pp := runnablePsNeedM runnablePsNeedM = pp.link.ptr() @@ -6071,6 +6099,62 @@ func procresize(nprocs int32) *p { runnablePs = pp } + // Now that we've assigned Ms to Ps with runnable goroutines, assign GC + // mark workers to remaining idle Ps, if needed. + // + // By assigning GC workers to Ps here, we slightly speed up starting + // the world, as we will start enough Ps to run all of the user + // goroutines and GC mark workers all at once, rather than using a + // sequence of wakep calls as each P's findRunnable realizes it needs + // to run a mark worker instead of a user goroutine. + // + // By assigning GC workers to Ps only _after_ previously-running Ps are + // assigned Ms, we ensure that goroutines previously running on a P + // continue to run on the same P, with GC mark workers preferring + // previously-idle Ps. This helps prevent goroutines from shuffling + // around too much across STW. + // + // N.B., if there aren't enough Ps left in idlePs for all of the GC + // mark workers, then findRunnable will still choose to run mark + // workers on Ps assigned above. + // + // N.B., we do this during any STW in the mark phase, not just the + // sweep termination STW that starts the mark phase. gcBgMarkWorker + // always preempts by removing itself from the P, so even unrelated + // STWs during the mark require that Ps reselect mark workers upon + // restart. 
+ if gcBlackenEnabled != 0 { + for idlePs != nil { + pp := idlePs + + ok, _ := gcController.assignWaitingGCWorker(pp, now) + if !ok { + // No more mark workers needed. + break + } + + // Got a worker, P is now runnable. + // + // mget may return nil if there aren't enough Ms, in + // which case startTheWorldWithSema will start one. + // + // N.B. findRunnableGCWorker will make the worker G + // itself runnable. + idlePs = pp.link.ptr() + mp := mget() + pp.m.set(mp) + pp.link.set(runnablePs) + runnablePs = pp + } + } + + // Finally, any remaining Ps are truly idle. + for idlePs != nil { + pp := idlePs + idlePs = pp.link.ptr() + pidleput(pp, now) + } + stealOrder.reset(uint32(nprocs)) var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) @@ -6173,6 +6257,10 @@ func releasepNoTrace() *p { print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n") throw("releasep: invalid p state") } + + // P must clear if nextGCMarkWorker if it stops. + gcController.releaseNextGCMarkWorker(pp) + gp.m.p = 0 pp.m = 0 pp.status = _Pidle @@ -7259,7 +7347,7 @@ func pidlegetSpinning(now int64) (*p, int64) { pp, now := pidleget(now) if pp == nil { - // See "Delicate dance" comment in findrunnable. We found work + // See "Delicate dance" comment in findRunnable. We found work // that we cannot take, we must synchronize with non-spinning // Ms that may be preparing to drop their P. sched.needspinning.Store(1) @@ -7497,23 +7585,36 @@ func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) // Try to steal from pp.runnext. if next := pp.runnext; next != 0 { if pp.status == _Prunning { - // Sleep to ensure that pp isn't about to run the g - // we are about to steal. - // The important use case here is when the g running - // on pp ready()s another g and then almost - // immediately blocks. 
Instead of stealing runnext - // in this window, back off to give pp a chance to - // schedule runnext. This will avoid thrashing gs - // between different Ps. - // A sync chan send/recv takes ~50ns as of time of - // writing, so 3us gives ~50x overshoot. - if !osHasLowResTimer { - usleep(3) - } else { - // On some platforms system timer granularity is - // 1-15ms, which is way too much for this - // optimization. So just yield. - osyield() + if mp := pp.m.ptr(); mp != nil { + if gp := mp.curg; gp == nil || readgstatus(gp)&^_Gscan != _Gsyscall { + // Sleep to ensure that pp isn't about to run the g + // we are about to steal. + // The important use case here is when the g running + // on pp ready()s another g and then almost + // immediately blocks. Instead of stealing runnext + // in this window, back off to give pp a chance to + // schedule runnext. This will avoid thrashing gs + // between different Ps. + // A sync chan send/recv takes ~50ns as of time of + // writing, so 3us gives ~50x overshoot. + // If curg is nil, we assume that the P is likely + // to be in the scheduler. If curg isn't nil and isn't + // in a syscall, then it's either running, waiting, or + // runnable. In this case we want to sleep because the + // P might either call into the scheduler soon (running), + // or already is (since we found a waiting or runnable + // goroutine hanging off of a running P, suggesting it + // either recently transitioned out of running, or will + // transition to running shortly). + if !osHasLowResTimer { + usleep(3) + } else { + // On some platforms system timer granularity is + // 1-15ms, which is way too much for this + // optimization. So just yield. 
+ osyield() + } + } } } if !pp.runnext.cas(next, 0) { diff --git a/src/runtime/proc_test.go b/src/runtime/proc_test.go index b3084f4895..35a1aeab1f 100644 --- a/src/runtime/proc_test.go +++ b/src/runtime/proc_test.go @@ -1221,7 +1221,7 @@ func TestTraceSTW(t *testing.T) { var errors int for i := range runs { - err := runTestTracesSTW(t, i) + err := runTestTracesSTW(t, i, "TraceSTW", "stop-the-world (read mem stats)") if err != nil { t.Logf("Run %d failed: %v", i, err) errors++ @@ -1235,7 +1235,43 @@ func TestTraceSTW(t *testing.T) { } } -func runTestTracesSTW(t *testing.T, run int) (err error) { +// TestTraceGCSTW verifies that goroutines continue running on the same M and P +// after a GC STW. +func TestTraceGCSTW(t *testing.T) { + // Very similar to TestTraceSTW, but using a STW that starts the GC. + // When the GC starts, the background GC mark workers start running, + // which provide an additional source of disturbance to the scheduler. + // + // procresize assigns GC workers to previously-idle Ps to avoid + // changing what the previously-running Ps are doing. + + if testing.Short() { + t.Skip("skipping in -short mode") + } + + if runtime.NumCPU() < 8 { + t.Skip("This test sets GOMAXPROCS=8 and wants to avoid thread descheduling as much as possible. 
Skip on machines with less than 8 CPUs") + } + + const runs = 50 + + var errors int + for i := range runs { + err := runTestTracesSTW(t, i, "TraceGCSTW", "stop-the-world (GC sweep termination)") + if err != nil { + t.Logf("Run %d failed: %v", i, err) + errors++ + } + } + + pct := float64(errors)/float64(runs) + t.Logf("Errors: %d/%d = %f%%", errors, runs, 100*pct) + if pct > 0.25 { + t.Errorf("Error rate too high") + } +} + +func runTestTracesSTW(t *testing.T, run int, name, stwType string) (err error) { t.Logf("Run %d", run) // By default, TSAN sleeps for 1s at exit to allow background @@ -1243,7 +1279,7 @@ func runTestTracesSTW(t *testing.T, run int) (err error) { // much, since we are running 50 iterations, so disable the sleep. // // Outside of race mode, GORACE does nothing. - buf := []byte(runTestProg(t, "testprog", "TraceSTW", "GORACE=atexit_sleep_ms=0")) + buf := []byte(runTestProg(t, "testprog", name, "GORACE=atexit_sleep_ms=0")) // We locally "fail" the run (return an error) if the trace exhibits // unwanted scheduling. i.e., the target goroutines did not remain on @@ -1253,7 +1289,7 @@ func runTestTracesSTW(t *testing.T, run int) (err error) { // occur, such as a trace parse error. defer func() { if err != nil || t.Failed() { - testtrace.Dump(t, fmt.Sprintf("TestTraceSTW-run%d", run), []byte(buf), false) + testtrace.Dump(t, fmt.Sprintf("Test%s-run%d", name, run), []byte(buf), false) } }() @@ -1509,12 +1545,10 @@ findEnd: break findEnd case trace.EventRangeBegin: r := ev.Range() - if r.Name == "stop-the-world (read mem stats)" { + if r.Name == stwType { // Note when we see the STW begin. This is not // load bearing; it's purpose is simply to fail - // the test if we manage to remove the STW from - // ReadMemStat, so we remember to change this - // test to add some new source of STW. + // the test if we accidentally remove the STW. 
stwSeen = true } } diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 6c955460d4..56082bf7f5 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -854,6 +854,18 @@ type p struct { // mark worker started. gcMarkWorkerStartTime int64 + // nextGCMarkWorker is the next mark worker to run. This may be set + // during start-the-world to assign a worker to this P. The P runs this + // worker on the next call to gcController.findRunnableGCWorker. If the + // P runs something else or stops, it must release this worker via + // gcController.releaseNextGCMarkWorker. + // + // See comment in gcBgMarkWorker about the lifetime of + // gcBgMarkWorkerNode. + // + // Only accessed by this P or during STW. + nextGCMarkWorker *gcBgMarkWorkerNode + // gcw is this P's GC work buffer cache. The work buffer is // filled by write barriers, drained by mutator assists, and // disposed on certain GC state transitions. @@ -1425,9 +1437,9 @@ var ( // must be set. An idle P (passed to pidleput) cannot add new timers while // idle, so if it has no timers at that time, its mask may be cleared. // - // Thus, we get the following effects on timer-stealing in findrunnable: + // Thus, we get the following effects on timer-stealing in findRunnable: // - // - Idle Ps with no timers when they go idle are never checked in findrunnable + // - Idle Ps with no timers when they go idle are never checked in findRunnable // (for work- or timer-stealing; this is the ideal case). // - Running Ps must always be checked. // - Idle Ps whose timers are stolen must continue to be checked until they run diff --git a/src/runtime/slice.go b/src/runtime/slice.go index e31d5dccb2..a9e8fc1610 100644 --- a/src/runtime/slice.go +++ b/src/runtime/slice.go @@ -399,3 +399,107 @@ func bytealg_MakeNoZero(len int) []byte { cap := roundupsize(uintptr(len), true) return unsafe.Slice((*byte)(mallocgc(cap, nil, false)), cap)[:len] } + +// moveSlice copies the input slice to the heap and returns it. 
+// et is the element type of the slice. +func moveSlice(et *_type, old unsafe.Pointer, len, cap int) (unsafe.Pointer, int, int) { + if cap == 0 { + if old != nil { + old = unsafe.Pointer(&zerobase) + } + return old, 0, 0 + } + capmem := uintptr(cap) * et.Size_ + new := mallocgc(capmem, et, true) + bulkBarrierPreWriteSrcOnly(uintptr(new), uintptr(old), capmem, et) + memmove(new, old, capmem) + return new, len, cap +} + +// moveSliceNoScan is like moveSlice except the element type is known to +// not have any pointers. We instead pass in the size of the element. +func moveSliceNoScan(elemSize uintptr, old unsafe.Pointer, len, cap int) (unsafe.Pointer, int, int) { + if cap == 0 { + if old != nil { + old = unsafe.Pointer(&zerobase) + } + return old, 0, 0 + } + capmem := uintptr(cap) * elemSize + new := mallocgc(capmem, nil, false) + memmove(new, old, capmem) + return new, len, cap +} + +// moveSliceNoCap is like moveSlice, but can pick any appropriate capacity +// for the returned slice. +// Elements between len and cap in the returned slice will be zeroed. +func moveSliceNoCap(et *_type, old unsafe.Pointer, len int) (unsafe.Pointer, int, int) { + if len == 0 { + if old != nil { + old = unsafe.Pointer(&zerobase) + } + return old, 0, 0 + } + lenmem := uintptr(len) * et.Size_ + capmem := roundupsize(lenmem, false) + new := mallocgc(capmem, et, true) + bulkBarrierPreWriteSrcOnly(uintptr(new), uintptr(old), lenmem, et) + memmove(new, old, lenmem) + return new, len, int(capmem / et.Size_) +} + +// moveSliceNoCapNoScan is a combination of moveSliceNoScan and moveSliceNoCap. 
+func moveSliceNoCapNoScan(elemSize uintptr, old unsafe.Pointer, len int) (unsafe.Pointer, int, int) { + if len == 0 { + if old != nil { + old = unsafe.Pointer(&zerobase) + } + return old, 0, 0 + } + lenmem := uintptr(len) * elemSize + capmem := roundupsize(lenmem, true) + new := mallocgc(capmem, nil, false) + memmove(new, old, lenmem) + if capmem > lenmem { + memclrNoHeapPointers(add(new, lenmem), capmem-lenmem) + } + return new, len, int(capmem / elemSize) +} + +// growsliceBuf is like growslice, but we can use the given buffer +// as a backing store if we want. bufPtr must be on the stack. +func growsliceBuf(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type, bufPtr unsafe.Pointer, bufLen int) slice { + if newLen > bufLen { + // Doesn't fit, process like a normal growslice. + return growslice(oldPtr, newLen, oldCap, num, et) + } + oldLen := newLen - num + if oldPtr != bufPtr && oldLen != 0 { + // Move data to start of buffer. + // Note: bufPtr is on the stack, so no write barrier needed. + memmove(bufPtr, oldPtr, uintptr(oldLen)*et.Size_) + } + // Pick a new capacity. + // + // Unlike growslice, we don't need to double the size each time. + // The work done here is not proportional to the length of the slice. + // (Unless the memmove happens above, but that is rare, and in any + // case there are not many elements on this path.) + // + // Instead, we try to just bump up to the next size class. + // This will ensure that we don't waste any space when we eventually + // call moveSlice with the resulting slice. + newCap := int(roundupsize(uintptr(newLen)*et.Size_, !et.Pointers()) / et.Size_) + + // Zero slice beyond newLen. + // The buffer is stack memory, so NoHeapPointers is ok. + // Caller will overwrite [oldLen:newLen], so we don't need to zero that portion. + // If et.Pointers(), buffer is at least initialized so we don't need to + // worry about the caller overwriting junk in [oldLen:newLen]. 
+ if newLen < newCap { + memclrNoHeapPointers(add(bufPtr, uintptr(newLen)*et.Size_), uintptr(newCap-newLen)*et.Size_) + } + + return slice{bufPtr, newLen, newCap} +} diff --git a/src/runtime/slice_test.go b/src/runtime/slice_test.go index cd2bc26d1e..5463b6c02f 100644 --- a/src/runtime/slice_test.go +++ b/src/runtime/slice_test.go @@ -6,6 +6,9 @@ package runtime_test import ( "fmt" + "internal/race" + "internal/testenv" + "runtime" "testing" ) @@ -499,3 +502,319 @@ func BenchmarkAppendInPlace(b *testing.B) { }) } + +//go:noinline +func byteSlice(n int) []byte { + var r []byte + for i := range n { + r = append(r, byte(i)) + } + return r +} +func TestAppendByteInLoop(t *testing.T) { + testenv.SkipIfOptimizationOff(t) + if race.Enabled { + t.Skip("skipping in -race mode") + } + for _, test := range [][3]int{ + {0, 0, 0}, + {1, 1, 8}, + {2, 1, 8}, + {8, 1, 8}, + {9, 1, 16}, + {16, 1, 16}, + {17, 1, 24}, + {24, 1, 24}, + {25, 1, 32}, + {32, 1, 32}, + {33, 1, 64}, // If we up the stack buffer size from 32->64, this line and the next would become 48. + {48, 1, 64}, + {49, 1, 64}, + {64, 1, 64}, + {65, 2, 128}, + } { + n := test[0] + want := test[1] + wantCap := test[2] + var r []byte + got := testing.AllocsPerRun(10, func() { + r = byteSlice(n) + }) + if got != float64(want) { + t.Errorf("for size %d, got %f allocs want %d", n, got, want) + } + if cap(r) != wantCap { + t.Errorf("for size %d, got capacity %d want %d", n, cap(r), wantCap) + } + } +} + +//go:noinline +func ptrSlice(n int, p *[]*byte) { + var r []*byte + for range n { + r = append(r, nil) + } + *p = r +} +func TestAppendPtrInLoop(t *testing.T) { + testenv.SkipIfOptimizationOff(t) + if race.Enabled { + t.Skip("skipping in -race mode") + } + var tests [][3]int + if runtime.PtrSize == 8 { + tests = [][3]int{ + {0, 0, 0}, + {1, 1, 1}, + {2, 1, 2}, + {3, 1, 3}, // This is the interesting case, allocates 24 bytes when before it was 32. 
+ {4, 1, 4}, + {5, 1, 8}, + {6, 1, 8}, + {7, 1, 8}, + {8, 1, 8}, + {9, 2, 16}, + } + } else { + tests = [][3]int{ + {0, 0, 0}, + {1, 1, 2}, + {2, 1, 2}, + {3, 1, 4}, + {4, 1, 4}, + {5, 1, 6}, // These two are also 24 bytes instead of 32. + {6, 1, 6}, // + {7, 1, 8}, + {8, 1, 8}, + {9, 1, 16}, + {10, 1, 16}, + {11, 1, 16}, + {12, 1, 16}, + {13, 1, 16}, + {14, 1, 16}, + {15, 1, 16}, + {16, 1, 16}, + {17, 2, 32}, + } + } + for _, test := range tests { + n := test[0] + want := test[1] + wantCap := test[2] + var r []*byte + got := testing.AllocsPerRun(10, func() { + ptrSlice(n, &r) + }) + if got != float64(want) { + t.Errorf("for size %d, got %f allocs want %d", n, got, want) + } + if cap(r) != wantCap { + t.Errorf("for size %d, got capacity %d want %d", n, cap(r), wantCap) + } + } +} + +//go:noinline +func byteCapSlice(n int) ([]byte, int) { + var r []byte + for i := range n { + r = append(r, byte(i)) + } + return r, cap(r) +} +func TestAppendByteCapInLoop(t *testing.T) { + testenv.SkipIfOptimizationOff(t) + if race.Enabled { + t.Skip("skipping in -race mode") + } + for _, test := range [][3]int{ + {0, 0, 0}, + {1, 1, 8}, + {2, 1, 8}, + {8, 1, 8}, + {9, 1, 16}, + {16, 1, 16}, + {17, 1, 24}, + {24, 1, 24}, + {25, 1, 32}, + {32, 1, 32}, + {33, 1, 64}, + {48, 1, 64}, + {49, 1, 64}, + {64, 1, 64}, + {65, 2, 128}, + } { + n := test[0] + want := test[1] + wantCap := test[2] + var r []byte + got := testing.AllocsPerRun(10, func() { + r, _ = byteCapSlice(n) + }) + if got != float64(want) { + t.Errorf("for size %d, got %f allocs want %d", n, got, want) + } + if cap(r) != wantCap { + t.Errorf("for size %d, got capacity %d want %d", n, cap(r), wantCap) + } + } +} + +func TestAppendGeneric(t *testing.T) { + type I *int + r := testAppendGeneric[I](100) + if len(r) != 100 { + t.Errorf("bad length") + } +} + +//go:noinline +func testAppendGeneric[E any](n int) []E { + var r []E + var z E + for range n { + r = append(r, z) + } + return r +} + +func appendSomeBytes(r []byte, s []byte) 
[]byte { + for _, b := range s { + r = append(r, b) + } + return r +} + +func TestAppendOfArg(t *testing.T) { + r := make([]byte, 24) + for i := 0; i < 24; i++ { + r[i] = byte(i) + } + appendSomeBytes(r, []byte{25, 26, 27}) + // Do the same thing, trying to overwrite any + // stack-allocated buffers used above. + s := make([]byte, 24) + for i := 0; i < 24; i++ { + s[i] = 99 + } + appendSomeBytes(s, []byte{99, 99, 99}) + // Check that we still have the right data. + for i, b := range r { + if b != byte(i) { + t.Errorf("r[%d]=%d, want %d", i, b, byte(i)) + } + } + +} + +func BenchmarkAppendInLoop(b *testing.B) { + for _, size := range []int{0, 1, 8, 16, 32, 64, 128} { + b.Run(fmt.Sprintf("%d", size), + func(b *testing.B) { + b.ReportAllocs() + for b.Loop() { + byteSlice(size) + } + }) + } +} + +func TestMoveToHeapEarly(t *testing.T) { + // Just checking that this compiles. + var x []int + y := x // causes a move2heap in the entry block + for range 5 { + x = append(x, 5) + } + _ = y +} + +func TestMoveToHeapCap(t *testing.T) { + var c int + r := func() []byte { + var s []byte + for i := range 10 { + s = append(s, byte(i)) + } + c = cap(s) + return s + }() + if c != cap(r) { + t.Errorf("got cap=%d, want %d", c, cap(r)) + } + sinkSlice = r +} + +//go:noinline +func runit(f func()) { + f() +} + +func TestMoveToHeapClosure1(t *testing.T) { + var c int + r := func() []byte { + var s []byte + for i := range 10 { + s = append(s, byte(i)) + } + runit(func() { + c = cap(s) + }) + return s + }() + if c != cap(r) { + t.Errorf("got cap=%d, want %d", c, cap(r)) + } + sinkSlice = r +} +func TestMoveToHeapClosure2(t *testing.T) { + var c int + r := func() []byte { + var s []byte + for i := range 10 { + s = append(s, byte(i)) + } + c = func() int { + return cap(s) + }() + return s + }() + if c != cap(r) { + t.Errorf("got cap=%d, want %d", c, cap(r)) + } + sinkSlice = r +} + +//go:noinline +func buildClosure(t *testing.T) ([]byte, func()) { + var s []byte + for i := range 20 { + s = 
append(s, byte(i)) + } + c := func() { + for i, b := range s { + if b != byte(i) { + t.Errorf("s[%d]=%d, want %d", i, b, i) + } + } + } + return s, c +} + +func TestMoveToHeapClosure3(t *testing.T) { + _, f := buildClosure(t) + overwriteStack(0) + f() +} + +//go:noinline +func overwriteStack(n int) uint64 { + var x [100]uint64 + for i := range x { + x[i] = 0xabcdabcdabcdabcd + } + return x[n] +} + +var sinkSlice []byte diff --git a/src/runtime/sys_riscv64.go b/src/runtime/sys_riscv64.go index e710840819..65dc684c33 100644 --- a/src/runtime/sys_riscv64.go +++ b/src/runtime/sys_riscv64.go @@ -4,7 +4,12 @@ package runtime -import "unsafe" +import ( + "unsafe" + + "internal/abi" + "internal/runtime/sys" +) // adjust Gobuf as if it executed a call to fn with context ctxt // and then did an immediate Gosave. @@ -12,7 +17,9 @@ func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) { if buf.lr != 0 { throw("invalid use of gostartcall") } - buf.lr = buf.pc + // Use double the PC quantum on riscv64, so that we retain + // four byte alignment and use non-compressed instructions. + buf.lr = abi.FuncPCABI0(goexit) + sys.PCQuantum*2 buf.pc = uintptr(fn) buf.ctxt = ctxt } diff --git a/src/runtime/testdata/testprog/crash.go b/src/runtime/testdata/testprog/crash.go index 556215a71e..fcce388871 100644 --- a/src/runtime/testdata/testprog/crash.go +++ b/src/runtime/testdata/testprog/crash.go @@ -22,6 +22,7 @@ func init() { register("RepanickedPanic", RepanickedPanic) register("RepanickedMiddlePanic", RepanickedMiddlePanic) register("RepanickedPanicSandwich", RepanickedPanicSandwich) + register("DoublePanicWithSameValue", DoublePanicWithSameValue) } func test(name string) { @@ -189,3 +190,13 @@ func RepanickedPanicSandwich() { panic("outer") }() } + +// Double panic with same value and not recovered. +// See issue 76099. 
+func DoublePanicWithSameValue() { + var e any = "message" + defer func() { + panic(e) + }() + panic(e) +} diff --git a/src/runtime/testdata/testprog/gc.go b/src/runtime/testdata/testprog/gc.go index bbe1453401..32e2c5e1b4 100644 --- a/src/runtime/testdata/testprog/gc.go +++ b/src/runtime/testdata/testprog/gc.go @@ -396,7 +396,7 @@ func gcMemoryLimit(gcPercent int) { // should do considerably better than this bound. bound := int64(myLimit + 16<<20) if runtime.GOOS == "darwin" { - bound += 16 << 20 // Be more lax on Darwin, see issue 73136. + bound += 24 << 20 // Be more lax on Darwin, see issue 73136. } start := time.Now() for time.Since(start) < 200*time.Millisecond { diff --git a/src/runtime/testdata/testprog/stw_trace.go b/src/runtime/testdata/testprog/stw_trace.go index 0fed55b875..0fa15da09e 100644 --- a/src/runtime/testdata/testprog/stw_trace.go +++ b/src/runtime/testdata/testprog/stw_trace.go @@ -7,15 +7,18 @@ package main import ( "context" "log" + "math/rand/v2" "os" "runtime" "runtime/debug" + "runtime/metrics" "runtime/trace" "sync/atomic" ) func init() { register("TraceSTW", TraceSTW) + register("TraceGCSTW", TraceGCSTW) } // The parent writes to ping and waits for the children to write back @@ -53,7 +56,7 @@ func TraceSTW() { // https://go.dev/issue/65694). Alternatively, we could just ignore the // trace if the GC runs. runtime.GOMAXPROCS(4) - debug.SetGCPercent(0) + debug.SetGCPercent(-1) if err := trace.Start(os.Stdout); err != nil { log.Fatalf("failed to start tracing: %v", err) @@ -86,6 +89,112 @@ func TraceSTW() { stop.Store(true) } +// Variant of TraceSTW for GC STWs. We want the GC mark workers to start on +// previously-idle Ps, rather than bumping the current P. +func TraceGCSTW() { + ctx := context.Background() + + // The idea here is to have 2 target goroutines that are constantly + // running. When the world restarts after STW, we expect these + // goroutines to continue execution on the same M and P. 
+ // + // Set GOMAXPROCS=8 to make room for the 2 target goroutines, 1 parent, + // 2 dedicated workers, and a bit of slack. + // + // Disable the GC initially so we can be sure it only triggers once we + // are ready. + runtime.GOMAXPROCS(8) + debug.SetGCPercent(-1) + + if err := trace.Start(os.Stdout); err != nil { + log.Fatalf("failed to start tracing: %v", err) + } + defer trace.Stop() + + for i := range 2 { + go traceSTWTarget(i) + } + + // Wait for children to start running. + ping.Store(1) + for pong[0].Load() != 1 {} + for pong[1].Load() != 1 {} + + trace.Log(ctx, "TraceSTW", "start") + + // STW + triggerGC() + + // Make sure to run long enough for the children to schedule again + // after STW. This is included for good measure, but the goroutines + // really ought to have already scheduled since the entire GC + // completed. + ping.Store(2) + for pong[0].Load() != 2 {} + for pong[1].Load() != 2 {} + + trace.Log(ctx, "TraceSTW", "end") + + stop.Store(true) +} + +func triggerGC() { + // Allocate a bunch to trigger the GC rather than using runtime.GC. The + // latter blocks until the GC is complete, which is convenient, but + // messes with scheduling as it gives this P a chance to steal the + // other goroutines before their Ps get up and running again. + + // Bring heap size up prior to enabling the GC to ensure that there is + // a decent amount of work in case the GC triggers immediately upon + // re-enabling. + for range 1000 { + alloc() + } + + sample := make([]metrics.Sample, 1) + sample[0].Name = "/gc/cycles/total:gc-cycles" + metrics.Read(sample) + + start := sample[0].Value.Uint64() + + debug.SetGCPercent(100) + + // Keep allocating until the GC is complete. We really only need to + // continue until the mark workers are scheduled, but there isn't a + // good way to measure that. 
+ for { + metrics.Read(sample) + if sample[0].Value.Uint64() != start { + return + } + + alloc() + } +} + +// Allocate a tree data structure to generate plenty of scan work for the GC. + +type node struct { + children []*node +} + +var gcSink node + +func alloc() { + // 10% chance of adding a node a each layer. + + curr := &gcSink + for { + if len(curr.children) == 0 || rand.Float32() < 0.1 { + curr.children = append(curr.children, new(node)) + return + } + + i := rand.IntN(len(curr.children)) + curr = curr.children[i] + } +} + // Manually insert a morestack call. Leaf functions can omit morestack, but // non-leaf functions should include them. diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 6649f72471..74aaeba876 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -1366,16 +1366,19 @@ func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) { // Print the hex dump. print("stack: frame={sp:", hex(frame.sp), ", fp:", hex(frame.fp), "} stack=[", hex(stk.lo), ",", hex(stk.hi), ")\n") - hexdumpWords(lo, hi, func(p uintptr) byte { - switch p { - case frame.fp: - return '>' - case frame.sp: - return '<' - case bad: - return '!' 
+ hexdumpWords(lo, hi-lo, func(p uintptr, m hexdumpMarker) { + if p == frame.fp { + m.start() + println("FP") + } + if p == frame.sp { + m.start() + println("SP") + } + if p == bad { + m.start() + println("bad") } - return 0 }) } diff --git a/src/vendor/golang.org/x/sys/cpu/cpu.go b/src/vendor/golang.org/x/sys/cpu/cpu.go index 63541994ef..34c9ae76ef 100644 --- a/src/vendor/golang.org/x/sys/cpu/cpu.go +++ b/src/vendor/golang.org/x/sys/cpu/cpu.go @@ -92,6 +92,9 @@ var ARM64 struct { HasSHA2 bool // SHA2 hardware implementation HasCRC32 bool // CRC32 hardware implementation HasATOMICS bool // Atomic memory operation instruction set + HasHPDS bool // Hierarchical permission disables in translations tables + HasLOR bool // Limited ordering regions + HasPAN bool // Privileged access never HasFPHP bool // Half precision floating-point instruction set HasASIMDHP bool // Advanced SIMD half precision instruction set HasCPUID bool // CPUID identification scheme registers diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/src/vendor/golang.org/x/sys/cpu/cpu_arm64.go index af2aa99f9f..f449c679fe 100644 --- a/src/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/src/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -65,10 +65,10 @@ func setMinimalFeatures() { func readARM64Registers() { Initialized = true - parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) + parseARM64SystemRegisters(getisar0(), getisar1(), getmmfr1(), getpfr0()) } -func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { +func parseARM64SystemRegisters(isar0, isar1, mmfr1, pfr0 uint64) { // ID_AA64ISAR0_EL1 switch extractBits(isar0, 4, 7) { case 1: @@ -152,6 +152,22 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { ARM64.HasI8MM = true } + // ID_AA64MMFR1_EL1 + switch extractBits(mmfr1, 12, 15) { + case 1, 2: + ARM64.HasHPDS = true + } + + switch extractBits(mmfr1, 16, 19) { + case 1: + ARM64.HasLOR = true + } + + switch extractBits(mmfr1, 20, 23) { + case 1, 2, 3: + ARM64.HasPAN = 
true + } + // ID_AA64PFR0_EL1 switch extractBits(pfr0, 16, 19) { case 0: diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/src/vendor/golang.org/x/sys/cpu/cpu_arm64.s index 22cc99844a..a4f24b3b0c 100644 --- a/src/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/src/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -9,31 +9,34 @@ // func getisar0() uint64 TEXT ·getisar0(SB),NOSPLIT,$0-8 // get Instruction Set Attributes 0 into x0 - // mrs x0, ID_AA64ISAR0_EL1 = d5380600 - WORD $0xd5380600 + MRS ID_AA64ISAR0_EL1, R0 MOVD R0, ret+0(FP) RET // func getisar1() uint64 TEXT ·getisar1(SB),NOSPLIT,$0-8 // get Instruction Set Attributes 1 into x0 - // mrs x0, ID_AA64ISAR1_EL1 = d5380620 - WORD $0xd5380620 + MRS ID_AA64ISAR1_EL1, R0 + MOVD R0, ret+0(FP) + RET + +// func getmmfr1() uint64 +TEXT ·getmmfr1(SB),NOSPLIT,$0-8 + // get Memory Model Feature Register 1 into x0 + MRS ID_AA64MMFR1_EL1, R0 MOVD R0, ret+0(FP) RET // func getpfr0() uint64 TEXT ·getpfr0(SB),NOSPLIT,$0-8 // get Processor Feature Register 0 into x0 - // mrs x0, ID_AA64PFR0_EL1 = d5380400 - WORD $0xd5380400 + MRS ID_AA64PFR0_EL1, R0 MOVD R0, ret+0(FP) RET // func getzfr0() uint64 TEXT ·getzfr0(SB),NOSPLIT,$0-8 // get SVE Feature Register 0 into x0 - // mrs x0, ID_AA64ZFR0_EL1 = d5380480 - WORD $0xd5380480 + MRS ID_AA64ZFR0_EL1, R0 MOVD R0, ret+0(FP) RET diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/src/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index 6ac6e1efb2..e3fc5a8d31 100644 --- a/src/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/src/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -8,5 +8,6 @@ package cpu func getisar0() uint64 func getisar1() uint64 +func getmmfr1() uint64 func getpfr0() uint64 func getzfr0() uint64 diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go index 7f1946780b..8df2079e15 100644 --- a/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ b/src/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ 
-8,4 +8,5 @@ package cpu func getisar0() uint64 { return 0 } func getisar1() uint64 { return 0 } +func getmmfr1() uint64 { return 0 } func getpfr0() uint64 { return 0 } diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/src/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go index ebfb3fc8e7..19aea0633e 100644 --- a/src/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go +++ b/src/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -167,7 +167,7 @@ func doinit() { setMinimalFeatures() return } - parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64mmfr1, cpuid.aa64pfr0) Initialized = true } diff --git a/src/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/src/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go index 85b64d5ccb..87fd3a7780 100644 --- a/src/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go +++ b/src/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go @@ -59,7 +59,7 @@ func doinit() { if !ok { return } - parseARM64SystemRegisters(isar0, isar1, 0) + parseARM64SystemRegisters(isar0, isar1, 0, 0) Initialized = true } diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index f1e33686ed..bf7a797966 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -15,7 +15,7 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/lif golang.org/x/net/nettest -# golang.org/x/sys v0.37.0 +# golang.org/x/sys v0.38.0 ## explicit; go 1.24.0 golang.org/x/sys/cpu # golang.org/x/text v0.30.0 diff --git a/src/weak/pointer_test.go b/src/weak/pointer_test.go index da464a8d01..5e8b9bef58 100644 --- a/src/weak/pointer_test.go +++ b/src/weak/pointer_test.go @@ -16,8 +16,10 @@ import ( ) type T struct { - // N.B. This must contain a pointer, otherwise the weak handle might get placed - // in a tiny block making the tests in this package flaky. + // N.B. 
T is what it is to avoid having test values get tiny-allocated + // in the same block as the weak handle, but since the fix to + // go.dev/issue/76007, this should no longer be possible. + // TODO(mknyszek): Consider using tiny-allocated values for all the tests. t *T a int b int @@ -327,3 +329,37 @@ func TestImmortalPointer(t *testing.T) { t.Errorf("immortal weak pointer to %p has unexpected Value %p", want, got) } } + +func TestPointerTiny(t *testing.T) { + runtime.GC() // Clear tiny-alloc caches. + + const N = 1000 + wps := make([]weak.Pointer[int], N) + for i := range N { + // N.B. *x is just an int, so the value is very likely + // tiny-allocated alongside the weak handle, assuming bug + // from go.dev/issue/76007 exists. + x := new(int) + *x = i + wps[i] = weak.Make(x) + } + + // Get the cleanups to start running. + runtime.GC() + + // Expect at least 3/4ths of the weak pointers to have gone nil. + // + // Note that we provide some leeway since it's possible our allocation + // gets grouped with some other long-lived tiny allocation, but this + // shouldn't be the case for the vast majority of allocations. + n := 0 + for _, wp := range wps { + if wp.Value() == nil { + n++ + } + } + const want = 3 * N / 4 + if n < want { + t.Fatalf("not enough weak pointers are nil: expected at least %v, got %v", want, n) + } +} |
