aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorCherry Mui <cherryyz@google.com>2025-08-04 15:07:05 -0400
committerCherry Mui <cherryyz@google.com>2025-08-04 15:07:05 -0400
commit775fb527458e09258d07a3c92eada92167b3e7d3 (patch)
treea6ce5d30793ee5797673481fa13d4d4179444bd7 /src
parent6b9b59e144a0db697b0e22920ff0b7e0b51c0945 (diff)
parent7a1679d7ae32dd8a01bd355413ee77ba517f5f43 (diff)
downloadgo-775fb527458e09258d07a3c92eada92167b3e7d3.tar.xz
[dev.simd] all: merge master (7a1679d) into dev.simd
Conflicts: - src/cmd/compile/internal/amd64/ssa.go - src/cmd/compile/internal/ssa/rewriteAMD64.go - src/internal/buildcfg/exp.go - src/internal/cpu/cpu.go - src/internal/cpu/cpu_x86.go - src/internal/goexperiment/flags.go Merge List: + 2025-08-04 7a1679d7ae cmd/compile: move s390x over to new bounds check strategy + 2025-08-04 95693816a5 cmd/compile: move riscv64 over to new bounds check strategy + 2025-08-04 d7bd7773eb go/parser: remove safePos + 2025-08-04 4b6cbc377f cmd/cgo/internal/test: use (syntactic) constant for C array bound + 2025-08-03 b2960e3580 cmd/internal/obj/loong64: add {V,XV}{BITCLR/BITSET/BITREV}[I].{B/H/W/D} instructions support + 2025-08-03 abeeef1c08 cmd/compile/internal/test: fix typo in comments + 2025-08-03 d44749b65b cmd/internal/obj/loong64: add [X]VLDREPL.{B/H/W/D} instructions support + 2025-08-03 d6beda863e runtime: add reference to debugPinnerV1 + 2025-08-01 4ab1aec007 cmd/go: modload should use a read-write lock to improve concurrency + 2025-08-01 e666972a67 runtime: deduplicate Windows stdcall + 2025-08-01 ef40549786 runtime,syscall: move loadlibrary and getprocaddress to syscall + 2025-08-01 336931a4ca cmd/go: use os.Rename to move files on Windows + 2025-08-01 eef5f8d930 cmd/compile: enforce that locals are always accessed with SP base register + 2025-08-01 e071617222 cmd/compile: optimize multiplication rules on loong64 + 2025-07-31 eb7f515c4d cmd/compile: use generated loops instead of DUFFZERO on amd64 + 2025-07-31 c0ee2fd4e3 cmd/go: explicitly reject module paths "go" and "toolchain" + 2025-07-30 a4d99770c0 runtime/metrics: add cleanup and finalizer queue metrics + 2025-07-30 70a2ff7648 runtime: add cgo call benchmark + 2025-07-30 69338a335a cmd/go/internal/gover: fix ModIsPrerelease for toolchain versions + 2025-07-30 cedf63616a cmd/compile: add floating point min/max intrinsics on s390x + 2025-07-30 82a1921c3b all: remove redundant Swiss prefixes + 2025-07-30 2ae059ccaf all: remove GOEXPERIMENT=swissmap + 2025-07-30 
cc571dab91 cmd/compile: deduplicate instructions when rewrite func results + 2025-07-30 2174a7936c crypto/tls: use standard chacha20-poly1305 cipher suite names + 2025-07-30 8330fb48a6 cmd/compile: move mips32 over to new bounds check strategy + 2025-07-30 9f9d7b50e8 cmd/compile: move mips64 over to new bounds check strategy + 2025-07-30 5216fd570e cmd/compile: move loong64 over to new bounds check strategy + 2025-07-30 89a0af86b8 cmd/compile: allow ops to specify clobbering input registers + 2025-07-30 5e94d72158 cmd/compile: simplify zerorange on arm64 + 2025-07-30 8cd85e602a cmd/compile: check domination of loop return in both controls + 2025-07-30 cefaed0de0 reflect: fix noswiss builder + 2025-07-30 3aa1b00081 regexp: fix compiling alternate patterns of different fold case literals + 2025-07-30 b1e933d955 cmd/compile: avoid extending when already sufficiently masked on loong64 + 2025-07-29 880ca333d7 cmd/compile: removing log2uint32 function + 2025-07-29 1513661dc3 cmd/compile: simplify logX implementations + 2025-07-29 bd94ae8903 cmd/compile: use unsigned power-of-two detector for unsigned mod + 2025-07-29 f3582fc80e cmd/compile: add unsigned power-of-two detector + 2025-07-29 f7d167fe71 internal/abi: move direct/indirect flag from Kind to TFlag + 2025-07-29 e0b07dc22e os/exec: fix incorrect expansion of "", "." and ".." 
in LookPath + 2025-07-29 25816d401c internal/goexperiment: delete RangeFunc goexperiment + 2025-07-29 7961bf71f8 internal/goexperiment: delete CacheProg goexperiment + 2025-07-29 e15a14c4dd sync: remove synchashtriemap GOEXPERIMENT + 2025-07-29 7dccd6395c cmd/compile: move arm32 over to new bounds check strategy + 2025-07-29 d79405a344 runtime: only deduct assist credit for arenas during GC + 2025-07-29 19a086f716 cmd/go/internal/telemetrystats: count goexperiments + 2025-07-29 aa95ab8215 image: fix formatting of godoc link + 2025-07-29 4c854b7a3e crypto/elliptic: change a variable name that have the same name as keywords + 2025-07-28 b10eb1d042 cmd/compile: simplify zerorange on amd64 + 2025-07-28 f8eae7a3c3 os/user: fix tests to pass on non-english Windows + 2025-07-28 0984264471 internal/poll: remove msg field from Windows' poll.operation + 2025-07-28 d7b4114346 internal/poll: remove rsan field from Windows' poll.operation + 2025-07-28 361b1ab41f internal/poll: remove sa field from Windows' poll.operation + 2025-07-28 9b6bd64e46 internal/poll: remove qty and flags fields from Windows' poll.operation + 2025-07-28 cd3655a824 internal/runtime/maps: fix spelling errors in comments + 2025-07-28 d5dc36af45 runtime: remove openbsd/mips64 related code + 2025-07-28 64ba72474d errors: omit redundant nil check in type assertion for Join + 2025-07-28 e151db3e06 all: omit unnecessary type conversions + 2025-07-28 4569255f8c cmd/compile: cleanup SelectN rules by indexing into args + 2025-07-28 94645d2413 cmd/compile: rewrite cmov(x, x, cond) into x + 2025-07-28 10c5cf68d4 net/http: add proper panic message + 2025-07-28 46b5839231 test/codegen: fix failing condmove wasm tests + 2025-07-28 98f301cf68 runtime,syscall: move SyscallX implementations from runtime to syscall + 2025-07-28 c7ed3a1c5a internal/runtime/syscall/windows: factor out code from runtime + 2025-07-28 e81eac19d3 hash/crc32: fix incorrect checksums with avx512+race + 2025-07-25 6fbad4be75 cmd/compile: remove 
no-longer-necessary call to calculateDepths + 2025-07-25 5045fdd8ff cmd/compile: fix containsUnavoidableCall computation + 2025-07-25 d28b27cd8e go/types, types2: use nil to represent incomplete explicit aliases + 2025-07-25 7b53d8d06e cmd/compile/internal/types2: add loaded state between loader calls and constraint expansion + 2025-07-25 374e3be2eb os/user: user random name for the test user account + 2025-07-25 1aa154621d runtime: rename scanobject to scanObject + 2025-07-25 41b429881a runtime: duplicate scanobject in greentea and non-greentea files + 2025-07-25 aeb256e98a cmd/compile: remove unused arg from gorecover + 2025-07-25 08376e1a9c runtime: iterate through inlinings when processing recover() + 2025-07-25 c76c3abc54 encoding/json: fix truncated Token error regression in goexperiment.jsonv2 + 2025-07-25 ebdbfccd98 encoding/json/jsontext: preserve buffer capacity in Encoder.Reset + 2025-07-25 91c4f0ccd5 reflect: avoid a bounds check in stack-constrained code + 2025-07-24 3636ced112 encoding/json: fix extra data regression under goexperiment.jsonv2 + 2025-07-24 a6eec8bdc7 encoding/json: reduce error text regressions under goexperiment.jsonv2 + 2025-07-24 0fa88dec1e time: remove redundant uint32 conversion in split + 2025-07-24 ada30b8248 internal/buildcfg: add ability to get GORISCV64 variable in GOGOARCH + 2025-07-24 6f6c6c5782 cmd/internal/obj: rip out argp adjustment for wrapper frames + 2025-07-24 7b50024330 runtime: detect successful recovers differently + 2025-07-24 7b9de668bd unicode/utf8: skip ahead during ascii runs in Valid/ValidString + 2025-07-24 076eae436e cmd/compile: move amd64 and 386 over to new bounds check strategy + 2025-07-24 f703dc5bef cmd/compile: add missing StringLen rule in prove + 2025-07-24 394d0bee8d cmd/compile: move arm64 over to new bounds check strategy + 2025-07-24 3024785b92 cmd/compile,runtime: remember idx+len for bounds check failure with less code + 2025-07-24 741a19ab41 runtime: move bounds check constants to 
internal/abi + 2025-07-24 ce05ad448f cmd/compile: rewrite condselects into doublings and halvings + 2025-07-24 fcd28070fe cmd/compile: add opt branchelim to rewrite some CondSelect into math + 2025-07-24 f32cf8e4b0 cmd/compile: learn transitive proofs for safe unsigned subs + 2025-07-24 d574856482 cmd/compile: learn transitive proofs for safe negative signed adds + 2025-07-24 1a72920f09 cmd/compile: learn transitive proofs for safe positive signed adds + 2025-07-24 e5f202bb60 cmd/compile: learn transitive proofs for safe unsigned adds + 2025-07-24 bd80f74bc1 cmd/compile: fold shift through AND for slice operations + 2025-07-24 5c45fe1385 internal/runtime/syscall: rename to internal/runtime/syscall/linux + 2025-07-24 592c2db868 cmd/compile: improve loopRotate to handle nested loops + 2025-07-24 dcb479c2f9 cmd/compile: optimize slice bounds checking with SUB/SUBconst comparisons + 2025-07-24 f11599b0b9 internal/poll: remove handle field from Windows' poll.operation + 2025-07-24 f7432e0230 internal/poll: remove fd field from Windows' poll.operation + 2025-07-24 e84ed38641 runtime: add benchmark for small-size memmory operation + 2025-07-24 18dbe5b941 hash/crc32: add AVX512 IEEE CRC32 calculation + 2025-07-24 c641900f72 cmd/compile: prefer base.Fatalf to panic in dwarfgen + 2025-07-24 d71d8aeafd cmd/internal/obj/s390x: add MVCLE instruction + 2025-07-24 b6cf1d94dc runtime: optimize memclr on mips64x + 2025-07-24 a8edd99479 runtime: improvement in memclr for s390x + 2025-07-24 bd04f65511 internal/runtime/exithook: fix a typo + 2025-07-24 5c8624a396 cmd/internal/goobj: make error output clear + 2025-07-24 44d73dfb4e cmd/go/internal/doc: clean up after merge with cmd/internal/doc + 2025-07-24 bd446662dd cmd/internal/doc: merge with cmd/go/internal/doc + 2025-07-24 da8b50c830 cmd/doc: delete + 2025-07-24 6669aa3b14 runtime: randomize heap base address + 2025-07-24 26338a7f69 cmd/compile: use better fatal message for staticValue1 + 2025-07-24 8587ba272e cmd/cgo: compare 
malloc return value to NULL instead of literal 0 + 2025-07-24 cae45167b7 go/types, types2: better error messages for certain type mismatches + 2025-07-24 2ddf542e4c cmd/compile: use ,ok return idiom for sparsemap.get + 2025-07-24 6505fcbd0a cmd/compile: use generics for sparse map + 2025-07-24 14f5eb7812 cmd/api: rerun updategolden + 2025-07-24 52b6d7f67a runtime: drop NetBSD kernel bug sysmon workaround fixed in NetBSD 9.2 + 2025-07-24 1ebebf1cc1 cmd/go: clean should respect workspaces + 2025-07-24 6536a93547 encoding/json/jsontext: preserve buffer capacity in Decoder.Reset + 2025-07-24 efc37e97c0 cmd/go: always return the cached path from go tool -n + 2025-07-23 98a031193b runtime: check TestUsingVDSO ExitError type assertion + 2025-07-23 6bb42997c8 doc/next: initialize + 2025-07-23 2696a11a97 internal/goversion: update Version to 1.26 + 2025-07-23 489868f776 cmd/link: scope test to linux & net.sendFile + 2025-07-22 71c2bf5513 cmd/compile: fix loclist for heap return vars without optimizations + 2025-07-22 c74399e7f5 net: correct comment for ListenConfig.ListenPacket + 2025-07-22 4ed9943b26 all: go fmt + 2025-07-22 1aaf7422f1 cmd/internal/objabi: remove redundant word in comment + 2025-07-21 d5ec0815e6 runtime: relax TestMemoryLimitNoGCPercent a bit + 2025-07-21 f7cc61e7d7 cmd/compile: for arm64 epilog, do SP increment with a single instruction + 2025-07-21 5dac42363b runtime: fix asan wrapper for riscv64 + 2025-07-21 e5502e0959 cmd/go: check subcommand properties + 2025-07-19 2363897932 cmd/internal/obj: enable got pcrel itype in fips140 for riscv64 + 2025-07-19 e32255fcc0 cmd/compile/internal/ssa: restrict architectures for TestDebugLines_74576 + 2025-07-18 0451816430 os: revert the use of AddCleanup to close files and roots + 2025-07-18 34b70684ba go/types: infer correct type for y in append(bytes, y...) 
+ 2025-07-17 66536242fc cmd/compile/internal/escape: improve DWARF .debug_line numbering for literal rewriting optimizations + 2025-07-16 385000b004 runtime: fix idle time double-counting bug + 2025-07-16 f506ad2644 cmd/compile/internal/escape: speed up analyzing some functions with many closures + 2025-07-16 9c507e7942 cmd/link, runtime: on Wasm, put only function index in method table and func table + 2025-07-16 9782dcfd16 runtime: use 32-bit function index on Wasm + 2025-07-16 c876bf9346 cmd/internal/obj/wasm: use 64-bit instructions for indirect calls + 2025-07-15 b4309ece66 cmd/internal/doc: upgrade godoc pkgsite to 01b046e + 2025-07-15 75a19dbcd7 runtime: use memclrNoHeapPointers to clear inline mark bits + 2025-07-15 6d4a91c7a5 runtime: only clear inline mark bits on span alloc if necessary + 2025-07-15 0c6296ab12 runtime: have mergeInlineMarkBits also clear the inline mark bits + 2025-07-15 397d2117ec runtime: merge inline mark bits with gcmarkBits 8 bytes at a time + 2025-07-15 7dceabd3be runtime/maps: fix typo in group.go comment (instrinsified -> intrinsified) + 2025-07-15 d826bf4d74 os: remove useless error check + 2025-07-14 bb07e55aff runtime: expand GOMAXPROCS documentation + 2025-07-14 9159cd4ec6 encoding/json: decompose legacy options + 2025-07-14 c6556b8eb3 encoding/json/v2: add security section to doc + 2025-07-11 6ebb5f56d9 runtime: gofmt after CL 643897 and CL 662455 + 2025-07-11 1e48ca7020 encoding/json: remove legacy option to EscapeInvalidUTF8 + 2025-07-11 a0a99cb22b encoding/json/v2: report wrapped io.ErrUnexpectedEOF + 2025-07-11 9d04122d24 crypto/rsa: drop contradictory promise to keep PublicKey modulus secret + 2025-07-11 1ca23682dd crypto/rsa: fix documentation formatting + 2025-07-11 4bc3373c8e runtime: turn off large memmove tests under asan/msan Change-Id: I1e32d964eba770b85421efb86b305a2242f24466
Diffstat (limited to 'src')
-rw-r--r--src/bytes/bytes_test.go4
-rw-r--r--src/cmd/api/testdata/src/pkg/p1/golden.txt12
-rw-r--r--src/cmd/api/testdata/src/pkg/p2/golden.txt5
-rw-r--r--src/cmd/api/testdata/src/pkg/p4/golden.txt6
-rw-r--r--src/cmd/asm/internal/asm/testdata/loong64enc1.s60
-rw-r--r--src/cmd/asm/internal/asm/testdata/s390x.s19
-rw-r--r--src/cmd/cgo/internal/test/test.go2
-rw-r--r--src/cmd/cgo/out.go2
-rw-r--r--src/cmd/compile/internal/amd64/ggen.go106
-rw-r--r--src/cmd/compile/internal/amd64/ssa.go215
-rw-r--r--src/cmd/compile/internal/arm/ssa.go170
-rw-r--r--src/cmd/compile/internal/arm64/ggen.go53
-rw-r--r--src/cmd/compile/internal/arm64/ssa.go88
-rw-r--r--src/cmd/compile/internal/dwarfgen/dwarf.go49
-rw-r--r--src/cmd/compile/internal/escape/call.go2
-rw-r--r--src/cmd/compile/internal/escape/escape.go44
-rw-r--r--src/cmd/compile/internal/escape/expr.go2
-rw-r--r--src/cmd/compile/internal/escape/stmt.go2
-rw-r--r--src/cmd/compile/internal/gc/main.go10
-rw-r--r--src/cmd/compile/internal/importer/gcimporter_test.go47
-rw-r--r--src/cmd/compile/internal/importer/testdata/issue63285.go11
-rw-r--r--src/cmd/compile/internal/importer/ureader.go48
-rw-r--r--src/cmd/compile/internal/inline/inl.go5
-rw-r--r--src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go2
-rw-r--r--src/cmd/compile/internal/ir/expr.go6
-rw-r--r--src/cmd/compile/internal/ir/node.go1
-rw-r--r--src/cmd/compile/internal/ir/op_string.go105
-rw-r--r--src/cmd/compile/internal/ir/reassignment.go4
-rw-r--r--src/cmd/compile/internal/ir/symtab.go2
-rw-r--r--src/cmd/compile/internal/liveness/plive.go2
-rw-r--r--src/cmd/compile/internal/loong64/ssa.go89
-rw-r--r--src/cmd/compile/internal/mips/ssa.go170
-rw-r--r--src/cmd/compile/internal/mips64/ssa.go90
-rw-r--r--src/cmd/compile/internal/reflectdata/map.go (renamed from src/cmd/compile/internal/reflectdata/map_swiss.go)84
-rw-r--r--src/cmd/compile/internal/reflectdata/map_noswiss.go305
-rw-r--r--src/cmd/compile/internal/reflectdata/reflect.go19
-rw-r--r--src/cmd/compile/internal/riscv64/ssa.go88
-rw-r--r--src/cmd/compile/internal/rttype/rttype.go6
-rw-r--r--src/cmd/compile/internal/s390x/ssa.go93
-rw-r--r--src/cmd/compile/internal/ssa/_gen/386.rules15
-rw-r--r--src/cmd/compile/internal/ssa/_gen/386Ops.go24
-rw-r--r--src/cmd/compile/internal/ssa/_gen/AMD64.rules41
-rw-r--r--src/cmd/compile/internal/ssa/_gen/AMD64Ops.go41
-rw-r--r--src/cmd/compile/internal/ssa/_gen/ARM.rules15
-rw-r--r--src/cmd/compile/internal/ssa/_gen/ARM64.rules20
-rw-r--r--src/cmd/compile/internal/ssa/_gen/ARM64Ops.go20
-rw-r--r--src/cmd/compile/internal/ssa/_gen/ARMOps.go25
-rw-r--r--src/cmd/compile/internal/ssa/_gen/LOONG64.rules20
-rw-r--r--src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go20
-rw-r--r--src/cmd/compile/internal/ssa/_gen/LOONG64latelower.rules6
-rw-r--r--src/cmd/compile/internal/ssa/_gen/MIPS.rules22
-rw-r--r--src/cmd/compile/internal/ssa/_gen/MIPS64.rules8
-rw-r--r--src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go20
-rw-r--r--src/cmd/compile/internal/ssa/_gen/MIPSOps.go30
-rw-r--r--src/cmd/compile/internal/ssa/_gen/RISCV64.rules8
-rw-r--r--src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go20
-rw-r--r--src/cmd/compile/internal/ssa/_gen/S390X.rules11
-rw-r--r--src/cmd/compile/internal/ssa/_gen/S390XOps.go22
-rw-r--r--src/cmd/compile/internal/ssa/_gen/generic.rules42
-rw-r--r--src/cmd/compile/internal/ssa/_gen/main.go12
-rw-r--r--src/cmd/compile/internal/ssa/_gen/rulegen.go18
-rw-r--r--src/cmd/compile/internal/ssa/biasedsparsemap.go18
-rw-r--r--src/cmd/compile/internal/ssa/check.go3
-rw-r--r--src/cmd/compile/internal/ssa/compile.go6
-rw-r--r--src/cmd/compile/internal/ssa/config.go86
-rw-r--r--src/cmd/compile/internal/ssa/deadcode.go2
-rw-r--r--src/cmd/compile/internal/ssa/deadstore.go7
-rw-r--r--src/cmd/compile/internal/ssa/debug.go16
-rw-r--r--src/cmd/compile/internal/ssa/debug_lines_test.go31
-rw-r--r--src/cmd/compile/internal/ssa/expand_calls.go7
-rw-r--r--src/cmd/compile/internal/ssa/likelyadjust.go210
-rw-r--r--src/cmd/compile/internal/ssa/loopbce.go35
-rw-r--r--src/cmd/compile/internal/ssa/looprotate.go84
-rw-r--r--src/cmd/compile/internal/ssa/looprotate_test.go65
-rw-r--r--src/cmd/compile/internal/ssa/nilcheck.go3
-rw-r--r--src/cmd/compile/internal/ssa/op.go53
-rw-r--r--src/cmd/compile/internal/ssa/opGen.go523
-rw-r--r--src/cmd/compile/internal/ssa/prove.go94
-rw-r--r--src/cmd/compile/internal/ssa/regalloc.go116
-rw-r--r--src/cmd/compile/internal/ssa/regalloc_test.go30
-rw-r--r--src/cmd/compile/internal/ssa/rewrite.go74
-rw-r--r--src/cmd/compile/internal/ssa/rewrite386.go255
-rw-r--r--src/cmd/compile/internal/ssa/rewriteAMD64.go252
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM.go255
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM64.go383
-rw-r--r--src/cmd/compile/internal/ssa/rewriteLOONG64.go259
-rw-r--r--src/cmd/compile/internal/ssa/rewriteLOONG64latelower.go29
-rw-r--r--src/cmd/compile/internal/ssa/rewriteMIPS.go279
-rw-r--r--src/cmd/compile/internal/ssa/rewriteMIPS64.go143
-rw-r--r--src/cmd/compile/internal/ssa/rewriteRISCV64.go143
-rw-r--r--src/cmd/compile/internal/ssa/rewriteS390X.go155
-rw-r--r--src/cmd/compile/internal/ssa/rewritegeneric.go606
-rw-r--r--src/cmd/compile/internal/ssa/sparsemap.go70
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i74576a.go17
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i74576b.go15
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i74576c.go19
-rw-r--r--src/cmd/compile/internal/ssa/tighten.go1
-rw-r--r--src/cmd/compile/internal/ssa/xposmap.go4
-rw-r--r--src/cmd/compile/internal/ssagen/arch.go9
-rw-r--r--src/cmd/compile/internal/ssagen/intrinsics.go2
-rw-r--r--src/cmd/compile/internal/ssagen/ssa.go32
-rw-r--r--src/cmd/compile/internal/test/inl_test.go12
-rw-r--r--src/cmd/compile/internal/test/mulconst_test.go4
-rw-r--r--src/cmd/compile/internal/typecheck/_builtin/runtime.go8
-rw-r--r--src/cmd/compile/internal/typecheck/builtin.go659
-rw-r--r--src/cmd/compile/internal/typecheck/const.go1
-rw-r--r--src/cmd/compile/internal/typecheck/func.go10
-rw-r--r--src/cmd/compile/internal/typecheck/stmt.go2
-rw-r--r--src/cmd/compile/internal/types/fmt.go6
-rw-r--r--src/cmd/compile/internal/types/sizeof_test.go2
-rw-r--r--src/cmd/compile/internal/types/type.go54
-rw-r--r--src/cmd/compile/internal/types2/alias.go32
-rw-r--r--src/cmd/compile/internal/types2/builtins.go7
-rw-r--r--src/cmd/compile/internal/types2/check_test.go6
-rw-r--r--src/cmd/compile/internal/types2/decl.go36
-rw-r--r--src/cmd/compile/internal/types2/expr.go4
-rw-r--r--src/cmd/compile/internal/types2/named.go23
-rw-r--r--src/cmd/compile/internal/types2/object.go2
-rw-r--r--src/cmd/compile/internal/types2/range.go3
-rw-r--r--src/cmd/compile/internal/walk/builtin.go141
-rw-r--r--src/cmd/compile/internal/walk/expr.go4
-rw-r--r--src/cmd/compile/internal/walk/order.go11
-rw-r--r--src/cmd/compile/internal/walk/range.go20
-rw-r--r--src/cmd/compile/internal/walk/stmt.go2
-rw-r--r--src/cmd/compile/internal/walk/walk.go38
-rw-r--r--src/cmd/compile/internal/x86/ssa.go167
-rw-r--r--src/cmd/doc/doc.go55
-rw-r--r--src/cmd/go/internal/base/base.go4
-rw-r--r--src/cmd/go/internal/clean/clean.go1
-rw-r--r--src/cmd/go/internal/doc/dirs.go (renamed from src/cmd/internal/doc/dirs.go)0
-rw-r--r--src/cmd/go/internal/doc/doc.go447
-rw-r--r--src/cmd/go/internal/doc/doc_test.go (renamed from src/cmd/internal/doc/doc_test.go)6
-rw-r--r--src/cmd/go/internal/doc/pkg.go (renamed from src/cmd/internal/doc/pkg.go)0
-rw-r--r--src/cmd/go/internal/doc/pkgsite.go93
-rw-r--r--src/cmd/go/internal/doc/pkgsite_bootstrap.go (renamed from src/cmd/go/internal/doc/doc_bootstrap.go)6
-rw-r--r--src/cmd/go/internal/doc/signal_notunix.go (renamed from src/cmd/internal/doc/signal_notunix.go)0
-rw-r--r--src/cmd/go/internal/doc/signal_unix.go (renamed from src/cmd/internal/doc/signal_unix.go)0
-rw-r--r--src/cmd/go/internal/doc/testdata/merge/aa.go (renamed from src/cmd/internal/doc/testdata/merge/aa.go)0
-rw-r--r--src/cmd/go/internal/doc/testdata/merge/bb.go (renamed from src/cmd/internal/doc/testdata/merge/bb.go)0
-rw-r--r--src/cmd/go/internal/doc/testdata/nested/empty/empty.go (renamed from src/cmd/internal/doc/testdata/nested/empty/empty.go)0
-rw-r--r--src/cmd/go/internal/doc/testdata/nested/ignore.go (renamed from src/cmd/internal/doc/testdata/nested/ignore.go)0
-rw-r--r--src/cmd/go/internal/doc/testdata/nested/nested/real.go (renamed from src/cmd/internal/doc/testdata/nested/nested/real.go)0
-rw-r--r--src/cmd/go/internal/doc/testdata/pkg.go (renamed from src/cmd/internal/doc/testdata/pkg.go)0
-rw-r--r--src/cmd/go/internal/gover/mod.go3
-rw-r--r--src/cmd/go/internal/modcmd/edit.go6
-rw-r--r--src/cmd/go/internal/modload/init.go18
-rw-r--r--src/cmd/go/internal/modload/modfile.go2
-rw-r--r--src/cmd/go/internal/telemetrystats/telemetrystats.go13
-rw-r--r--src/cmd/go/internal/tool/tool.go27
-rw-r--r--src/cmd/go/internal/work/action.go15
-rw-r--r--src/cmd/go/internal/work/buildid.go3
-rw-r--r--src/cmd/go/internal/work/shell.go44
-rw-r--r--src/cmd/go/internal/work/shell_nonwindows.go49
-rw-r--r--src/cmd/go/internal/work/shell_windows.go37
-rw-r--r--src/cmd/go/testdata/script/mod_get_toolchain.txt10
-rw-r--r--src/cmd/go/testdata/script/mod_init_issue74784.txt26
-rw-r--r--src/cmd/go/testdata/script/tool_n_issue72824.txt27
-rw-r--r--src/cmd/internal/doc/main.go532
-rw-r--r--src/cmd/internal/goobj/mkbuiltin.go4
-rw-r--r--src/cmd/internal/obj/arm/obj5.go111
-rw-r--r--src/cmd/internal/obj/arm64/obj7.go160
-rw-r--r--src/cmd/internal/obj/fips140.go1
-rw-r--r--src/cmd/internal/obj/loong64/a.out.go25
-rw-r--r--src/cmd/internal/obj/loong64/anames.go24
-rw-r--r--src/cmd/internal/obj/loong64/asm.go231
-rw-r--r--src/cmd/internal/obj/loong64/doc.go17
-rw-r--r--src/cmd/internal/obj/loong64/obj.go86
-rw-r--r--src/cmd/internal/obj/mips/obj0.go85
-rw-r--r--src/cmd/internal/obj/ppc64/obj9.go92
-rw-r--r--src/cmd/internal/obj/riscv/obj.go79
-rw-r--r--src/cmd/internal/obj/s390x/a.out.go9
-rw-r--r--src/cmd/internal/obj/s390x/anames.go9
-rw-r--r--src/cmd/internal/obj/s390x/asmz.go43
-rw-r--r--src/cmd/internal/obj/s390x/objz.go90
-rw-r--r--src/cmd/internal/obj/s390x/vector.go22
-rw-r--r--src/cmd/internal/obj/wasm/wasmobj.go64
-rw-r--r--src/cmd/internal/obj/x86/obj6.go151
-rw-r--r--src/cmd/internal/objabi/pkgspecial.go6
-rw-r--r--src/cmd/internal/objabi/reloctype.go2
-rw-r--r--src/cmd/internal/testdir/testdir_test.go23
-rw-r--r--src/cmd/link/dwarf_test.go102
-rw-r--r--src/cmd/link/internal/ld/data.go17
-rw-r--r--src/cmd/link/internal/ld/deadcode.go10
-rw-r--r--src/cmd/link/internal/ld/decodesym.go4
-rw-r--r--src/cmd/link/internal/ld/dwarf.go133
-rw-r--r--src/cmd/link/internal/ld/dwarf_test.go11
-rw-r--r--src/cmd/link/internal/ld/pcln.go43
-rw-r--r--src/cmd/link/testdata/dwarf/issue65405/main.go8
-rw-r--r--src/crypto/elliptic/params.go8
-rw-r--r--src/crypto/rsa/rsa.go18
-rw-r--r--src/crypto/tls/cipher_suites.go8
-rw-r--r--src/crypto/tls/handshake_client_test.go4
-rw-r--r--src/crypto/tls/handshake_server_test.go44
-rw-r--r--src/debug/dwarf/entry.go2
-rw-r--r--src/encoding/json/decode_test.go39
-rw-r--r--src/encoding/json/internal/internal.go1
-rw-r--r--src/encoding/json/internal/jsonflags/flags.go24
-rw-r--r--src/encoding/json/internal/jsonwire/encode.go6
-rw-r--r--src/encoding/json/jsontext/decode.go17
-rw-r--r--src/encoding/json/jsontext/decode_test.go83
-rw-r--r--src/encoding/json/jsontext/doc.go4
-rw-r--r--src/encoding/json/jsontext/encode.go9
-rw-r--r--src/encoding/json/jsontext/encode_test.go92
-rw-r--r--src/encoding/json/jsontext/options.go1
-rw-r--r--src/encoding/json/stream_test.go35
-rw-r--r--src/encoding/json/v2/arshal.go32
-rw-r--r--src/encoding/json/v2/arshal_default.go22
-rw-r--r--src/encoding/json/v2/arshal_test.go28
-rw-r--r--src/encoding/json/v2/arshal_time.go6
-rw-r--r--src/encoding/json/v2/doc.go94
-rw-r--r--src/encoding/json/v2/errors.go9
-rw-r--r--src/encoding/json/v2_decode.go18
-rw-r--r--src/encoding/json/v2_decode_test.go63
-rw-r--r--src/encoding/json/v2_diff_test.go4
-rw-r--r--src/encoding/json/v2_encode.go8
-rw-r--r--src/encoding/json/v2_inject.go3
-rw-r--r--src/encoding/json/v2_options.go118
-rw-r--r--src/encoding/json/v2_scanner.go4
-rw-r--r--src/encoding/json/v2_stream.go13
-rw-r--r--src/encoding/json/v2_stream_test.go35
-rw-r--r--src/errors/join.go10
-rw-r--r--src/go/build/deps_test.go4
-rw-r--r--src/go/parser/parser.go25
-rw-r--r--src/go/types/alias.go32
-rw-r--r--src/go/types/alias_test.go85
-rw-r--r--src/go/types/api_test.go36
-rw-r--r--src/go/types/builtins.go7
-rw-r--r--src/go/types/check_test.go6
-rw-r--r--src/go/types/decl.go36
-rw-r--r--src/go/types/expr.go4
-rw-r--r--src/go/types/named.go23
-rw-r--r--src/go/types/object.go2
-rw-r--r--src/go/types/range.go3
-rw-r--r--src/hash/crc32/crc32_amd64.go5
-rw-r--r--src/hash/crc32/crc32_amd64.s49
-rw-r--r--src/hash/crc32/crc32_test.go3
-rw-r--r--src/hash/maphash/maphash_runtime.go8
-rw-r--r--src/image/image.go4
-rw-r--r--src/internal/abi/abi.go4
-rw-r--r--src/internal/abi/bounds.go113
-rw-r--r--src/internal/abi/map.go (renamed from src/internal/abi/map_swiss.go)36
-rw-r--r--src/internal/abi/map_noswiss.go54
-rw-r--r--src/internal/abi/map_select_noswiss.go10
-rw-r--r--src/internal/abi/map_select_swiss.go22
-rw-r--r--src/internal/abi/symtab.go1
-rw-r--r--src/internal/abi/type.go40
-rw-r--r--src/internal/buildcfg/cfg.go2
-rw-r--r--src/internal/buildcfg/exp.go2
-rw-r--r--src/internal/cgrouptest/cgrouptest_linux.go2
-rw-r--r--src/internal/coverage/pkid.go5
-rw-r--r--src/internal/cpu/cpu.go67
-rw-r--r--src/internal/cpu/cpu_x86.go6
-rw-r--r--src/internal/goexperiment/exp_cacheprog_off.go8
-rw-r--r--src/internal/goexperiment/exp_cacheprog_on.go8
-rw-r--r--src/internal/goexperiment/exp_randomizedheapbase64_off.go8
-rw-r--r--src/internal/goexperiment/exp_randomizedheapbase64_on.go8
-rw-r--r--src/internal/goexperiment/exp_rangefunc_off.go8
-rw-r--r--src/internal/goexperiment/exp_rangefunc_on.go8
-rw-r--r--src/internal/goexperiment/exp_swissmap_off.go8
-rw-r--r--src/internal/goexperiment/exp_swissmap_on.go8
-rw-r--r--src/internal/goexperiment/exp_synchashtriemap_off.go8
-rw-r--r--src/internal/goexperiment/exp_synchashtriemap_on.go8
-rw-r--r--src/internal/goexperiment/flags.go17
-rw-r--r--src/internal/goversion/goversion.go2
-rw-r--r--src/internal/poll/fd_windows.go274
-rw-r--r--src/internal/poll/sendfile_windows.go10
-rw-r--r--src/internal/reflectlite/export_test.go2
-rw-r--r--src/internal/reflectlite/value.go6
-rw-r--r--src/internal/runtime/cgobench/bench_test.go26
-rw-r--r--src/internal/runtime/cgobench/funcs.go17
-rw-r--r--src/internal/runtime/cgroup/cgroup_linux.go38
-rw-r--r--src/internal/runtime/cgroup/line_reader.go2
-rw-r--r--src/internal/runtime/exithook/hooks.go2
-rw-r--r--src/internal/runtime/maps/export_noswiss_test.go51
-rw-r--r--src/internal/runtime/maps/export_swiss_test.go19
-rw-r--r--src/internal/runtime/maps/export_test.go15
-rw-r--r--src/internal/runtime/maps/group.go30
-rw-r--r--src/internal/runtime/maps/map.go63
-rw-r--r--src/internal/runtime/maps/map_swiss_test.go267
-rw-r--r--src/internal/runtime/maps/map_test.go255
-rw-r--r--src/internal/runtime/maps/runtime.go338
-rw-r--r--src/internal/runtime/maps/runtime_fast32.go (renamed from src/internal/runtime/maps/runtime_fast32_swiss.go)18
-rw-r--r--src/internal/runtime/maps/runtime_fast64.go (renamed from src/internal/runtime/maps/runtime_fast64_swiss.go)20
-rw-r--r--src/internal/runtime/maps/runtime_faststr.go (renamed from src/internal/runtime/maps/runtime_faststr_swiss.go)26
-rw-r--r--src/internal/runtime/maps/runtime_swiss.go352
-rw-r--r--src/internal/runtime/maps/table.go74
-rw-r--r--src/internal/runtime/maps/table_debug.go10
-rw-r--r--src/internal/runtime/strconv/atoi.go1
-rw-r--r--src/internal/runtime/strconv/atoi_test.go1
-rw-r--r--src/internal/runtime/syscall/linux/asm_linux_386.s (renamed from src/internal/runtime/syscall/asm_linux_386.s)0
-rw-r--r--src/internal/runtime/syscall/linux/asm_linux_amd64.s (renamed from src/internal/runtime/syscall/asm_linux_amd64.s)0
-rw-r--r--src/internal/runtime/syscall/linux/asm_linux_arm.s (renamed from src/internal/runtime/syscall/asm_linux_arm.s)0
-rw-r--r--src/internal/runtime/syscall/linux/asm_linux_arm64.s (renamed from src/internal/runtime/syscall/asm_linux_arm64.s)0
-rw-r--r--src/internal/runtime/syscall/linux/asm_linux_loong64.s (renamed from src/internal/runtime/syscall/asm_linux_loong64.s)0
-rw-r--r--src/internal/runtime/syscall/linux/asm_linux_mips64x.s (renamed from src/internal/runtime/syscall/asm_linux_mips64x.s)0
-rw-r--r--src/internal/runtime/syscall/linux/asm_linux_mipsx.s (renamed from src/internal/runtime/syscall/asm_linux_mipsx.s)0
-rw-r--r--src/internal/runtime/syscall/linux/asm_linux_ppc64x.s (renamed from src/internal/runtime/syscall/asm_linux_ppc64x.s)0
-rw-r--r--src/internal/runtime/syscall/linux/asm_linux_riscv64.s (renamed from src/internal/runtime/syscall/asm_linux_riscv64.s)0
-rw-r--r--src/internal/runtime/syscall/linux/asm_linux_s390x.s (renamed from src/internal/runtime/syscall/asm_linux_s390x.s)0
-rw-r--r--src/internal/runtime/syscall/linux/defs_linux.go (renamed from src/internal/runtime/syscall/defs_linux.go)2
-rw-r--r--src/internal/runtime/syscall/linux/defs_linux_386.go (renamed from src/internal/runtime/syscall/defs_linux_386.go)2
-rw-r--r--src/internal/runtime/syscall/linux/defs_linux_amd64.go (renamed from src/internal/runtime/syscall/defs_linux_amd64.go)2
-rw-r--r--src/internal/runtime/syscall/linux/defs_linux_arm.go (renamed from src/internal/runtime/syscall/defs_linux_arm.go)2
-rw-r--r--src/internal/runtime/syscall/linux/defs_linux_arm64.go (renamed from src/internal/runtime/syscall/defs_linux_arm64.go)2
-rw-r--r--src/internal/runtime/syscall/linux/defs_linux_loong64.go (renamed from src/internal/runtime/syscall/defs_linux_loong64.go)2
-rw-r--r--src/internal/runtime/syscall/linux/defs_linux_mips64x.go (renamed from src/internal/runtime/syscall/defs_linux_mips64x.go)2
-rw-r--r--src/internal/runtime/syscall/linux/defs_linux_mipsx.go (renamed from src/internal/runtime/syscall/defs_linux_mipsx.go)2
-rw-r--r--src/internal/runtime/syscall/linux/defs_linux_ppc64x.go (renamed from src/internal/runtime/syscall/defs_linux_ppc64x.go)2
-rw-r--r--src/internal/runtime/syscall/linux/defs_linux_riscv64.go (renamed from src/internal/runtime/syscall/defs_linux_riscv64.go)2
-rw-r--r--src/internal/runtime/syscall/linux/defs_linux_s390x.go (renamed from src/internal/runtime/syscall/defs_linux_s390x.go)2
-rw-r--r--src/internal/runtime/syscall/linux/syscall_linux.go (renamed from src/internal/runtime/syscall/syscall_linux.go)7
-rw-r--r--src/internal/runtime/syscall/linux/syscall_linux_test.go (renamed from src/internal/runtime/syscall/syscall_linux_test.go)6
-rw-r--r--src/internal/runtime/syscall/windows/asm_windows_386.s48
-rw-r--r--src/internal/runtime/syscall/windows/asm_windows_amd64.s84
-rw-r--r--src/internal/runtime/syscall/windows/asm_windows_arm.s77
-rw-r--r--src/internal/runtime/syscall/windows/asm_windows_arm64.s90
-rw-r--r--src/internal/runtime/syscall/windows/syscall_windows.go44
-rw-r--r--src/internal/syscall/windows/security_windows.go3
-rw-r--r--src/internal/syscall/windows/types_windows.go18
-rw-r--r--src/internal/syscall/windows/zsyscall_windows.go27
-rw-r--r--src/internal/trace/trace_test.go1
-rw-r--r--src/internal/types/errors/codes.go4
-rw-r--r--src/internal/types/testdata/check/builtins1.go4
-rw-r--r--src/internal/types/testdata/check/expr2.go2
-rw-r--r--src/internal/types/testdata/fixedbugs/issue60434.go2
-rw-r--r--src/internal/types/testdata/fixedbugs/issue73428.go13
-rw-r--r--src/net/dial.go2
-rw-r--r--src/net/http/server.go2
-rw-r--r--src/net/tcpconn_keepalive_test.go4
-rw-r--r--src/os/exec/dot_test.go44
-rw-r--r--src/os/exec/exec.go10
-rw-r--r--src/os/exec/lp_plan9.go4
-rw-r--r--src/os/exec/lp_unix.go4
-rw-r--r--src/os/exec/lp_windows.go8
-rw-r--r--src/os/file.go6
-rw-r--r--src/os/file_plan9.go10
-rw-r--r--src/os/file_unix.go11
-rw-r--r--src/os/file_windows.go10
-rw-r--r--src/os/os_windows_test.go3
-rw-r--r--src/os/root_openat.go13
-rw-r--r--src/os/root_unix.go2
-rw-r--r--src/os/root_windows.go2
-rw-r--r--src/os/user/user_windows_test.go190
-rw-r--r--src/reflect/abi.go2
-rw-r--r--src/reflect/all_test.go8
-rw-r--r--src/reflect/badlinkname.go2
-rw-r--r--src/reflect/export_noswiss_test.go25
-rw-r--r--src/reflect/export_swiss_test.go12
-rw-r--r--src/reflect/export_test.go5
-rw-r--r--src/reflect/makefunc.go10
-rw-r--r--src/reflect/map.go (renamed from src/reflect/map_swiss.go)61
-rw-r--r--src/reflect/map_noswiss.go484
-rw-r--r--src/reflect/map_noswiss_test.go60
-rw-r--r--src/reflect/map_test.go (renamed from src/reflect/map_swiss_test.go)7
-rw-r--r--src/reflect/type.go20
-rw-r--r--src/reflect/value.go26
-rw-r--r--src/regexp/find_test.go2
-rw-r--r--src/regexp/syntax/regexp.go2
-rw-r--r--src/runtime/alg.go10
-rw-r--r--src/runtime/arena.go14
-rw-r--r--src/runtime/asan_riscv64.s10
-rw-r--r--src/runtime/asm_386.s194
-rw-r--r--src/runtime/asm_amd64.s89
-rw-r--r--src/runtime/asm_arm.s206
-rw-r--r--src/runtime/asm_arm64.s80
-rw-r--r--src/runtime/asm_loong64.s102
-rw-r--r--src/runtime/asm_mips64x.s100
-rw-r--r--src/runtime/asm_mipsx.s208
-rw-r--r--src/runtime/asm_riscv64.s100
-rw-r--r--src/runtime/asm_s390x.s88
-rw-r--r--src/runtime/asm_wasm.s4
-rw-r--r--src/runtime/cgocall.go26
-rw-r--r--src/runtime/crash_test.go15
-rw-r--r--src/runtime/crash_unix_test.go2
-rw-r--r--src/runtime/debug.go51
-rw-r--r--src/runtime/debuglog.go2
-rw-r--r--src/runtime/defs_openbsd.go1
-rw-r--r--src/runtime/defs_openbsd_mips64.go171
-rw-r--r--src/runtime/ehooks_test.go4
-rw-r--r--src/runtime/error.go56
-rw-r--r--src/runtime/export_debug_test.go4
-rw-r--r--src/runtime/export_map_noswiss_test.go64
-rw-r--r--src/runtime/export_map_swiss_test.go11
-rw-r--r--src/runtime/export_test.go23
-rw-r--r--src/runtime/export_windows_test.go4
-rw-r--r--src/runtime/heapdump.go6
-rw-r--r--src/runtime/linkname_shim.go (renamed from src/runtime/linkname_swiss.go)13
-rw-r--r--src/runtime/malloc.go66
-rw-r--r--src/runtime/map.go (renamed from src/runtime/map_swiss.go)60
-rw-r--r--src/runtime/map_fast32.go (renamed from src/runtime/map_fast32_swiss.go)12
-rw-r--r--src/runtime/map_fast32_noswiss.go493
-rw-r--r--src/runtime/map_fast64.go (renamed from src/runtime/map_fast64_swiss.go)12
-rw-r--r--src/runtime/map_fast64_noswiss.go502
-rw-r--r--src/runtime/map_faststr.go (renamed from src/runtime/map_faststr_swiss.go)10
-rw-r--r--src/runtime/map_faststr_noswiss.go507
-rw-r--r--src/runtime/map_noswiss.go1891
-rw-r--r--src/runtime/map_noswiss_test.go214
-rw-r--r--src/runtime/map_swiss_test.go75
-rw-r--r--src/runtime/map_test.go126
-rw-r--r--src/runtime/mbitmap.go6
-rw-r--r--src/runtime/mcleanup.go4
-rw-r--r--src/runtime/mem_windows.go16
-rw-r--r--src/runtime/memclr_mips64x.s88
-rw-r--r--src/runtime/memclr_s390x.s66
-rw-r--r--src/runtime/memmove_test.go162
-rw-r--r--src/runtime/metrics.go65
-rw-r--r--src/runtime/metrics/description.go33
-rw-r--r--src/runtime/metrics/doc.go27
-rw-r--r--src/runtime/metrics_test.go63
-rw-r--r--src/runtime/mfinal.go10
-rw-r--r--src/runtime/mgclimit.go20
-rw-r--r--src/runtime/mgcmark.go113
-rw-r--r--src/runtime/mgcmark_greenteagc.go136
-rw-r--r--src/runtime/mgcmark_nogreenteagc.go110
-rw-r--r--src/runtime/mgcsweep.go21
-rw-r--r--src/runtime/mheap.go46
-rw-r--r--src/runtime/mpagealloc.go39
-rw-r--r--src/runtime/mwbbuf.go2
-rw-r--r--src/runtime/netpoll_epoll.go34
-rw-r--r--src/runtime/netpoll_windows.go14
-rw-r--r--src/runtime/os3_solaris.go1
-rw-r--r--src/runtime/os_aix.go2
-rw-r--r--src/runtime/os_darwin.go1
-rw-r--r--src/runtime/os_dragonfly.go1
-rw-r--r--src/runtime/os_linux.go11
-rw-r--r--src/runtime/os_linux_riscv64.go4
-rw-r--r--src/runtime/os_netbsd.go12
-rw-r--r--src/runtime/os_openbsd.go52
-rw-r--r--src/runtime/os_openbsd_libc.go60
-rw-r--r--src/runtime/os_openbsd_mips64.go11
-rw-r--r--src/runtime/os_openbsd_syscall.go51
-rw-r--r--src/runtime/os_openbsd_syscall1.go20
-rw-r--r--src/runtime/os_openbsd_syscall2.go102
-rw-r--r--src/runtime/os_solaris.go5
-rw-r--r--src/runtime/os_windows.go270
-rw-r--r--src/runtime/os_windows_arm.go2
-rw-r--r--src/runtime/os_windows_arm64.go2
-rw-r--r--src/runtime/panic.go230
-rw-r--r--src/runtime/panic32.go33
-rw-r--r--src/runtime/pinner.go2
-rw-r--r--src/runtime/plugin.go2
-rw-r--r--src/runtime/proc.go26
-rw-r--r--src/runtime/race.go4
-rw-r--r--src/runtime/race/testdata/rangefunc_test.go2
-rw-r--r--src/runtime/rt0_openbsd_mips64.s36
-rw-r--r--src/runtime/runtime-gdb.py49
-rw-r--r--src/runtime/runtime-gdb_test.go41
-rw-r--r--src/runtime/runtime2.go13
-rw-r--r--src/runtime/set_vma_name_linux.go4
-rw-r--r--src/runtime/signal_mips64x.go2
-rw-r--r--src/runtime/signal_openbsd_mips64.go78
-rw-r--r--src/runtime/signal_windows.go26
-rw-r--r--src/runtime/slice.go2
-rw-r--r--src/runtime/symtab.go32
-rw-r--r--src/runtime/sys_aix_ppc64.s20
-rw-r--r--src/runtime/sys_libc.go2
-rw-r--r--src/runtime/sys_openbsd.go444
-rw-r--r--src/runtime/sys_openbsd1.go46
-rw-r--r--src/runtime/sys_openbsd2.go303
-rw-r--r--src/runtime/sys_openbsd3.go136
-rw-r--r--src/runtime/sys_openbsd_mips64.s388
-rw-r--r--src/runtime/sys_solaris_amd64.s4
-rw-r--r--src/runtime/sys_windows_386.s43
-rw-r--r--src/runtime/sys_windows_amd64.s79
-rw-r--r--src/runtime/sys_windows_arm.s70
-rw-r--r--src/runtime/sys_windows_arm64.s82
-rw-r--r--src/runtime/syscall_windows.go115
-rw-r--r--src/runtime/syscall_windows_test.go3
-rw-r--r--src/runtime/testdata/testprog/coro.go2
-rw-r--r--src/runtime/testdata/testprog/gc.go3
-rw-r--r--src/runtime/testdata/testprogcgo/coro.go2
-rw-r--r--src/runtime/traceallocfree.go2
-rw-r--r--src/runtime/tracebuf.go4
-rw-r--r--src/runtime/tracecpu.go2
-rw-r--r--src/runtime/traceevent.go2
-rw-r--r--src/runtime/traceruntime.go4
-rw-r--r--src/runtime/tracestack.go2
-rw-r--r--src/runtime/tracetype.go2
-rw-r--r--src/runtime/type.go16
-rw-r--r--src/runtime/typekind.go12
-rw-r--r--src/runtime/vdso_test.go2
-rw-r--r--src/sync/hashtriemap.go117
-rw-r--r--src/sync/map.go457
-rw-r--r--src/sync/map_test.go2
-rw-r--r--src/syscall/dirent.go4
-rw-r--r--src/syscall/dll_windows.go111
-rw-r--r--src/syscall/syscall_linux.go4
-rw-r--r--src/time/example_test.go4
-rw-r--r--src/time/sleep.go4
-rw-r--r--src/time/time.go2
-rw-r--r--src/unicode/utf8/utf8.go158
-rw-r--r--src/unicode/utf8/utf8_test.go10
492 files changed, 11919 insertions, 14930 deletions
diff --git a/src/bytes/bytes_test.go b/src/bytes/bytes_test.go
index 0f6cf4993a..03f01582c5 100644
--- a/src/bytes/bytes_test.go
+++ b/src/bytes/bytes_test.go
@@ -693,14 +693,14 @@ func bmIndexRuneUnicode(rt *unicode.RangeTable, needle rune) func(b *testing.B,
for _, r16 := range rt.R16 {
for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) {
if r != needle {
- rs = append(rs, rune(r))
+ rs = append(rs, r)
}
}
}
for _, r32 := range rt.R32 {
for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) {
if r != needle {
- rs = append(rs, rune(r))
+ rs = append(rs, r)
}
}
}
diff --git a/src/cmd/api/testdata/src/pkg/p1/golden.txt b/src/cmd/api/testdata/src/pkg/p1/golden.txt
index 65c4f35d2c..cf089f6858 100644
--- a/src/cmd/api/testdata/src/pkg/p1/golden.txt
+++ b/src/cmd/api/testdata/src/pkg/p1/golden.txt
@@ -1,6 +1,6 @@
+pkg p1, const A //deprecated
pkg p1, const A = 1
pkg p1, const A ideal-int
-pkg p1, const A //deprecated
pkg p1, const A64 = 1
pkg p1, const A64 int64
pkg p1, const AIsLowerA = 11
@@ -25,8 +25,8 @@ pkg p1, func TakesFunc(func(int) int)
pkg p1, method (*B) JustOnB()
pkg p1, method (*B) OnBothTandBPtr()
pkg p1, method (*Embedded) OnEmbedded()
-pkg p1, method (*S2) SMethod(int8, int16, int64)
pkg p1, method (*S2) SMethod //deprecated
+pkg p1, method (*S2) SMethod(int8, int16, int64)
pkg p1, method (*T) JustOnT()
pkg p1, method (*T) OnBothTandBPtr()
pkg p1, method (B) OnBothTandBVal()
@@ -53,8 +53,8 @@ pkg p1, type Error interface { Error, Temporary }
pkg p1, type Error interface, Error() string
pkg p1, type Error interface, Temporary() bool
pkg p1, type FuncType func(int, int, string) (*B, error)
-pkg p1, type I interface, Get(string) int64
pkg p1, type I interface, Get //deprecated
+pkg p1, type I interface, Get(string) int64
pkg p1, type I interface, GetNamed(string) int64
pkg p1, type I interface, Name() string
pkg p1, type I interface, PackageTwoMeth()
@@ -63,9 +63,9 @@ pkg p1, type I interface, unexported methods
pkg p1, type MyInt int
pkg p1, type Namer interface { Name }
pkg p1, type Namer interface, Name() string
+pkg p1, type Private //deprecated
pkg p1, type Private interface, X()
pkg p1, type Private interface, unexported methods
-pkg p1, type Private //deprecated
pkg p1, type Public interface { X, Y }
pkg p1, type Public interface, X()
pkg p1, type Public interface, Y()
@@ -84,8 +84,8 @@ pkg p1, type TPtrExported struct
pkg p1, type TPtrExported struct, embedded *Embedded
pkg p1, type TPtrUnexported struct
pkg p1, type Time struct
-pkg p1, type URL struct
pkg p1, type URL //deprecated
+pkg p1, type URL struct
pkg p1, var Byte uint8
pkg p1, var ByteConv []uint8
pkg p1, var ByteFunc func(uint8) int32
@@ -97,8 +97,8 @@ pkg p1, var StrConv string
pkg p1, var V string
pkg p1, var V1 uint64
pkg p1, var V2 p2.Twoer
-pkg p1, var VError Error
pkg p1, var VError //deprecated
+pkg p1, var VError Error
pkg p1, var X I
pkg p1, var X0 int64
pkg p1, var Y int
diff --git a/src/cmd/api/testdata/src/pkg/p2/golden.txt b/src/cmd/api/testdata/src/pkg/p2/golden.txt
index 735d668166..0f7de5047e 100644
--- a/src/cmd/api/testdata/src/pkg/p2/golden.txt
+++ b/src/cmd/api/testdata/src/pkg/p2/golden.txt
@@ -1,8 +1,7 @@
-pkg p2, func F() string
pkg p2, func F //deprecated
+pkg p2, func F() string
pkg p2, func G() Twoer
pkg p2, func NewError(string) error
pkg p2, type Twoer interface { PackageTwoMeth }
-pkg p2, type Twoer interface, PackageTwoMeth()
pkg p2, type Twoer interface, PackageTwoMeth //deprecated
-
+pkg p2, type Twoer interface, PackageTwoMeth()
diff --git a/src/cmd/api/testdata/src/pkg/p4/golden.txt b/src/cmd/api/testdata/src/pkg/p4/golden.txt
index 1ceae17386..3195f4228d 100644
--- a/src/cmd/api/testdata/src/pkg/p4/golden.txt
+++ b/src/cmd/api/testdata/src/pkg/p4/golden.txt
@@ -1,6 +1,6 @@
+pkg p4, func Clone //deprecated
+pkg p4, func Clone[$0 interface{ ~[]$1 }, $1 interface{}]($0) $0
pkg p4, func NewPair[$0 interface{ M }, $1 interface{ ~int }]($0, $1) Pair[$0, $1]
-pkg p4, method (Pair[$0, $1]) Second() $1
pkg p4, method (Pair[$0, $1]) First() $0
+pkg p4, method (Pair[$0, $1]) Second() $1
pkg p4, type Pair[$0 interface{ M }, $1 interface{ ~int }] struct
-pkg p4, func Clone[$0 interface{ ~[]$1 }, $1 interface{}]($0) $0
-pkg p4, func Clone //deprecated
diff --git a/src/cmd/asm/internal/asm/testdata/loong64enc1.s b/src/cmd/asm/internal/asm/testdata/loong64enc1.s
index bfff555782..8363996683 100644
--- a/src/cmd/asm/internal/asm/testdata/loong64enc1.s
+++ b/src/cmd/asm/internal/asm/testdata/loong64enc1.s
@@ -510,6 +510,16 @@ lable2:
VMOVQ V3.W[1], V7.W4 // 67e4f772
VMOVQ V4.V[0], V6.V2 // 86f0f772
+ // Load data from memory and broadcast to each element of a vector register: VMOVQ offset(Rj), <Vd>.<T>
+ VMOVQ (R4), V0.B16 // 80008030
+ VMOVQ 1(R4), V1.H8 // 81044030
+ VMOVQ 2(R4), V2.W4 // 82082030
+ VMOVQ 3(R4), V3.V2 // 830c1030
+ XVMOVQ (R4), X0.B32 // 80008032
+ XVMOVQ 1(R4), X1.H16 // 81044032
+ XVMOVQ 2(R4), X2.W8 // 82082032
+ XVMOVQ 3(R4), X3.V4 // 830c1032
+
// VSEQ{B,H,W,V}, XVSEQ{B,H,W,V} instruction
VSEQB V1, V2, V3 // 43040070
VSEQH V1, V2, V3 // 43840070
@@ -1035,3 +1045,53 @@ lable2:
PRELD (R4), $0 // 8000c02a
PRELD -1(R4), $8 // 88fcff2a
PRELD 8(R4), $31 // 9f20c02a
+
+ // [X]{VBITCLR/VBITSET/VBITREV}{B,H,W,V} instructions
+ VBITCLRB V1, V2, V3 // 43040c71
+ VBITCLRH V1, V2, V3 // 43840c71
+ VBITCLRW V1, V2, V3 // 43040d71
+ VBITCLRV V1, V2, V3 // 43840d71
+ VBITSETB V1, V2, V3 // 43040e71
+ VBITSETH V1, V2, V3 // 43840e71
+ VBITSETW V1, V2, V3 // 43040f71
+ VBITSETV V1, V2, V3 // 43840f71
+ VBITREVB V1, V2, V3 // 43041071
+ VBITREVH V1, V2, V3 // 43841071
+ VBITREVW V1, V2, V3 // 43041171
+ VBITREVV V1, V2, V3 // 43841171
+ XVBITCLRB X3, X2, X1 // 410c0c75
+ XVBITCLRH X3, X2, X1 // 418c0c75
+ XVBITCLRW X3, X2, X1 // 410c0d75
+ XVBITCLRV X3, X2, X1 // 418c0d75
+ XVBITSETB X3, X2, X1 // 410c0e75
+ XVBITSETH X3, X2, X1 // 418c0e75
+ XVBITSETW X3, X2, X1 // 410c0f75
+ XVBITSETV X3, X2, X1 // 418c0f75
+ XVBITREVB X3, X2, X1 // 410c1075
+ XVBITREVH X3, X2, X1 // 418c1075
+ XVBITREVW X3, X2, X1 // 410c1175
+ XVBITREVV X3, X2, X1 // 418c1175
+ VBITCLRB $7, V2, V3 // 433c1073
+ VBITCLRH $15, V2, V3 // 437c1073
+ VBITCLRW $31, V2, V3 // 43fc1073
+ VBITCLRV $63, V2, V3 // 43fc1173
+ VBITSETB $7, V2, V3 // 433c1473
+ VBITSETH $15, V2, V3 // 437c1473
+ VBITSETW $31, V2, V3 // 43fc1473
+ VBITSETV $63, V2, V3 // 43fc1573
+ VBITREVB $7, V2, V3 // 433c1873
+ VBITREVH $15, V2, V3 // 437c1873
+ VBITREVW $31, V2, V3 // 43fc1873
+ VBITREVV $63, V2, V3 // 43fc1973
+ XVBITCLRB $7, X2, X1 // 413c1077
+ XVBITCLRH $15, X2, X1 // 417c1077
+ XVBITCLRW $31, X2, X1 // 41fc1077
+ XVBITCLRV $63, X2, X1 // 41fc1177
+ XVBITSETB $7, X2, X1 // 413c1477
+ XVBITSETH $15, X2, X1 // 417c1477
+ XVBITSETW $31, X2, X1 // 41fc1477
+ XVBITSETV $63, X2, X1 // 41fc1577
+ XVBITREVB $7, X2, X1 // 413c1877
+ XVBITREVH $15, X2, X1 // 417c1877
+ XVBITREVW $31, X2, X1 // 41fc1877
+ XVBITREVV $63, X2, X1 // 41fc1977
diff --git a/src/cmd/asm/internal/asm/testdata/s390x.s b/src/cmd/asm/internal/asm/testdata/s390x.s
index 95a8c50dab..93c3ec9ea7 100644
--- a/src/cmd/asm/internal/asm/testdata/s390x.s
+++ b/src/cmd/asm/internal/asm/testdata/s390x.s
@@ -263,10 +263,15 @@ TEXT main·foo(SB),DUPOK|NOSPLIT,$16-0 // TEXT main.foo(SB), DUPOK|NOSPLIT, $16-
NC $8, (R15), n-8(SP) // d407f010f000
OC $8, (R15), n-8(SP) // d607f010f000
MVC $8, (R15), n-8(SP) // d207f010f000
+ MVC $256, 8192(R1), 8192(R2) // b90400a2c2a800002000b90400b1c2b800002000d2ffa000b000
MVCIN $8, (R15), n-8(SP) // e807f010f000
CLC $8, (R15), n-8(SP) // d507f000f010
XC $256, -8(R15), -8(R15) // b90400afc2a8fffffff8d7ffa000a000
- MVC $256, 8192(R1), 8192(R2) // b90400a2c2a800002000b90400b1c2b800002000d2ffa000b000
+ MVCLE 0, R4, R6 // a8640000
+ MVCLE 4095, R4, R6 // a8640fff
+ MVCLE $4095, R4, R6 // a8640fff
+ MVCLE (R3), R4, R6 // a8643000
+ MVCLE 10(R3), R4, R6 // a864300a
CMP R1, R2 // b9200012
CMP R3, $32767 // a73f7fff
@@ -535,6 +540,18 @@ TEXT main·foo(SB),DUPOK|NOSPLIT,$16-0 // TEXT main.foo(SB), DUPOK|NOSPLIT, $16-
VSTRCZBS V18, V20, V22, V24 // e78240306f8a
VSTRCZHS V18, V20, V22, V24 // e78241306f8a
VSTRCZFS V18, V20, V22, V24 // e78242306f8a
+ VFMAXSB $1, V2, V3, V4 // e742301020ef
+ WFMAXSB $2, V5, V6, V7 // e775602820ef
+ WFMAXSB $2, F5, F6, F7 // e775602820ef
+ VFMAXDB $3, V8, V9, V10 // e7a8903030ef
+ WFMAXDB $4, V11, V12, V13 // e7dbc04830ef
+ WFMAXDB $4, F11, F12, F13 // e7dbc04830ef
+ VFMINSB $7, V14, V15, V16 // e70ef07028ee
+ WFMINSB $8, V17, V18, V19 // e73120882eee
+ WFMINSB $8, F1, F2, F3 // e731208820ee
+ VFMINDB $9, V20, V21, V22 // e76450903eee
+ WFMINDB $10, V23, V24, V25 // e79780a83eee
+ WFMINDB $10, F7, F8, F9 // e79780a830ee
RET
RET foo(SB)
diff --git a/src/cmd/cgo/internal/test/test.go b/src/cmd/cgo/internal/test/test.go
index 844b2dd42c..fb4a8250a2 100644
--- a/src/cmd/cgo/internal/test/test.go
+++ b/src/cmd/cgo/internal/test/test.go
@@ -245,7 +245,7 @@ static void *thread(void *p) {
return NULL;
}
void testSendSIG() {
- const int N = 20;
+ enum { N = 20 };
int i;
pthread_t tid[N];
for (i = 0; i < N; i++) {
diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go
index 10870b7c85..dfa54e41d3 100644
--- a/src/cmd/cgo/out.go
+++ b/src/cmd/cgo/out.go
@@ -1812,7 +1812,7 @@ void _cgoPREFIX_Cfunc__Cmalloc(void *v) {
void *ret;
_cgo_tsan_acquire();
ret = malloc(a->p0);
- if (ret == 0 && a->p0 == 0) {
+ if (ret == NULL && a->p0 == 0) {
ret = malloc(1);
}
a->r1 = ret;
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
index 1dc952a455..853a10cb9a 100644
--- a/src/cmd/compile/internal/amd64/ggen.go
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -5,113 +5,23 @@
package amd64
import (
- "cmd/compile/internal/ir"
"cmd/compile/internal/objw"
- "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
-// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ,
-// See runtime/mkduff.go.
-const (
- dzBlocks = 16 // number of MOV/ADD blocks
- dzBlockLen = 4 // number of clears per block
- dzBlockSize = 23 // size of instructions in a single block
- dzMovSize = 5 // size of single MOV instruction w/ offset
- dzLeaqSize = 4 // size of single LEAQ instruction
- dzClearStep = 16 // number of bytes cleared by each MOV instruction
-
- dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
- dzSize = dzBlocks * dzBlockSize
-)
-
-// dzOff returns the offset for a jump into DUFFZERO.
-// b is the number of bytes to zero.
-func dzOff(b int64) int64 {
- off := int64(dzSize)
- off -= b / dzClearLen * dzBlockSize
- tailLen := b % dzClearLen
- if tailLen >= dzClearStep {
- off -= dzLeaqSize + dzMovSize*(tailLen/dzClearStep)
- }
- return off
-}
-
-// duffzeroDI returns the pre-adjustment to DI for a call to DUFFZERO.
-// b is the number of bytes to zero.
-func dzDI(b int64) int64 {
- tailLen := b % dzClearLen
- if tailLen < dzClearStep {
- return 0
- }
- tailSteps := tailLen / dzClearStep
- return -dzClearStep * (dzBlockLen - tailSteps)
-}
-
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
- const (
- r13 = 1 << iota // if R13 is already zeroed.
- )
-
- if cnt == 0 {
- return p
+ if cnt%8 != 0 {
+ panic("zeroed region not aligned")
}
-
- if cnt == 8 {
+ for cnt >= 16 {
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off)
+ off += 16
+ cnt -= 16
+ }
+ if cnt != 0 {
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off)
- } else if cnt <= int64(8*types.RegSize) {
- for i := int64(0); i < cnt/16; i++ {
- p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
- }
-
- if cnt%16 != 0 {
- p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
- }
- } else if cnt <= int64(128*types.RegSize) {
- // Save DI to r12. With the amd64 Go register abi, DI can contain
- // an incoming parameter, whereas R12 is always scratch.
- p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0)
- // Emit duffzero call
- p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
- p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
- p.To.Sym = ir.Syms.Duffzero
- if cnt%16 != 0 {
- p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
- }
- // Restore DI from r12
- p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R12, 0, obj.TYPE_REG, x86.REG_DI, 0)
-
- } else {
- // When the register ABI is in effect, at this point in the
- // prolog we may have live values in all of RAX,RDI,RCX. Save
- // them off to registers before the REPSTOSQ below, then
- // restore. Note that R12 and R13 are always available as
- // scratch regs; here we also use R15 (this is safe to do
- // since there won't be any globals accessed in the prolog).
- // See rewriteToUseGot() in obj6.go for more on r15 use.
-
- // Save rax/rdi/rcx
- p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0)
- p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_REG, x86.REG_R13, 0)
- p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_CX, 0, obj.TYPE_REG, x86.REG_R15, 0)
-
- // Set up the REPSTOSQ and kick it off.
- p = pp.Append(p, x86.AXORL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_REG, x86.REG_AX, 0)
- p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
- p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
- p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
- p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
-
- // Restore rax/rdi/rcx
- p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R12, 0, obj.TYPE_REG, x86.REG_DI, 0)
- p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_REG, x86.REG_AX, 0)
- p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R15, 0, obj.TYPE_REG, x86.REG_CX, 0)
-
- // Record the fact that r13 is no longer zero.
- *state &= ^uint32(r13)
}
-
return p
}
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
index 9e772a7169..8847580e25 100644
--- a/src/cmd/compile/internal/amd64/ssa.go
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -17,6 +17,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
+ "internal/abi"
)
// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
@@ -147,6 +148,15 @@ func memIdx(a *obj.Addr, v *ssa.Value) {
// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ,
// See runtime/mkduff.go.
+const (
+ dzBlocks = 16 // number of MOV/ADD blocks
+ dzBlockLen = 4 // number of clears per block
+ dzBlockSize = 23 // size of instructions in a single block
+ dzMovSize = 5 // size of single MOV instruction w/ offset
+ dzLeaqSize = 4 // size of single LEAQ instruction
+ dzClearStep = 16 // number of bytes cleared by each MOV instruction
+)
+
func duffStart(size int64) int64 {
x, _ := duff(size)
return x
@@ -1001,26 +1011,103 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
- case ssa.OpAMD64DUFFZERO:
+
+ case ssa.OpAMD64LoweredZero:
if s.ABI != obj.ABIInternal {
// zero X15 manually
opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
}
- off := duffStart(v.AuxInt)
- adj := duffAdj(v.AuxInt)
- var p *obj.Prog
- if adj != 0 {
- p = s.Prog(x86.ALEAQ)
- p.From.Type = obj.TYPE_MEM
- p.From.Offset = adj
- p.From.Reg = x86.REG_DI
- p.To.Type = obj.TYPE_REG
- p.To.Reg = x86.REG_DI
+ ptrReg := v.Args[0].Reg()
+ n := v.AuxInt
+ if n < 16 {
+ v.Fatalf("Zero too small %d", n)
}
- p = s.Prog(obj.ADUFFZERO)
- p.To.Type = obj.TYPE_ADDR
- p.To.Sym = ir.Syms.Duffzero
- p.To.Offset = off
+ zero16 := func(off int64) {
+ zero16(s, ptrReg, off)
+ }
+
+ // Generate zeroing instructions.
+ var off int64
+ for n >= 16 {
+ zero16(off)
+ off += 16
+ n -= 16
+ }
+ if n != 0 {
+ // use partially overlapped write.
+ // TODO: n <= 8, use smaller write?
+ zero16(off + n - 16)
+ }
+
+ case ssa.OpAMD64LoweredZeroLoop:
+ if s.ABI != obj.ABIInternal {
+ // zero X15 manually
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
+ ptrReg := v.Args[0].Reg()
+ countReg := v.RegTmp()
+ n := v.AuxInt
+ loopSize := int64(64)
+ if n < 3*loopSize {
+ // - a loop count of 0 won't work.
+ // - a loop count of 1 is useless.
+ // - a loop count of 2 is a code size ~tie
+ // 4 instructions to implement the loop
+ // 4 instructions in the loop body
+ // vs
+ // 8 instructions in the straightline code
+ // Might as well use straightline code.
+ v.Fatalf("ZeroLoop size too small %d", n)
+ }
+ zero16 := func(off int64) {
+ zero16(s, ptrReg, off)
+ }
+
+ // Put iteration count in a register.
+ // MOVL $n, countReg
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = n / loopSize
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = countReg
+ cntInit := p
+
+ // Zero loopSize bytes starting at ptrReg.
+ for i := range loopSize / 16 {
+ zero16(i * 16)
+ }
+ // ADDQ $loopSize, ptrReg
+ p = s.Prog(x86.AADDQ)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = loopSize
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ptrReg
+ // DECL countReg
+ p = s.Prog(x86.ADECL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = countReg
+ // Jump to first instruction in loop if we're not done yet.
+ // JNE head
+ p = s.Prog(x86.AJNE)
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(cntInit.Link)
+
+ // Multiples of the loop size are now done.
+ n %= loopSize
+
+ // Write any fractional portion.
+ var off int64
+ for n >= 16 {
+ zero16(off)
+ off += 16
+ n -= 16
+ }
+ if n != 0 {
+ // Use partially-overlapping write.
+ // TODO: n <= 8, use smaller write?
+ zero16(off + n - 16)
+ }
+
case ssa.OpAMD64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR
@@ -1151,12 +1238,91 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+ case ssa.OpAMD64LoweredPanicBoundsRR, ssa.OpAMD64LoweredPanicBoundsRC, ssa.OpAMD64LoweredPanicBoundsCR, ssa.OpAMD64LoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpAMD64LoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - x86.REG_AX)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - x86.REG_AX)
+ case ssa.OpAMD64LoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - x86.REG_AX)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(yVal)
+ }
+ case ssa.OpAMD64LoweredPanicBoundsCR:
+ yIsReg = true
+ yVal := int(v.Args[0].Reg() - x86.REG_AX)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(xVal)
+ }
+ case ssa.OpAMD64LoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
@@ -1931,6 +2097,17 @@ func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg in
return p
}
+// zero 16 bytes at reg+off.
+func zero16(s *ssagen.State, reg int16, off int64) {
+ // MOVUPS X15, off(ptrReg)
+ p := s.Prog(x86.AMOVUPS)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_X15
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = reg
+ p.To.Offset = off
+}
+
// XXX maybe make this part of v.Reg?
// On the other hand, it is architecture-specific.
func simdReg(v *ssa.Value) int16 {
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index f129ab493d..91ef31c215 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -18,6 +18,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm"
+ "internal/abi"
)
// loadByType returns the load instruction of the given type.
@@ -712,18 +713,167 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Name = obj.NAME_EXTERN
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpARMLoweredPanicBoundsA, ssa.OpARMLoweredPanicBoundsB, ssa.OpARMLoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(8) // space used in callee args area by assembly stubs
- case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
- p := s.Prog(obj.ACALL)
+
+ case ssa.OpARMLoweredPanicBoundsRR, ssa.OpARMLoweredPanicBoundsRC, ssa.OpARMLoweredPanicBoundsCR, ssa.OpARMLoweredPanicBoundsCC,
+ ssa.OpARMLoweredPanicExtendRR, ssa.OpARMLoweredPanicExtendRC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ extend := false
+ switch v.Op {
+ case ssa.OpARMLoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - arm.REG_R0)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - arm.REG_R0)
+ case ssa.OpARMLoweredPanicExtendRR:
+ extend = true
+ xIsReg = true
+ hi := int(v.Args[0].Reg() - arm.REG_R0)
+ lo := int(v.Args[1].Reg() - arm.REG_R0)
+ xVal = hi<<2 + lo // encode 2 register numbers
+ yIsReg = true
+ yVal = int(v.Args[2].Reg() - arm.REG_R0)
+ case ssa.OpARMLoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - arm.REG_R0)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REG_R0 + int16(yVal)
+ }
+ case ssa.OpARMLoweredPanicExtendRC:
+ extend = true
+ xIsReg = true
+ hi := int(v.Args[0].Reg() - arm.REG_R0)
+ lo := int(v.Args[1].Reg() - arm.REG_R0)
+ xVal = hi<<2 + lo // encode 2 register numbers
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ for yVal == hi || yVal == lo {
+ yVal++
+ }
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REG_R0 + int16(yVal)
+ }
+ case ssa.OpARMLoweredPanicBoundsCR:
+ yIsReg = true
+ yVal := int(v.Args[0].Reg() - arm.REG_R0)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else if signed && int64(int32(c)) == c || !signed && int64(uint32(c)) == c {
+ // Move constant to a register
+ xIsReg = true
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REG_R0 + int16(xVal)
+ } else {
+ // Move constant to two registers
+ extend = true
+ xIsReg = true
+ hi := 0
+ lo := 1
+ if hi == yVal {
+ hi = 2
+ }
+ if lo == yVal {
+ lo = 2
+ }
+ xVal = hi<<2 + lo
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c >> 32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REG_R0 + int16(hi)
+ p = s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(int32(c))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REG_R0 + int16(lo)
+ }
+ case ssa.OpARMLoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else if signed && int64(int32(c)) == c || !signed && int64(uint32(c)) == c {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REG_R0 + int16(xVal)
+ } else {
+ // Move constant to two registers
+ extend = true
+ xIsReg = true
+ hi := 0
+ lo := 1
+ xVal = hi<<2 + lo
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c >> 32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REG_R0 + int16(hi)
+ p = s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(int32(c))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REG_R0 + int16(lo)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 2
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REG_R0 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
- s.UseArgs(12) // space used in callee args area by assembly stubs
+ if extend {
+ p.To.Sym = ir.Syms.PanicExtend
+ } else {
+ p.To.Sym = ir.Syms.PanicBounds
+ }
+
case ssa.OpARMDUFFZERO:
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
index a681adcb7f..1402746700 100644
--- a/src/cmd/compile/internal/arm64/ggen.go
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -5,9 +5,7 @@
package arm64
import (
- "cmd/compile/internal/ir"
"cmd/compile/internal/objw"
- "cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
)
@@ -22,47 +20,20 @@ func padframe(frame int64) int64 {
}
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
- if cnt == 0 {
- return p
+ if cnt%8 != 0 {
+ panic("zeroed region not aligned")
}
- if cnt < int64(4*types.PtrSize) {
- for i := int64(0); i < cnt; i += int64(types.PtrSize) {
- p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
- }
- } else if cnt <= int64(128*types.PtrSize) {
- if cnt%(2*int64(types.PtrSize)) != 0 {
- p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
- off += int64(types.PtrSize)
- cnt -= int64(types.PtrSize)
- }
- p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
- p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
- p.Reg = arm64.REG_R20
- p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ir.Syms.Duffzero
- p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize)))
- } else {
- // Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP).
- // We are at the function entry, where no register is live, so it is okay to clobber
- // other registers
- const rtmp = arm64.REG_R20
- p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
- p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
- p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
- p.Reg = arm64.REGRT1
- p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
- p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
- p.Reg = arm64.REGRT1
- p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
- p.Scond = arm64.C_XPRE
- p1 := p
- p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
- p.Reg = arm64.REGRT2
- p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
- p.To.SetTarget(p1)
+ off += 8 // return address was ignored in offset calculation
+ for cnt >= 16 && off < 512 {
+ p = pp.Append(p, arm64.ASTP, obj.TYPE_REGREG, arm64.REGZERO, arm64.REGZERO, obj.TYPE_MEM, arm64.REGSP, off)
+ off += 16
+ cnt -= 16
+ }
+ for cnt != 0 {
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, off)
+ off += 8
+ cnt -= 8
}
-
return p
}
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
index 753cef743b..be7887318a 100644
--- a/src/cmd/compile/internal/arm64/ssa.go
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -16,6 +16,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
+ "internal/abi"
)
// loadByType returns the load instruction of the given type.
@@ -1122,12 +1123,91 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpARM64LoweredPanicBoundsA, ssa.OpARM64LoweredPanicBoundsB, ssa.OpARM64LoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+ case ssa.OpARM64LoweredPanicBoundsRR, ssa.OpARM64LoweredPanicBoundsRC, ssa.OpARM64LoweredPanicBoundsCR, ssa.OpARM64LoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpARM64LoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - arm64.REG_R0)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - arm64.REG_R0)
+ case ssa.OpARM64LoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - arm64.REG_R0)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REG_R0 + int16(yVal)
+ }
+ case ssa.OpARM64LoweredPanicBoundsCR:
+ yIsReg = true
+ yVal := int(v.Args[0].Reg() - arm64.REG_R0)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REG_R0 + int16(xVal)
+ }
+ case ssa.OpARM64LoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REG_R0 + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REG_R0 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(16) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
+
case ssa.OpARM64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(arm64.AMOVB)
diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
index fa13f07fdf..6ab39d2aaa 100644
--- a/src/cmd/compile/internal/dwarfgen/dwarf.go
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -203,7 +203,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
continue
}
if n.Class != ir.PPARAMOUT || !n.IsOutputParamInRegisters() {
- panic("invalid ir.Name on debugInfo.RegOutputParams list")
+ base.Fatalf("invalid ir.Name on debugInfo.RegOutputParams list")
}
dcl = append(dcl, n)
}
@@ -248,11 +248,6 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
tag = dwarf.DW_TAG_formal_parameter
}
- if n.Esc() == ir.EscHeap {
- // The variable in question has been promoted to the heap.
- // Its address is in n.Heapaddr.
- // TODO(thanm): generate a better location expression
- }
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
if n.InlFormal() || n.InlLocal() {
@@ -263,7 +258,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
}
}
declpos := base.Ctxt.InnermostPos(n.Pos())
- vars = append(vars, &dwarf.Var{
+ dvar := &dwarf.Var{
Name: n.Sym().Name,
IsReturnValue: isReturnValue,
Tag: tag,
@@ -277,8 +272,19 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
ChildIndex: -1,
DictIndex: n.DictIndex,
ClosureOffset: closureOffset(n, closureVars),
- })
- // Record go type of to insure that it gets emitted by the linker.
+ }
+ if n.Esc() == ir.EscHeap {
+ if n.Heapaddr == nil {
+ base.Fatalf("invalid heap allocated var without Heapaddr")
+ }
+ debug := fn.DebugInfo.(*ssa.FuncDebug)
+ list := createHeapDerefLocationList(n, debug.EntryID)
+ dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
+ debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
+ }
+ }
+ vars = append(vars, dvar)
+ // Record go type to ensure that it gets emitted by the linker.
fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
}
@@ -550,11 +556,34 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID, closureVars
return dvar
}
+// createHeapDerefLocationList creates a location list for a heap-escaped variable
+// that describes "dereference pointer at stack offset"
+func createHeapDerefLocationList(n *ir.Name, entryID ssa.ID) []byte {
+ // Get the stack offset where the heap pointer is stored
+ heapPtrOffset := n.Heapaddr.FrameOffset()
+ if base.Ctxt.Arch.FixedFrameSize == 0 {
+ heapPtrOffset -= int64(types.PtrSize)
+ }
+ if buildcfg.FramePointerEnabled {
+ heapPtrOffset -= int64(types.PtrSize)
+ }
+
+ // Create a location expression: DW_OP_fbreg <offset> DW_OP_deref
+ var locExpr []byte
+ var sizeIdx int
+ locExpr, sizeIdx = ssa.SetupLocList(base.Ctxt, entryID, locExpr, ssa.BlockStart.ID, ssa.FuncEnd.ID)
+ locExpr = append(locExpr, dwarf.DW_OP_fbreg)
+ locExpr = dwarf.AppendSleb128(locExpr, heapPtrOffset)
+ locExpr = append(locExpr, dwarf.DW_OP_deref)
+ base.Ctxt.Arch.ByteOrder.PutUint16(locExpr[sizeIdx:], uint16(len(locExpr)-sizeIdx-2))
+ return locExpr
+}
+
// RecordFlags records the specified command-line flags to be placed
// in the DWARF info.
func RecordFlags(flags ...string) {
if base.Ctxt.Pkgpath == "" {
- panic("missing pkgpath")
+ base.Fatalf("missing pkgpath")
}
type BoolFlag interface {
diff --git a/src/cmd/compile/internal/escape/call.go b/src/cmd/compile/internal/escape/call.go
index 58c44eb9bb..f9351de975 100644
--- a/src/cmd/compile/internal/escape/call.go
+++ b/src/cmd/compile/internal/escape/call.go
@@ -192,7 +192,7 @@ func (e *escape) call(ks []hole, call ir.Node) {
e.discard(call.X)
e.discard(call.Y)
- case ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
+ case ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVER:
call := call.(*ir.CallExpr)
for _, arg := range call.Args {
e.discard(arg)
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index 72d40bd258..6b34830b3d 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -122,17 +122,24 @@ type escape struct {
}
func Funcs(all []*ir.Func) {
- ir.VisitFuncsBottomUp(all, Batch)
+ // Make a cache of ir.ReassignOracles. The cache is lazily populated.
+ // TODO(thepudds): consider adding a field on ir.Func instead. We might also be able
+ // to use that field elsewhere, like in walk. See discussion in https://go.dev/cl/688075.
+ reassignOracles := make(map[*ir.Func]*ir.ReassignOracle)
+
+ ir.VisitFuncsBottomUp(all, func(list []*ir.Func, recursive bool) {
+ Batch(list, reassignOracles)
+ })
}
// Batch performs escape analysis on a minimal batch of
// functions.
-func Batch(fns []*ir.Func, recursive bool) {
+func Batch(fns []*ir.Func, reassignOracles map[*ir.Func]*ir.ReassignOracle) {
var b batch
b.heapLoc.attrs = attrEscapes | attrPersists | attrMutates | attrCalls
b.mutatorLoc.attrs = attrMutates
b.calleeLoc.attrs = attrCalls
- b.reassignOracles = make(map[*ir.Func]*ir.ReassignOracle)
+ b.reassignOracles = reassignOracles
// Construct data-flow graph from syntax trees.
for _, fn := range fns {
@@ -531,19 +538,9 @@ func (b *batch) rewriteWithLiterals(n ir.Node, fn *ir.Func) {
if n == nil || fn == nil {
return
}
- if n.Op() != ir.OMAKESLICE && n.Op() != ir.OCONVIFACE {
- return
- }
- // Look up a cached ReassignOracle for the function, lazily computing one if needed.
- ro := b.reassignOracle(fn)
- if ro == nil {
- base.Fatalf("no ReassignOracle for function %v with closure parent %v", fn, fn.ClosureParent)
- }
-
- assignTemp := func(n ir.Node, init *ir.Nodes) {
+ assignTemp := func(pos src.XPos, n ir.Node, init *ir.Nodes) {
// Preserve any side effects of n by assigning it to an otherwise unused temp.
- pos := n.Pos()
tmp := typecheck.TempAt(pos, fn, n.Type())
init.Append(typecheck.Stmt(ir.NewDecl(pos, ir.ODCL, tmp)))
init.Append(typecheck.Stmt(ir.NewAssignStmt(pos, tmp, n)))
@@ -561,6 +558,11 @@ func (b *batch) rewriteWithLiterals(n ir.Node, fn *ir.Func) {
}
if (*r).Op() != ir.OLITERAL {
+ // Look up a cached ReassignOracle for the function, lazily computing one if needed.
+ ro := b.reassignOracle(fn)
+ if ro == nil {
+ base.Fatalf("no ReassignOracle for function %v with closure parent %v", fn, fn.ClosureParent)
+ }
if s := ro.StaticValue(*r); s.Op() == ir.OLITERAL {
lit, ok := s.(*ir.BasicLit)
if !ok || lit.Val().Kind() != constant.Int {
@@ -572,8 +574,8 @@ func (b *batch) rewriteWithLiterals(n ir.Node, fn *ir.Func) {
return
}
// Preserve any side effects of the original expression, then replace it.
- assignTemp(*r, n.PtrInit())
- *r = lit
+ assignTemp(n.Pos(), *r, n.PtrInit())
+ *r = ir.NewBasicLit(n.Pos(), (*r).Type(), lit.Val())
}
}
}
@@ -582,6 +584,12 @@ func (b *batch) rewriteWithLiterals(n ir.Node, fn *ir.Func) {
// a literal to avoid heap allocating the underlying interface value.
conv := n.(*ir.ConvExpr)
if conv.X.Op() != ir.OLITERAL && !conv.X.Type().IsInterface() {
+ // TODO(thepudds): likely could avoid some work by tightening the check of conv.X's type.
+ // Look up a cached ReassignOracle for the function, lazily computing one if needed.
+ ro := b.reassignOracle(fn)
+ if ro == nil {
+ base.Fatalf("no ReassignOracle for function %v with closure parent %v", fn, fn.ClosureParent)
+ }
v := ro.StaticValue(conv.X)
if v != nil && v.Op() == ir.OLITERAL && ir.ValidTypeForConst(conv.X.Type(), v.Val()) {
if !base.LiteralAllocHash.MatchPos(n.Pos(), nil) {
@@ -592,9 +600,9 @@ func (b *batch) rewriteWithLiterals(n ir.Node, fn *ir.Func) {
base.WarnfAt(n.Pos(), "rewriting OCONVIFACE value from %v (%v) to %v (%v)", conv.X, conv.X.Type(), v, v.Type())
}
// Preserve any side effects of the original expression, then replace it.
- assignTemp(conv.X, conv.PtrInit())
+ assignTemp(conv.Pos(), conv.X, conv.PtrInit())
v := v.(*ir.BasicLit)
- conv.X = ir.NewBasicLit(conv.X.Pos(), conv.X.Type(), v.Val())
+ conv.X = ir.NewBasicLit(conv.Pos(), conv.X.Type(), v.Val())
typecheck.Expr(conv)
}
}
diff --git a/src/cmd/compile/internal/escape/expr.go b/src/cmd/compile/internal/escape/expr.go
index f479a2913a..1521c2edd1 100644
--- a/src/cmd/compile/internal/escape/expr.go
+++ b/src/cmd/compile/internal/escape/expr.go
@@ -139,7 +139,7 @@ func (e *escape) exprSkipInit(k hole, n ir.Node) {
e.discard(n.X)
case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OINLCALL,
- ir.OLEN, ir.OCAP, ir.OMIN, ir.OMAX, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVERFP,
+ ir.OLEN, ir.OCAP, ir.OMIN, ir.OMAX, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVER,
ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING, ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA:
e.call([]hole{k}, n)
diff --git a/src/cmd/compile/internal/escape/stmt.go b/src/cmd/compile/internal/escape/stmt.go
index b766864a30..2388873caf 100644
--- a/src/cmd/compile/internal/escape/stmt.go
+++ b/src/cmd/compile/internal/escape/stmt.go
@@ -183,7 +183,7 @@ func (e *escape) stmt(n ir.Node) {
dsts[i] = res.Nname.(*ir.Name)
}
e.assignList(dsts, n.Results, "return", n)
- case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLEAR, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLEAR, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTLN, ir.ORECOVER:
e.call(nil, n)
case ir.OGO, ir.ODEFER:
n := n.(*ir.GoDeferStmt)
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index 20899df04d..918d3f3514 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -104,12 +104,10 @@ func Main(archInit func(*ssagen.ArchInfo)) {
ir.Pkgs.Runtime = types.NewPkg("go.runtime", "runtime")
ir.Pkgs.Runtime.Prefix = "runtime"
- if buildcfg.Experiment.SwissMap {
- // Pseudo-package that contains the compiler's builtin
- // declarations for maps.
- ir.Pkgs.InternalMaps = types.NewPkg("go.internal/runtime/maps", "internal/runtime/maps")
- ir.Pkgs.InternalMaps.Prefix = "internal/runtime/maps"
- }
+ // Pseudo-package that contains the compiler's builtin
+ // declarations for maps.
+ ir.Pkgs.InternalMaps = types.NewPkg("go.internal/runtime/maps", "internal/runtime/maps")
+ ir.Pkgs.InternalMaps.Prefix = "internal/runtime/maps"
// pseudo-packages used in symbol tables
ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab")
diff --git a/src/cmd/compile/internal/importer/gcimporter_test.go b/src/cmd/compile/internal/importer/gcimporter_test.go
index 965c5d1a84..11e4ee6b58 100644
--- a/src/cmd/compile/internal/importer/gcimporter_test.go
+++ b/src/cmd/compile/internal/importer/gcimporter_test.go
@@ -673,3 +673,50 @@ type S struct {
}
wg.Wait()
}
+
+func TestIssue63285(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ tmpdir := t.TempDir()
+ testoutdir := filepath.Join(tmpdir, "testdata")
+ if err := os.Mkdir(testoutdir, 0700); err != nil {
+ t.Fatalf("making output dir: %v", err)
+ }
+
+ compile(t, "testdata", "issue63285.go", testoutdir, nil)
+
+ issue63285, err := Import(make(map[string]*types2.Package), "./testdata/issue63285", tmpdir, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ check := func(pkgname, src string, imports importMap) (*types2.Package, error) {
+ f, err := syntax.Parse(syntax.NewFileBase(pkgname), strings.NewReader(src), nil, nil, 0)
+ if err != nil {
+ return nil, err
+ }
+ config := &types2.Config{
+ Importer: imports,
+ }
+ return config.Check(pkgname, []*syntax.File{f}, nil)
+ }
+
+ const pSrc = `package p
+
+import "issue63285"
+
+var _ issue63285.A[issue63285.B[any]]
+`
+
+ importer := importMap{
+ "issue63285": issue63285,
+ }
+ if _, err := check("p", pSrc, importer); err != nil {
+ t.Errorf("Check failed: %v", err)
+ }
+}
diff --git a/src/cmd/compile/internal/importer/testdata/issue63285.go b/src/cmd/compile/internal/importer/testdata/issue63285.go
new file mode 100644
index 0000000000..54a06b7b88
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/issue63285.go
@@ -0,0 +1,11 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue63285
+
+type A[_ B[any]] struct{}
+
+type B[_ any] interface {
+ f() A[B[any]]
+}
diff --git a/src/cmd/compile/internal/importer/ureader.go b/src/cmd/compile/internal/importer/ureader.go
index 6012d283ac..f6df56b70e 100644
--- a/src/cmd/compile/internal/importer/ureader.go
+++ b/src/cmd/compile/internal/importer/ureader.go
@@ -67,7 +67,8 @@ type reader struct {
p *pkgReader
- dict *readerDict
+ dict *readerDict
+ delayed []func()
}
type readerDict struct {
@@ -420,7 +421,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types2.Package, string) {
pos := r.pos()
var tparams []*types2.TypeParam
if r.Version().Has(pkgbits.AliasTypeParamNames) {
- tparams = r.typeParamNames()
+ tparams = r.typeParamNames(false)
}
typ := r.typ()
return newAliasTypeName(pr.enableAlias, pos, objPkg, objName, typ, tparams)
@@ -433,28 +434,28 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types2.Package, string) {
case pkgbits.ObjFunc:
pos := r.pos()
- tparams := r.typeParamNames()
+ tparams := r.typeParamNames(false)
sig := r.signature(nil, nil, tparams)
return types2.NewFunc(pos, objPkg, objName, sig)
case pkgbits.ObjType:
pos := r.pos()
- return types2.NewTypeNameLazy(pos, objPkg, objName, func(named *types2.Named) (tparams []*types2.TypeParam, underlying types2.Type, methods []*types2.Func) {
- tparams = r.typeParamNames()
+ return types2.NewTypeNameLazy(pos, objPkg, objName, func(_ *types2.Named) ([]*types2.TypeParam, types2.Type, []*types2.Func, []func()) {
+ tparams := r.typeParamNames(true)
// TODO(mdempsky): Rewrite receiver types to underlying is an
// Interface? The go/types importer does this (I think because
// unit tests expected that), but cmd/compile doesn't care
// about it, so maybe we can avoid worrying about that here.
- underlying = r.typ().Underlying()
+ underlying := r.typ().Underlying()
- methods = make([]*types2.Func, r.Len())
+ methods := make([]*types2.Func, r.Len())
for i := range methods {
- methods[i] = r.method()
+ methods[i] = r.method(true)
}
- return
+ return tparams, underlying, methods, r.delayed
})
case pkgbits.ObjVar:
@@ -497,7 +498,7 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
return &dict
}
-func (r *reader) typeParamNames() []*types2.TypeParam {
+func (r *reader) typeParamNames(isLazy bool) []*types2.TypeParam {
r.Sync(pkgbits.SyncTypeParamNames)
// Note: This code assumes it only processes objects without
@@ -523,19 +524,38 @@ func (r *reader) typeParamNames() []*types2.TypeParam {
r.dict.tparams[i] = types2.NewTypeParam(tname, nil)
}
- for i, bound := range r.dict.bounds {
- r.dict.tparams[i].SetConstraint(r.p.typIdx(bound, r.dict))
+ // Type parameters that are read by lazy loaders cannot have their
+ // constraints set eagerly; do them after loading (go.dev/issue/63285).
+ if isLazy {
+ // The reader dictionary will continue mutating before we have time
+ // to call delayed functions; must make a local copy of both the type
+ // parameters and their (unexpanded) constraints.
+ bounds := make([]types2.Type, len(r.dict.bounds))
+ for i, bound := range r.dict.bounds {
+ bounds[i] = r.p.typIdx(bound, r.dict)
+ }
+
+ tparams := r.dict.tparams
+ r.delayed = append(r.delayed, func() {
+ for i, bound := range bounds {
+ tparams[i].SetConstraint(bound)
+ }
+ })
+ } else {
+ for i, bound := range r.dict.bounds {
+ r.dict.tparams[i].SetConstraint(r.p.typIdx(bound, r.dict))
+ }
}
return r.dict.tparams
}
-func (r *reader) method() *types2.Func {
+func (r *reader) method(isLazy bool) *types2.Func {
r.Sync(pkgbits.SyncMethod)
pos := r.pos()
pkg, name := r.selector()
- rtparams := r.typeParamNames()
+ rtparams := r.typeParamNames(isLazy)
sig := r.signature(r.param(), rtparams, nil)
_ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go.
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index 459c2498fc..c06f76fe9f 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -605,10 +605,7 @@ opSwitch:
v.budget -= inlineExtraPanicCost
case ir.ORECOVER:
- base.FatalfAt(n.Pos(), "ORECOVER missed typecheck")
- case ir.ORECOVERFP:
- // recover matches the argument frame pointer to find
- // the right panic value, so it needs an argument frame.
+ // TODO: maybe we could allow inlining of recover() now?
v.reason = "call to recover"
return true
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go
index d86fd7d71b..22312e2241 100644
--- a/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go
@@ -335,7 +335,7 @@ func (ffa *funcFlagsAnalyzer) nodeVisitPost(n ir.Node) {
ir.OPRINTLN, ir.OPRINT, ir.OLABEL, ir.OCALLINTER, ir.ODEFER,
ir.OSEND, ir.ORECV, ir.OSELRECV2, ir.OGO, ir.OAPPEND, ir.OAS2DOTTYPE,
ir.OAS2MAPR, ir.OGETG, ir.ODELETE, ir.OINLMARK, ir.OAS2RECV,
- ir.OMIN, ir.OMAX, ir.OMAKE, ir.ORECOVERFP, ir.OGETCALLERSP:
+ ir.OMIN, ir.OMAX, ir.OMAKE, ir.OGETCALLERSP:
// these should all be benign/uninteresting
case ir.OTAILCALL, ir.OJUMPTABLE, ir.OTYPESW:
// don't expect to see these at all.
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index e27e4336c9..d07e522d95 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -213,7 +213,7 @@ func (n *CallExpr) SetOp(op Op) {
ODELETE,
OGETG, OGETCALLERSP,
OMAKE, OMAX, OMIN, OPRINT, OPRINTLN,
- ORECOVER, ORECOVERFP:
+ ORECOVER:
n.op = op
}
}
@@ -912,12 +912,12 @@ FindRHS:
break FindRHS
}
}
- base.Fatalf("%v missing from LHS of %v", n, defn)
+ base.FatalfAt(defn.Pos(), "%v missing from LHS of %v", n, defn)
default:
return nil
}
if rhs == nil {
- base.Fatalf("RHS is nil: %v", defn)
+ base.FatalfAt(defn.Pos(), "RHS is nil: %v", defn)
}
if Reassigned(n) {
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index 058ada5ac3..003ec15de1 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -234,7 +234,6 @@ const (
OSLICEHEADER // sliceheader{Ptr, Len, Cap} (Ptr is unsafe.Pointer, Len is length, Cap is capacity)
OSTRINGHEADER // stringheader{Ptr, Len} (Ptr is unsafe.Pointer, Len is length)
ORECOVER // recover()
- ORECOVERFP // recover(Args) w/ explicit FP argument
ORECV // <-X
ORUNESTR // Type(X) (Type is string, X is rune)
OSELRECV2 // like OAS2: Lhs = Rhs where len(Lhs)=2, len(Rhs)=1, Rhs[0].Op = ORECV (appears as .Var of OCASE)
diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go
index a1806d1349..7494beee4c 100644
--- a/src/cmd/compile/internal/ir/op_string.go
+++ b/src/cmd/compile/internal/ir/op_string.go
@@ -108,62 +108,61 @@ func _() {
_ = x[OSLICEHEADER-97]
_ = x[OSTRINGHEADER-98]
_ = x[ORECOVER-99]
- _ = x[ORECOVERFP-100]
- _ = x[ORECV-101]
- _ = x[ORUNESTR-102]
- _ = x[OSELRECV2-103]
- _ = x[OMIN-104]
- _ = x[OMAX-105]
- _ = x[OREAL-106]
- _ = x[OIMAG-107]
- _ = x[OCOMPLEX-108]
- _ = x[OUNSAFEADD-109]
- _ = x[OUNSAFESLICE-110]
- _ = x[OUNSAFESLICEDATA-111]
- _ = x[OUNSAFESTRING-112]
- _ = x[OUNSAFESTRINGDATA-113]
- _ = x[OMETHEXPR-114]
- _ = x[OMETHVALUE-115]
- _ = x[OBLOCK-116]
- _ = x[OBREAK-117]
- _ = x[OCASE-118]
- _ = x[OCONTINUE-119]
- _ = x[ODEFER-120]
- _ = x[OFALL-121]
- _ = x[OFOR-122]
- _ = x[OGOTO-123]
- _ = x[OIF-124]
- _ = x[OLABEL-125]
- _ = x[OGO-126]
- _ = x[ORANGE-127]
- _ = x[ORETURN-128]
- _ = x[OSELECT-129]
- _ = x[OSWITCH-130]
- _ = x[OTYPESW-131]
- _ = x[OINLCALL-132]
- _ = x[OMAKEFACE-133]
- _ = x[OITAB-134]
- _ = x[OIDATA-135]
- _ = x[OSPTR-136]
- _ = x[OCFUNC-137]
- _ = x[OCHECKNIL-138]
- _ = x[ORESULT-139]
- _ = x[OINLMARK-140]
- _ = x[OLINKSYMOFFSET-141]
- _ = x[OJUMPTABLE-142]
- _ = x[OINTERFACESWITCH-143]
- _ = x[ODYNAMICDOTTYPE-144]
- _ = x[ODYNAMICDOTTYPE2-145]
- _ = x[ODYNAMICTYPE-146]
- _ = x[OTAILCALL-147]
- _ = x[OGETG-148]
- _ = x[OGETCALLERSP-149]
- _ = x[OEND-150]
+ _ = x[ORECV-100]
+ _ = x[ORUNESTR-101]
+ _ = x[OSELRECV2-102]
+ _ = x[OMIN-103]
+ _ = x[OMAX-104]
+ _ = x[OREAL-105]
+ _ = x[OIMAG-106]
+ _ = x[OCOMPLEX-107]
+ _ = x[OUNSAFEADD-108]
+ _ = x[OUNSAFESLICE-109]
+ _ = x[OUNSAFESLICEDATA-110]
+ _ = x[OUNSAFESTRING-111]
+ _ = x[OUNSAFESTRINGDATA-112]
+ _ = x[OMETHEXPR-113]
+ _ = x[OMETHVALUE-114]
+ _ = x[OBLOCK-115]
+ _ = x[OBREAK-116]
+ _ = x[OCASE-117]
+ _ = x[OCONTINUE-118]
+ _ = x[ODEFER-119]
+ _ = x[OFALL-120]
+ _ = x[OFOR-121]
+ _ = x[OGOTO-122]
+ _ = x[OIF-123]
+ _ = x[OLABEL-124]
+ _ = x[OGO-125]
+ _ = x[ORANGE-126]
+ _ = x[ORETURN-127]
+ _ = x[OSELECT-128]
+ _ = x[OSWITCH-129]
+ _ = x[OTYPESW-130]
+ _ = x[OINLCALL-131]
+ _ = x[OMAKEFACE-132]
+ _ = x[OITAB-133]
+ _ = x[OIDATA-134]
+ _ = x[OSPTR-135]
+ _ = x[OCFUNC-136]
+ _ = x[OCHECKNIL-137]
+ _ = x[ORESULT-138]
+ _ = x[OINLMARK-139]
+ _ = x[OLINKSYMOFFSET-140]
+ _ = x[OJUMPTABLE-141]
+ _ = x[OINTERFACESWITCH-142]
+ _ = x[ODYNAMICDOTTYPE-143]
+ _ = x[ODYNAMICDOTTYPE2-144]
+ _ = x[ODYNAMICTYPE-145]
+ _ = x[OTAILCALL-146]
+ _ = x[OGETG-147]
+ _ = x[OGETCALLERSP-148]
+ _ = x[OEND-149]
}
-const _Op_name = "XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLEARCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTLNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERSTRINGHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2MINMAXREALIMAGCOMPLEXUNSAFEADDUNSAFESLICEUNSAFESLICEDATAUNSAFESTRINGUNSAFESTRINGDATAMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWINLCALLMAKEFACEITABIDATASPTRCFUNCCHECKNILRESULTINLMARKLINKSYMOFFSETJUMPTABLEINTERFACESWITCHDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERSPEND"
+const _Op_name = "XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLEARCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTLNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERSTRINGHEADERRECOVERRECVRUNESTRSELRECV2MINMAXREALIMAGCOMPLEXUNSAFEADDUNSAFESLICEUNSAFESLICEDATAUNSAFESTRINGUNSAFESTRINGDATAMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWINLCALLMAKEFACEITABIDATASPTRCFUNCCHECKNILRESULTINLMARKLINKSYMOFFSETJUMPTABLEINTERFACESWITCHDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERSPEND"
-var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 129, 141, 143, 146, 156, 163, 170, 177, 181, 185, 193, 201, 210, 213, 218, 223, 230, 237, 243, 252, 260, 268, 274, 278, 287, 294, 298, 301, 308, 314, 317, 323, 330, 338, 342, 349, 357, 359, 361, 363, 365, 367, 369, 374, 379, 387, 390, 399, 402, 406, 414, 421, 430, 443, 446, 449, 452, 455, 458, 461, 467, 470, 473, 479, 483, 486, 490, 495, 500, 507, 512, 516, 521, 529, 537, 543, 552, 563, 575, 582, 591, 595, 602, 610, 613, 616, 620, 624, 631, 640, 651, 666, 678, 694, 702, 711, 716, 721, 725, 733, 738, 742, 745, 749, 751, 756, 758, 763, 769, 775, 781, 787, 794, 802, 806, 811, 815, 820, 828, 834, 841, 854, 863, 878, 892, 907, 918, 926, 930, 941, 944}
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 129, 141, 143, 146, 156, 163, 170, 177, 181, 185, 193, 201, 210, 213, 218, 223, 230, 237, 243, 252, 260, 268, 274, 278, 287, 294, 298, 301, 308, 314, 317, 323, 330, 338, 342, 349, 357, 359, 361, 363, 365, 367, 369, 374, 379, 387, 390, 399, 402, 406, 414, 421, 430, 443, 446, 449, 452, 455, 458, 461, 467, 470, 473, 479, 483, 486, 490, 495, 500, 507, 512, 516, 521, 529, 537, 543, 552, 563, 575, 582, 586, 593, 601, 604, 607, 611, 615, 622, 631, 642, 657, 669, 685, 693, 702, 707, 712, 716, 724, 729, 733, 736, 740, 742, 747, 749, 754, 760, 766, 772, 778, 785, 793, 797, 802, 806, 811, 819, 825, 832, 845, 854, 869, 883, 898, 909, 917, 921, 932, 935}
func (i Op) String() string {
if i >= Op(len(_Op_index)-1) {
diff --git a/src/cmd/compile/internal/ir/reassignment.go b/src/cmd/compile/internal/ir/reassignment.go
index ff54f708c2..ba14d078a2 100644
--- a/src/cmd/compile/internal/ir/reassignment.go
+++ b/src/cmd/compile/internal/ir/reassignment.go
@@ -178,12 +178,12 @@ FindRHS:
break FindRHS
}
}
- base.Fatalf("%v missing from LHS of %v", n, defn)
+ base.FatalfAt(defn.Pos(), "%v missing from LHS of %v", n, defn)
default:
return nil
}
if rhs == nil {
- base.Fatalf("RHS is nil: %v", defn)
+ base.FatalfAt(defn.Pos(), "RHS is nil: %v", defn)
}
if _, ok := ro.singleDef[n]; !ok {
diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go
index e2da710f02..ee0f52fbf3 100644
--- a/src/cmd/compile/internal/ir/symtab.go
+++ b/src/cmd/compile/internal/ir/symtab.go
@@ -37,6 +37,8 @@ type symsStruct struct {
Msanmove *obj.LSym
Newobject *obj.LSym
Newproc *obj.LSym
+ PanicBounds *obj.LSym
+ PanicExtend *obj.LSym
Panicdivide *obj.LSym
Panicshift *obj.LSym
PanicdottypeE *obj.LSym
diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go
index b9d3030e96..6c1435c724 100644
--- a/src/cmd/compile/internal/liveness/plive.go
+++ b/src/cmd/compile/internal/liveness/plive.go
@@ -769,7 +769,7 @@ func (lv *Liveness) epilogue() {
// its stack copy is not live.
continue
}
- // Note: zeroing is handled by zeroResults in walk.go.
+ // Note: zeroing is handled by zeroResults in ../ssagen/ssa.go.
livedefer.Set(int32(i))
}
if n.IsOutputParamHeapAddr() {
diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go
index a71a5c3e43..2d986a5ff4 100644
--- a/src/cmd/compile/internal/loong64/ssa.go
+++ b/src/cmd/compile/internal/loong64/ssa.go
@@ -16,6 +16,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/loong64"
+ "internal/abi"
)
// isFPreg reports whether r is an FP register.
@@ -663,12 +664,92 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0x1A
- case ssa.OpLOONG64LoweredPanicBoundsA, ssa.OpLOONG64LoweredPanicBoundsB, ssa.OpLOONG64LoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+ case ssa.OpLOONG64LoweredPanicBoundsRR, ssa.OpLOONG64LoweredPanicBoundsRC, ssa.OpLOONG64LoweredPanicBoundsCR, ssa.OpLOONG64LoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpLOONG64LoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - loong64.REG_R4)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - loong64.REG_R4)
+ case ssa.OpLOONG64LoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - loong64.REG_R4)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(loong64.AMOVV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = loong64.REG_R4 + int16(yVal)
+ }
+ case ssa.OpLOONG64LoweredPanicBoundsCR:
+ yIsReg = true
+ yVal := int(v.Args[0].Reg() - loong64.REG_R4)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(loong64.AMOVV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = loong64.REG_R4 + int16(xVal)
+ }
+ case ssa.OpLOONG64LoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(loong64.AMOVV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = loong64.REG_R4 + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(loong64.AMOVV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = loong64.REG_R4 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(16) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
+
case ssa.OpLOONG64LoweredAtomicLoad8, ssa.OpLOONG64LoweredAtomicLoad32, ssa.OpLOONG64LoweredAtomicLoad64:
// MOVB (Rarg0), Rout
// DBAR 0x14
diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go
index 9762554829..7390db2945 100644
--- a/src/cmd/compile/internal/mips/ssa.go
+++ b/src/cmd/compile/internal/mips/ssa.go
@@ -15,6 +15,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
+ "internal/abi"
)
// isFPreg reports whether r is an FP register.
@@ -486,18 +487,167 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Name = obj.NAME_EXTERN
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpMIPSLoweredPanicBoundsA, ssa.OpMIPSLoweredPanicBoundsB, ssa.OpMIPSLoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(8) // space used in callee args area by assembly stubs
- case ssa.OpMIPSLoweredPanicExtendA, ssa.OpMIPSLoweredPanicExtendB, ssa.OpMIPSLoweredPanicExtendC:
- p := s.Prog(obj.ACALL)
+
+ case ssa.OpMIPSLoweredPanicBoundsRR, ssa.OpMIPSLoweredPanicBoundsRC, ssa.OpMIPSLoweredPanicBoundsCR, ssa.OpMIPSLoweredPanicBoundsCC,
+ ssa.OpMIPSLoweredPanicExtendRR, ssa.OpMIPSLoweredPanicExtendRC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ extend := false
+ switch v.Op {
+ case ssa.OpMIPSLoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - mips.REG_R1)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - mips.REG_R1)
+ case ssa.OpMIPSLoweredPanicExtendRR:
+ extend = true
+ xIsReg = true
+ hi := int(v.Args[0].Reg() - mips.REG_R1)
+ lo := int(v.Args[1].Reg() - mips.REG_R1)
+ xVal = hi<<2 + lo // encode 2 register numbers
+ yIsReg = true
+ yVal = int(v.Args[2].Reg() - mips.REG_R1)
+ case ssa.OpMIPSLoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - mips.REG_R1)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(yVal)
+ }
+ case ssa.OpMIPSLoweredPanicExtendRC:
+ extend = true
+ xIsReg = true
+ hi := int(v.Args[0].Reg() - mips.REG_R1)
+ lo := int(v.Args[1].Reg() - mips.REG_R1)
+ xVal = hi<<2 + lo // encode 2 register numbers
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ for yVal == hi || yVal == lo {
+ yVal++
+ }
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(yVal)
+ }
+ case ssa.OpMIPSLoweredPanicBoundsCR:
+ yIsReg = true
+ yVal := int(v.Args[0].Reg() - mips.REG_R1)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else if signed && int64(int32(c)) == c || !signed && int64(uint32(c)) == c {
+ // Move constant to a register
+ xIsReg = true
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(xVal)
+ } else {
+ // Move constant to two registers
+ extend = true
+ xIsReg = true
+ hi := 0
+ lo := 1
+ if hi == yVal {
+ hi = 2
+ }
+ if lo == yVal {
+ lo = 2
+ }
+ xVal = hi<<2 + lo
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c >> 32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(hi)
+ p = s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(int32(c))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(lo)
+ }
+ case ssa.OpMIPSLoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else if signed && int64(int32(c)) == c || !signed && int64(uint32(c)) == c {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(xVal)
+ } else {
+ // Move constant to two registers
+ extend = true
+ xIsReg = true
+ hi := 0
+ lo := 1
+ xVal = hi<<2 + lo
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c >> 32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(hi)
+ p = s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(int32(c))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(lo)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 2
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
- s.UseArgs(12) // space used in callee args area by assembly stubs
+ if extend {
+ p.To.Sym = ir.Syms.PanicExtend
+ } else {
+ p.To.Sym = ir.Syms.PanicBounds
+ }
+
case ssa.OpMIPSLoweredAtomicLoad8,
ssa.OpMIPSLoweredAtomicLoad32:
s.Prog(mips.ASYNC)
diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go
index af94c16f6d..37e2274ae8 100644
--- a/src/cmd/compile/internal/mips64/ssa.go
+++ b/src/cmd/compile/internal/mips64/ssa.go
@@ -15,6 +15,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
+ "internal/abi"
)
// isFPreg reports whether r is an FP register.
@@ -507,12 +508,93 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Name = obj.NAME_EXTERN
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpMIPS64LoweredPanicBoundsA, ssa.OpMIPS64LoweredPanicBoundsB, ssa.OpMIPS64LoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+
+ case ssa.OpMIPS64LoweredPanicBoundsRR, ssa.OpMIPS64LoweredPanicBoundsRC, ssa.OpMIPS64LoweredPanicBoundsCR, ssa.OpMIPS64LoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpMIPS64LoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - mips.REG_R1)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - mips.REG_R1)
+ case ssa.OpMIPS64LoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - mips.REG_R1)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(yVal)
+ }
+ case ssa.OpMIPS64LoweredPanicBoundsCR:
+ yIsReg = true
+ yVal := int(v.Args[0].Reg() - mips.REG_R1)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(xVal)
+ }
+ case ssa.OpMIPS64LoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(16) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
+
case ssa.OpMIPS64LoweredAtomicLoad8, ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64:
as := mips.AMOVV
switch v.Op {
diff --git a/src/cmd/compile/internal/reflectdata/map_swiss.go b/src/cmd/compile/internal/reflectdata/map.go
index 54266a604a..2b43d4af27 100644
--- a/src/cmd/compile/internal/reflectdata/map_swiss.go
+++ b/src/cmd/compile/internal/reflectdata/map.go
@@ -15,10 +15,10 @@ import (
"internal/abi"
)
-// SwissMapGroupType makes the map slot group type given the type of the map.
-func SwissMapGroupType(t *types.Type) *types.Type {
- if t.MapType().SwissGroup != nil {
- return t.MapType().SwissGroup
+// MapGroupType makes the map slot group type given the type of the map.
+func MapGroupType(t *types.Type) *types.Type {
+ if t.MapType().Group != nil {
+ return t.MapType().Group
}
// Builds a type representing a group structure for the given map type.
@@ -29,7 +29,7 @@ func SwissMapGroupType(t *types.Type) *types.Type {
//
// type group struct {
// ctrl uint64
- // slots [abi.SwissMapGroupSlots]struct {
+ // slots [abi.MapGroupSlots]struct {
// key keyType
// elem elemType
// }
@@ -39,10 +39,10 @@ func SwissMapGroupType(t *types.Type) *types.Type {
elemtype := t.Elem()
types.CalcSize(keytype)
types.CalcSize(elemtype)
- if keytype.Size() > abi.SwissMapMaxKeyBytes {
+ if keytype.Size() > abi.MapMaxKeyBytes {
keytype = types.NewPtr(keytype)
}
- if elemtype.Size() > abi.SwissMapMaxElemBytes {
+ if elemtype.Size() > abi.MapMaxElemBytes {
elemtype = types.NewPtr(elemtype)
}
@@ -53,7 +53,7 @@ func SwissMapGroupType(t *types.Type) *types.Type {
slot := types.NewStruct(slotFields)
slot.SetNoalg(true)
- slotArr := types.NewArray(slot, abi.SwissMapGroupSlots)
+ slotArr := types.NewArray(slot, abi.MapGroupSlots)
slotArr.SetNoalg(true)
fields := []*types.Field{
@@ -76,25 +76,25 @@ func SwissMapGroupType(t *types.Type) *types.Type {
// the end to ensure pointers are valid.
base.Fatalf("bad group size for %v", t)
}
- if t.Key().Size() > abi.SwissMapMaxKeyBytes && !keytype.IsPtr() {
+ if t.Key().Size() > abi.MapMaxKeyBytes && !keytype.IsPtr() {
base.Fatalf("key indirect incorrect for %v", t)
}
- if t.Elem().Size() > abi.SwissMapMaxElemBytes && !elemtype.IsPtr() {
+ if t.Elem().Size() > abi.MapMaxElemBytes && !elemtype.IsPtr() {
base.Fatalf("elem indirect incorrect for %v", t)
}
- t.MapType().SwissGroup = group
+ t.MapType().Group = group
group.StructType().Map = t
return group
}
-var cachedSwissTableType *types.Type
+var cachedMapTableType *types.Type
-// swissTableType returns a type interchangeable with internal/runtime/maps.table.
+// mapTableType returns a type interchangeable with internal/runtime/maps.table.
// Make sure this stays in sync with internal/runtime/maps/table.go.
-func swissTableType() *types.Type {
- if cachedSwissTableType != nil {
- return cachedSwissTableType
+func mapTableType() *types.Type {
+ if cachedMapTableType != nil {
+ return cachedMapTableType
}
// type table struct {
@@ -135,17 +135,17 @@ func swissTableType() *types.Type {
base.Fatalf("internal/runtime/maps.table size not correct: got %d, want %d", table.Size(), size)
}
- cachedSwissTableType = table
+ cachedMapTableType = table
return table
}
-var cachedSwissMapType *types.Type
+var cachedMapType *types.Type
-// SwissMapType returns a type interchangeable with internal/runtime/maps.Map.
+// MapType returns a type interchangeable with internal/runtime/maps.Map.
// Make sure this stays in sync with internal/runtime/maps/map.go.
-func SwissMapType() *types.Type {
- if cachedSwissMapType != nil {
- return cachedSwissMapType
+func MapType() *types.Type {
+ if cachedMapType != nil {
+ return cachedMapType
}
// type Map struct {
@@ -191,23 +191,23 @@ func SwissMapType() *types.Type {
base.Fatalf("internal/runtime/maps.Map size not correct: got %d, want %d", m.Size(), size)
}
- cachedSwissMapType = m
+ cachedMapType = m
return m
}
-var cachedSwissIterType *types.Type
+var cachedMapIterType *types.Type
-// SwissMapIterType returns a type interchangeable with runtime.hiter.
-// Make sure this stays in sync with runtime/map.go.
-func SwissMapIterType() *types.Type {
- if cachedSwissIterType != nil {
- return cachedSwissIterType
+// MapIterType returns a type interchangeable with internal/runtime/maps.Iter.
+// Make sure this stays in sync with internal/runtime/maps/table.go.
+func MapIterType() *types.Type {
+ if cachedMapIterType != nil {
+ return cachedMapIterType
}
// type Iter struct {
// key unsafe.Pointer // *Key
// elem unsafe.Pointer // *Elem
- // typ unsafe.Pointer // *SwissMapType
+ // typ unsafe.Pointer // *MapType
// m *Map
//
// groupSlotOffset uint64
@@ -231,13 +231,13 @@ func SwissMapIterType() *types.Type {
makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
makefield("typ", types.Types[types.TUNSAFEPTR]),
- makefield("m", types.NewPtr(SwissMapType())),
+ makefield("m", types.NewPtr(MapType())),
makefield("groupSlotOffset", types.Types[types.TUINT64]),
makefield("dirOffset", types.Types[types.TUINT64]),
makefield("clearSeq", types.Types[types.TUINT64]),
makefield("globalDepth", types.Types[types.TUINT8]),
makefield("dirIdx", types.Types[types.TINT]),
- makefield("tab", types.NewPtr(swissTableType())),
+ makefield("tab", types.NewPtr(mapTableType())),
makefield("group", types.Types[types.TUNSAFEPTR]),
makefield("entryIdx", types.Types[types.TUINT64]),
}
@@ -257,13 +257,13 @@ func SwissMapIterType() *types.Type {
base.Fatalf("internal/runtime/maps.Iter size not correct: got %d, want %d", iter.Size(), size)
}
- cachedSwissIterType = iter
+ cachedMapIterType = iter
return iter
}
-func writeSwissMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
- // internal/abi.SwissMapType
- gtyp := SwissMapGroupType(t)
+func writeMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
+ // internal/abi.MapType
+ gtyp := MapGroupType(t)
s1 := writeType(t.Key())
s2 := writeType(t.Elem())
s3 := writeType(gtyp)
@@ -287,16 +287,16 @@ func writeSwissMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
c.Field("ElemOff").WriteUintptr(uint64(elemOff))
var flags uint32
if needkeyupdate(t.Key()) {
- flags |= abi.SwissMapNeedKeyUpdate
+ flags |= abi.MapNeedKeyUpdate
}
if hashMightPanic(t.Key()) {
- flags |= abi.SwissMapHashMightPanic
+ flags |= abi.MapHashMightPanic
}
- if t.Key().Size() > abi.SwissMapMaxKeyBytes {
- flags |= abi.SwissMapIndirectKey
+ if t.Key().Size() > abi.MapMaxKeyBytes {
+ flags |= abi.MapIndirectKey
}
- if t.Elem().Size() > abi.SwissMapMaxKeyBytes {
- flags |= abi.SwissMapIndirectElem
+ if t.Elem().Size() > abi.MapMaxKeyBytes {
+ flags |= abi.MapIndirectElem
}
c.Field("Flags").WriteUint32(flags)
diff --git a/src/cmd/compile/internal/reflectdata/map_noswiss.go b/src/cmd/compile/internal/reflectdata/map_noswiss.go
deleted file mode 100644
index a6fab4cbac..0000000000
--- a/src/cmd/compile/internal/reflectdata/map_noswiss.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package reflectdata
-
-import (
- "internal/abi"
-
- "cmd/compile/internal/base"
- "cmd/compile/internal/ir"
- "cmd/compile/internal/rttype"
- "cmd/compile/internal/types"
- "cmd/internal/obj"
- "cmd/internal/objabi"
- "cmd/internal/src"
-)
-
-// OldMapBucketType makes the map bucket type given the type of the map.
-func OldMapBucketType(t *types.Type) *types.Type {
- // Builds a type representing a Bucket structure for
- // the given map type. This type is not visible to users -
- // we include only enough information to generate a correct GC
- // program for it.
- // Make sure this stays in sync with runtime/map.go.
- //
- // A "bucket" is a "struct" {
- // tophash [abi.OldMapBucketCount]uint8
- // keys [abi.OldMapBucketCount]keyType
- // elems [abi.OldMapBucketCount]elemType
- // overflow *bucket
- // }
- if t.MapType().OldBucket != nil {
- return t.MapType().OldBucket
- }
-
- keytype := t.Key()
- elemtype := t.Elem()
- types.CalcSize(keytype)
- types.CalcSize(elemtype)
- if keytype.Size() > abi.OldMapMaxKeyBytes {
- keytype = types.NewPtr(keytype)
- }
- if elemtype.Size() > abi.OldMapMaxElemBytes {
- elemtype = types.NewPtr(elemtype)
- }
-
- field := make([]*types.Field, 0, 5)
-
- // The first field is: uint8 topbits[BUCKETSIZE].
- arr := types.NewArray(types.Types[types.TUINT8], abi.OldMapBucketCount)
- field = append(field, makefield("topbits", arr))
-
- arr = types.NewArray(keytype, abi.OldMapBucketCount)
- arr.SetNoalg(true)
- keys := makefield("keys", arr)
- field = append(field, keys)
-
- arr = types.NewArray(elemtype, abi.OldMapBucketCount)
- arr.SetNoalg(true)
- elems := makefield("elems", arr)
- field = append(field, elems)
-
- // If keys and elems have no pointers, the map implementation
- // can keep a list of overflow pointers on the side so that
- // buckets can be marked as having no pointers.
- // Arrange for the bucket to have no pointers by changing
- // the type of the overflow field to uintptr in this case.
- // See comment on hmap.overflow in runtime/map.go.
- otyp := types.Types[types.TUNSAFEPTR]
- if !elemtype.HasPointers() && !keytype.HasPointers() {
- otyp = types.Types[types.TUINTPTR]
- }
- overflow := makefield("overflow", otyp)
- field = append(field, overflow)
-
- // link up fields
- bucket := types.NewStruct(field[:])
- bucket.SetNoalg(true)
- types.CalcSize(bucket)
-
- // Check invariants that map code depends on.
- if !types.IsComparable(t.Key()) {
- base.Fatalf("unsupported map key type for %v", t)
- }
- if abi.OldMapBucketCount < 8 {
- base.Fatalf("bucket size %d too small for proper alignment %d", abi.OldMapBucketCount, 8)
- }
- if uint8(keytype.Alignment()) > abi.OldMapBucketCount {
- base.Fatalf("key align too big for %v", t)
- }
- if uint8(elemtype.Alignment()) > abi.OldMapBucketCount {
- base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, abi.OldMapBucketCount)
- }
- if keytype.Size() > abi.OldMapMaxKeyBytes {
- base.Fatalf("key size too large for %v", t)
- }
- if elemtype.Size() > abi.OldMapMaxElemBytes {
- base.Fatalf("elem size too large for %v", t)
- }
- if t.Key().Size() > abi.OldMapMaxKeyBytes && !keytype.IsPtr() {
- base.Fatalf("key indirect incorrect for %v", t)
- }
- if t.Elem().Size() > abi.OldMapMaxElemBytes && !elemtype.IsPtr() {
- base.Fatalf("elem indirect incorrect for %v", t)
- }
- if keytype.Size()%keytype.Alignment() != 0 {
- base.Fatalf("key size not a multiple of key align for %v", t)
- }
- if elemtype.Size()%elemtype.Alignment() != 0 {
- base.Fatalf("elem size not a multiple of elem align for %v", t)
- }
- if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 {
- base.Fatalf("bucket align not multiple of key align %v", t)
- }
- if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 {
- base.Fatalf("bucket align not multiple of elem align %v", t)
- }
- if keys.Offset%keytype.Alignment() != 0 {
- base.Fatalf("bad alignment of keys in bmap for %v", t)
- }
- if elems.Offset%elemtype.Alignment() != 0 {
- base.Fatalf("bad alignment of elems in bmap for %v", t)
- }
-
- // Double-check that overflow field is final memory in struct,
- // with no padding at end.
- if overflow.Offset != bucket.Size()-int64(types.PtrSize) {
- base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d",
- t, overflow.Offset, bucket.Size()-int64(types.PtrSize))
- }
-
- t.MapType().OldBucket = bucket
-
- bucket.StructType().Map = t
- return bucket
-}
-
-var oldHmapType *types.Type
-
-// OldMapType returns a type interchangeable with runtime.hmap.
-// Make sure this stays in sync with runtime/map.go.
-func OldMapType() *types.Type {
- if oldHmapType != nil {
- return oldHmapType
- }
-
- // build a struct:
- // type hmap struct {
- // count int
- // flags uint8
- // B uint8
- // noverflow uint16
- // hash0 uint32
- // buckets unsafe.Pointer
- // oldbuckets unsafe.Pointer
- // nevacuate uintptr
- // clearSeq uint64
- // extra unsafe.Pointer // *mapextra
- // }
- // must match runtime/map.go:hmap.
- fields := []*types.Field{
- makefield("count", types.Types[types.TINT]),
- makefield("flags", types.Types[types.TUINT8]),
- makefield("B", types.Types[types.TUINT8]),
- makefield("noverflow", types.Types[types.TUINT16]),
- makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
- makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP.
- makefield("oldbuckets", types.Types[types.TUNSAFEPTR]),
- makefield("nevacuate", types.Types[types.TUINTPTR]),
- makefield("clearSeq", types.Types[types.TUINT64]),
- makefield("extra", types.Types[types.TUNSAFEPTR]),
- }
-
- n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap"))
- hmap := types.NewNamed(n)
- n.SetType(hmap)
- n.SetTypecheck(1)
-
- hmap.SetUnderlying(types.NewStruct(fields))
- types.CalcSize(hmap)
-
- // The size of hmap should be 56 bytes on 64 bit
- // and 36 bytes on 32 bit platforms.
- if size := int64(2*8 + 5*types.PtrSize); hmap.Size() != size {
- base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
- }
-
- oldHmapType = hmap
- return hmap
-}
-
-var oldHiterType *types.Type
-
-// OldMapIterType returns a type interchangeable with runtime.hiter.
-// Make sure this stays in sync with runtime/map.go.
-func OldMapIterType() *types.Type {
- if oldHiterType != nil {
- return oldHiterType
- }
-
- hmap := OldMapType()
-
- // build a struct:
- // type hiter struct {
- // key unsafe.Pointer // *Key
- // elem unsafe.Pointer // *Elem
- // t unsafe.Pointer // *OldMapType
- // h *hmap
- // buckets unsafe.Pointer
- // bptr unsafe.Pointer // *bmap
- // overflow unsafe.Pointer // *[]*bmap
- // oldoverflow unsafe.Pointer // *[]*bmap
- // startBucket uintptr
- // offset uint8
- // wrapped bool
- // B uint8
- // i uint8
- // bucket uintptr
- // checkBucket uintptr
- // clearSeq uint64
- // }
- // must match runtime/map.go:hiter.
- fields := []*types.Field{
- makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
- makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
- makefield("t", types.Types[types.TUNSAFEPTR]),
- makefield("h", types.NewPtr(hmap)),
- makefield("buckets", types.Types[types.TUNSAFEPTR]),
- makefield("bptr", types.Types[types.TUNSAFEPTR]),
- makefield("overflow", types.Types[types.TUNSAFEPTR]),
- makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
- makefield("startBucket", types.Types[types.TUINTPTR]),
- makefield("offset", types.Types[types.TUINT8]),
- makefield("wrapped", types.Types[types.TBOOL]),
- makefield("B", types.Types[types.TUINT8]),
- makefield("i", types.Types[types.TUINT8]),
- makefield("bucket", types.Types[types.TUINTPTR]),
- makefield("checkBucket", types.Types[types.TUINTPTR]),
- makefield("clearSeq", types.Types[types.TUINT64]),
- }
-
- // build iterator struct holding the above fields
- n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter"))
- hiter := types.NewNamed(n)
- n.SetType(hiter)
- n.SetTypecheck(1)
-
- hiter.SetUnderlying(types.NewStruct(fields))
- types.CalcSize(hiter)
- if hiter.Size() != int64(8+12*types.PtrSize) {
- base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 8+12*types.PtrSize)
- }
-
- oldHiterType = hiter
- return hiter
-}
-
-func writeOldMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) {
- // internal/abi.OldMapType
- s1 := writeType(t.Key())
- s2 := writeType(t.Elem())
- s3 := writeType(OldMapBucketType(t))
- hasher := genhash(t.Key())
-
- c.Field("Key").WritePtr(s1)
- c.Field("Elem").WritePtr(s2)
- c.Field("Bucket").WritePtr(s3)
- c.Field("Hasher").WritePtr(hasher)
- var flags uint32
- // Note: flags must match maptype accessors in ../../../../runtime/type.go
- // and maptype builder in ../../../../reflect/type.go:MapOf.
- if t.Key().Size() > abi.OldMapMaxKeyBytes {
- c.Field("KeySize").WriteUint8(uint8(types.PtrSize))
- flags |= 1 // indirect key
- } else {
- c.Field("KeySize").WriteUint8(uint8(t.Key().Size()))
- }
-
- if t.Elem().Size() > abi.OldMapMaxElemBytes {
- c.Field("ValueSize").WriteUint8(uint8(types.PtrSize))
- flags |= 2 // indirect value
- } else {
- c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size()))
- }
- c.Field("BucketSize").WriteUint16(uint16(OldMapBucketType(t).Size()))
- if types.IsReflexive(t.Key()) {
- flags |= 4 // reflexive key
- }
- if needkeyupdate(t.Key()) {
- flags |= 8 // need key update
- }
- if hashMightPanic(t.Key()) {
- flags |= 16 // hash might panic
- }
- c.Field("Flags").WriteUint32(flags)
-
- if u := t.Underlying(); u != t {
- // If t is a named map type, also keep the underlying map
- // type live in the binary. This is important to make sure that
- // a named map and that same map cast to its underlying type via
- // reflection, use the same hash function. See issue 37716.
- lsym.AddRel(base.Ctxt, obj.Reloc{Type: objabi.R_KEEP, Sym: writeType(u)})
- }
-}
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index fb60569670..4d1d780190 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -8,7 +8,6 @@ import (
"encoding/binary"
"fmt"
"internal/abi"
- "internal/buildcfg"
"slices"
"sort"
"strings"
@@ -491,6 +490,9 @@ func dcommontype(c rttype.Cursor, t *types.Type) {
exported = types.IsExported(t.Elem().Sym().Name)
}
}
+ if types.IsDirectIface(t) {
+ tflag |= abi.TFlagDirectIface
+ }
if tflag != abi.TFlag(uint8(tflag)) {
// this should optimize away completely
@@ -511,9 +513,6 @@ func dcommontype(c rttype.Cursor, t *types.Type) {
c.Field("FieldAlign_").WriteUint8(uint8(t.Alignment()))
kind := kinds[t.Kind()]
- if types.IsDirectIface(t) {
- kind |= abi.KindDirectIface
- }
c.Field("Kind_").WriteUint8(uint8(kind))
c.Field("Equal").WritePtr(eqfunc)
@@ -773,11 +772,7 @@ func writeType(t *types.Type) *obj.LSym {
rt = rttype.InterfaceType
dataAdd = len(imethods(t)) * int(rttype.IMethod.Size())
case types.TMAP:
- if buildcfg.Experiment.SwissMap {
- rt = rttype.SwissMapType
- } else {
- rt = rttype.OldMapType
- }
+ rt = rttype.MapType
case types.TPTR:
rt = rttype.PtrType
// TODO: use rttype.Type for Elem() is ANY?
@@ -877,11 +872,7 @@ func writeType(t *types.Type) *obj.LSym {
}
case types.TMAP:
- if buildcfg.Experiment.SwissMap {
- writeSwissMapType(t, lsym, c)
- } else {
- writeOldMapType(t, lsym, c)
- }
+ writeMapType(t, lsym, c)
case types.TPTR:
// internal/abi.PtrType
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
index 21edcabc58..f54ea47c88 100644
--- a/src/cmd/compile/internal/riscv64/ssa.go
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -14,6 +14,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
+ "internal/abi"
)
// ssaRegToReg maps ssa register numbers to obj register numbers.
@@ -508,12 +509,91 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Name = obj.NAME_EXTERN
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpRISCV64LoweredPanicBoundsA, ssa.OpRISCV64LoweredPanicBoundsB, ssa.OpRISCV64LoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+
+ case ssa.OpRISCV64LoweredPanicBoundsRR, ssa.OpRISCV64LoweredPanicBoundsRC, ssa.OpRISCV64LoweredPanicBoundsCR, ssa.OpRISCV64LoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpRISCV64LoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - riscv.REG_X5)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - riscv.REG_X5)
+ case ssa.OpRISCV64LoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - riscv.REG_X5)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_X5 + int16(yVal)
+ }
+ case ssa.OpRISCV64LoweredPanicBoundsCR:
+ yIsReg = true
+ yVal := int(v.Args[0].Reg() - riscv.REG_X5)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_X5 + int16(xVal)
+ }
+ case ssa.OpRISCV64LoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_X5 + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_X5 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(16) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
case ssa.OpRISCV64LoweredAtomicLoad8:
s.Prog(riscv.AFENCE)
diff --git a/src/cmd/compile/internal/rttype/rttype.go b/src/cmd/compile/internal/rttype/rttype.go
index aaf98dda15..925d3901d4 100644
--- a/src/cmd/compile/internal/rttype/rttype.go
+++ b/src/cmd/compile/internal/rttype/rttype.go
@@ -27,8 +27,7 @@ var ArrayType *types.Type
var ChanType *types.Type
var FuncType *types.Type
var InterfaceType *types.Type
-var OldMapType *types.Type
-var SwissMapType *types.Type
+var MapType *types.Type
var PtrType *types.Type
var SliceType *types.Type
var StructType *types.Type
@@ -55,8 +54,7 @@ func Init() {
ChanType = FromReflect(reflect.TypeOf(abi.ChanType{}))
FuncType = FromReflect(reflect.TypeOf(abi.FuncType{}))
InterfaceType = FromReflect(reflect.TypeOf(abi.InterfaceType{}))
- OldMapType = FromReflect(reflect.TypeOf(abi.OldMapType{}))
- SwissMapType = FromReflect(reflect.TypeOf(abi.SwissMapType{}))
+ MapType = FromReflect(reflect.TypeOf(abi.MapType{}))
PtrType = FromReflect(reflect.TypeOf(abi.PtrType{}))
SliceType = FromReflect(reflect.TypeOf(abi.SliceType{}))
StructType = FromReflect(reflect.TypeOf(abi.StructType{}))
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
index 4d24881dba..86efde4fa0 100644
--- a/src/cmd/compile/internal/s390x/ssa.go
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -15,6 +15,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
+ "internal/abi"
)
// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
@@ -281,6 +282,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
case ssa.OpS390XCPSDR:
p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
p.Reg = v.Args[0].Reg()
+ case ssa.OpS390XWFMAXDB, ssa.OpS390XWFMAXSB,
+ ssa.OpS390XWFMINDB, ssa.OpS390XWFMINSB:
+ p := opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), 1 /* Java Math.Max() */)
+ p.AddRestSource(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[1].Reg()})
case ssa.OpS390XDIVD, ssa.OpS390XDIVW,
ssa.OpS390XDIVDU, ssa.OpS390XDIVWU,
ssa.OpS390XMODD, ssa.OpS390XMODW,
@@ -569,12 +574,92 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.To.Name = obj.NAME_EXTERN
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpS390XLoweredPanicBoundsA, ssa.OpS390XLoweredPanicBoundsB, ssa.OpS390XLoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+
+ case ssa.OpS390XLoweredPanicBoundsRR, ssa.OpS390XLoweredPanicBoundsRC, ssa.OpS390XLoweredPanicBoundsCR, ssa.OpS390XLoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpS390XLoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - s390x.REG_R0)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - s390x.REG_R0)
+ case ssa.OpS390XLoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - s390x.REG_R0)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REG_R0 + int16(yVal)
+ }
+ case ssa.OpS390XLoweredPanicBoundsCR:
+ yIsReg = true
+ yVal := int(v.Args[0].Reg() - s390x.REG_R0)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REG_R0 + int16(xVal)
+ }
+ case ssa.OpS390XLoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REG_R0 + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REG_R0 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(16) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
+
case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT,
ssa.OpS390XNEG, ssa.OpS390XNEGW,
ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
diff --git a/src/cmd/compile/internal/ssa/_gen/386.rules b/src/cmd/compile/internal/ssa/_gen/386.rules
index 97414913e4..5f11502419 100644
--- a/src/cmd/compile/internal/ssa/_gen/386.rules
+++ b/src/cmd/compile/internal/ssa/_gen/386.rules
@@ -363,13 +363,16 @@
// Write barrier.
(WB ...) => (LoweredWB ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(PanicExtend ...) => (LoweredPanicExtendRR ...)
-(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
-(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
-(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
+(LoweredPanicBoundsRR [kind] x (MOVLconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:int64(c)}} mem)
+(LoweredPanicBoundsRR [kind] (MOVLconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:int64(c)}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVLconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:int64(c), Cy:p.C}} mem)
+
+(LoweredPanicExtendRR [kind] hi lo (MOVLconst [c]) mem) => (LoweredPanicExtendRC [kind] hi lo {PanicBoundsC{C:int64(c)}} mem)
+(LoweredPanicExtendRR [kind] (MOVLconst [hi]) (MOVLconst [lo]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:int64(hi)<<32 + int64(uint32(lo))}} y mem)
+(LoweredPanicExtendRC [kind] {p} (MOVLconst [hi]) (MOVLconst [lo]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:int64(hi)<<32+int64(uint32(lo)), Cy:p.C}} mem)
// ***************************
// Above: lowering rules
diff --git a/src/cmd/compile/internal/ssa/_gen/386Ops.go b/src/cmd/compile/internal/ssa/_gen/386Ops.go
index a976a91fb8..60599a33ab 100644
--- a/src/cmd/compile/internal/ssa/_gen/386Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/386Ops.go
@@ -76,7 +76,6 @@ func init() {
cx = buildReg("CX")
dx = buildReg("DX")
bx = buildReg("BX")
- si = buildReg("SI")
gp = buildReg("AX CX DX BX BP SI DI")
fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7")
gpsp = gp | buildReg("SP")
@@ -523,16 +522,19 @@ func init() {
// Returns a pointer to a write barrier buffer in DI.
{name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: callerSave &^ gp, outputs: []regMask{buildReg("DI")}}, clobberFlags: true, aux: "Int64"},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{dx, bx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{cx, dx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{ax, cx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- // Extend ops are the same as Bounds ops except the indexes are 64-bit.
- {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, dx, bx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
- {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, cx, dx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
- {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, ax, cx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // the RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{gp, gp}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{gp}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{gp}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
+
+ // Same as above, but the x value is 64 bits.
+ {name: "LoweredPanicExtendRR", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{ax | cx | dx | bx, ax | cx | dx | bx, gp}}, typ: "Mem", call: true}, // arg0=x_hi, arg1=x_lo, arg2=y, arg3=mem, returns memory.
+ {name: "LoweredPanicExtendRC", argLength: 3, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{ax | cx | dx | bx, ax | cx | dx | bx}}, typ: "Mem", call: true}, // arg0=x_hi, arg1=x_lo, arg2=mem, returns memory.
// Constant flag values. For any comparison, there are 5 possible
// outcomes: the three from the signed total order (<,==,>) and the
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
index 5dafc4b563..dd9deef4af 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
@@ -375,34 +375,17 @@
(MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr
(MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
-// Adjust zeros to be a multiple of 16 bytes.
-(Zero [s] destptr mem) && s%16 != 0 && s > 16 =>
- (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
- (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+// Zeroing up to 192 bytes uses straightline code.
+(Zero [s] destptr mem) && s >= 16 && s < 192 => (LoweredZero [s] destptr mem)
-(Zero [16] destptr mem) =>
- (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
-(Zero [32] destptr mem) =>
- (MOVOstoreconst [makeValAndOff(0,16)] destptr
- (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
-(Zero [48] destptr mem) =>
- (MOVOstoreconst [makeValAndOff(0,32)] destptr
- (MOVOstoreconst [makeValAndOff(0,16)] destptr
- (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
-(Zero [64] destptr mem) =>
- (MOVOstoreconst [makeValAndOff(0,48)] destptr
- (MOVOstoreconst [makeValAndOff(0,32)] destptr
- (MOVOstoreconst [makeValAndOff(0,16)] destptr
- (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
-
-// Medium zeroing uses a duff device.
-(Zero [s] destptr mem)
- && s > 64 && s <= 1024 && s%16 == 0 =>
- (DUFFZERO [s] destptr mem)
+// Zeroing up to ~1KB uses a small loop.
+(Zero [s] destptr mem) && s >= 192 && s <= repZeroThreshold => (LoweredZeroLoop [s] destptr mem)
// Large zeroing uses REP STOSQ.
-(Zero [s] destptr mem)
- && s > 1024 && s%8 == 0 =>
+(Zero [s] destptr mem) && s > repZeroThreshold && s%8 != 0 =>
+ (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [s] destptr mem) && s > repZeroThreshold && s%8 == 0 =>
(REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
// Lowering constants
@@ -558,9 +541,11 @@
// Write barrier.
(WB ...) => (LoweredWB ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVQconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVQconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVQconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVQconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
// lowering rotates
(RotateLeft8 ...) => (ROLB ...)
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
index 402f50bfc2..bc30e6574f 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
@@ -121,7 +121,6 @@ func init() {
ax = buildReg("AX")
cx = buildReg("CX")
dx = buildReg("DX")
- bx = buildReg("BX")
gp = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15")
g = buildReg("g")
fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14")
@@ -964,15 +963,30 @@ func init() {
// auxint = # of bytes to zero
// returns mem
{
- name: "DUFFZERO",
+ name: "LoweredZero",
aux: "Int64",
argLength: 2,
reg: regInfo{
- inputs: []regMask{buildReg("DI")},
- clobbers: buildReg("DI"),
+ inputs: []regMask{gp},
},
- //faultOnNilArg0: true, // Note: removed for 73748. TODO: reenable at some point
- unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
+ faultOnNilArg0: true,
+ },
+
+ // arg0 = pointer to start of memory to zero
+ // arg1 = mem
+ // auxint = # of bytes to zero
+ // returns mem
+ {
+ name: "LoweredZeroLoop",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{gp},
+ clobbersArg0: true,
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ needIntTemp: true,
},
// arg0 = address of memory to zero
@@ -1060,12 +1074,15 @@ func init() {
{name: "LoweredHasCPUFeature", argLength: 0, reg: gp01, rematerializeable: true, typ: "UInt64", aux: "Sym", symEffect: "None"},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{dx, bx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{cx, dx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{ax, cx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // the RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{gp, gp}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{gp}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{gp}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
// Constant flag values. For any comparison, there are 5 possible
// outcomes: the three from the signed total order (<,==,>) and the
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM.rules b/src/cmd/compile/internal/ssa/_gen/ARM.rules
index 431a12f35b..18b5d6bba6 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/_gen/ARM.rules
@@ -395,13 +395,16 @@
// Write barrier.
(WB ...) => (LoweredWB ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(PanicExtend ...) => (LoweredPanicExtendRR ...)
-(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
-(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
-(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
+(LoweredPanicBoundsRR [kind] x (MOVWconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:int64(c)}} mem)
+(LoweredPanicBoundsRR [kind] (MOVWconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:int64(c)}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVWconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:int64(c), Cy:p.C}} mem)
+
+(LoweredPanicExtendRR [kind] hi lo (MOVWconst [c]) mem) => (LoweredPanicExtendRC [kind] hi lo {PanicBoundsC{C:int64(c)}} mem)
+(LoweredPanicExtendRR [kind] (MOVWconst [hi]) (MOVWconst [lo]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:int64(hi)<<32 + int64(uint32(lo))}} y mem)
+(LoweredPanicExtendRC [kind] {p} (MOVWconst [hi]) (MOVWconst [lo]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:int64(hi)<<32+int64(uint32(lo)), Cy:p.C}} mem)
// Optimizations
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64.rules b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
index 01fe3a74f7..15ba10e216 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
@@ -601,9 +601,11 @@
// Publication barrier (0xe is ST option)
(PubBarrier mem) => (DMB [0xe] mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
// Optimizations
@@ -683,6 +685,14 @@
((EQ|NE) (CMPconst [0] x) yes no) => ((Z|NZ) x yes no)
((EQ|NE) (CMPWconst [0] x) yes no) => ((ZW|NZW) x yes no)
+((ULE|UGT) (CMPconst [0] x)) => ((EQ|NE) (CMPconst [0] x))
+((ULE|UGT) (CMPWconst [0] x)) => ((EQ|NE) (CMPWconst [0] x))
+
+((Z|NZ) sub:(SUB x y)) && sub.Uses == 1 => ((EQ|NE) (CMP x y))
+((ZW|NZW) sub:(SUB x y)) && sub.Uses == 1 => ((EQ|NE) (CMPW x y))
+((Z|NZ) sub:(SUBconst [c] y)) && sub.Uses == 1 => ((EQ|NE) (CMPconst [c] y))
+((ZW|NZW) sub:(SUBconst [c] y)) && sub.Uses == 1 => ((EQ|NE) (CMPWconst [int32(c)] y))
+
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN a (MUL <x.Type> x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMP a (MUL <x.Type> x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW a (MULW <x.Type> x y)) yes no)
@@ -1658,6 +1668,10 @@
(SRLconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVDconst [0])
(SRLconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVDconst [0])
+// Special cases for slice operations
+(ADD x0 x1:(ANDshiftRA x2:(SLLconst [sl] y) z [63])) && x1.Uses == 1 && x2.Uses == 1 => (ADDshiftLL x0 (ANDshiftRA <y.Type> y z [63]) [sl])
+(ADD x0 x1:(ANDshiftLL x2:(SRAconst [63] z) y [sl])) && x1.Uses == 1 && x2.Uses == 1 => (ADDshiftLL x0 (ANDshiftRA <y.Type> y z [63]) [sl])
+
// bitfield ops
// sbfiz
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
index ebb7ed5299..69db139ff0 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
@@ -144,11 +144,8 @@ func init() {
gpspsbg = gpspg | buildReg("SB")
fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
- r0 = buildReg("R0")
- r1 = buildReg("R1")
- r2 = buildReg("R2")
- r3 = buildReg("R3")
rz = buildReg("ZERO")
+ first16 = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15")
)
// Common regInfo
var (
@@ -760,12 +757,15 @@ func init() {
// Returns a pointer to a write barrier buffer in R25.
{name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R16 R17 R30"), outputs: []regMask{buildReg("R25")}}, clobberFlags: true, aux: "Int64"},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // the RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{first16, first16}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
// Prefetch instruction
// Do prefetch arg0 address with option aux. arg0=addr, arg1=memory, aux=option.
diff --git a/src/cmd/compile/internal/ssa/_gen/ARMOps.go b/src/cmd/compile/internal/ssa/_gen/ARMOps.go
index 3ad96fcac0..01cd48835e 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARMOps.go
+++ b/src/cmd/compile/internal/ssa/_gen/ARMOps.go
@@ -94,11 +94,11 @@ func init() {
gpspsbg = gpspg | buildReg("SB")
fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ lr = buildReg("R14")
r0 = buildReg("R0")
r1 = buildReg("R1")
r2 = buildReg("R2")
r3 = buildReg("R3")
- r4 = buildReg("R4")
)
// Common regInfo
var (
@@ -540,16 +540,19 @@ func init() {
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- // Extend ops are the same as Bounds ops except the indexes are 64-bit.
- {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r2, r3}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
- {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r1, r2}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
- {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r0, r1}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // the RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{gp &^ lr, gp &^ lr}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{gp &^ lr}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{gp &^ lr}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
+
+ // Same as above, but the x value is 64 bits.
+ {name: "LoweredPanicExtendRR", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r0 | r1 | r2 | r3, r0 | r1 | r2 | r3, gp}}, typ: "Mem", call: true}, // arg0=x_hi, arg1=x_lo, arg2=y, arg3=mem, returns memory.
+ {name: "LoweredPanicExtendRC", argLength: 3, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{r0 | r1 | r2 | r3, r0 | r1 | r2 | r3}}, typ: "Mem", call: true}, // arg0=x_hi, arg1=x_lo, arg2=mem, returns memory.
// Constant flag value.
// Note: there's an "unordered" outcome for floating-point
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
index cab63a503f..9d0ad0148f 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
@@ -527,9 +527,11 @@
// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVVconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVVconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVVconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVVconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
(CondSelect <t> x y cond) => (OR (MASKEQZ <t> x cond) (MASKNEZ <t> y cond))
@@ -748,10 +750,10 @@
(SRLVconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVVconst [0])
// mul by constant
-(MULV x (MOVVconst [-1])) => (NEGV x)
(MULV _ (MOVVconst [0])) => (MOVVconst [0])
(MULV x (MOVVconst [1])) => x
-(MULV x (MOVVconst [c])) && isPowerOfTwo(c) => (SLLVconst [log64(c)] x)
+
+(MULV x (MOVVconst [c])) && canMulStrengthReduce(config, c) => {mulStrengthReduce(v, x, c)}
// div by constant
(DIVVU x (MOVVconst [1])) => x
@@ -843,6 +845,14 @@
(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)
+// Avoid extending when already sufficiently masked.
+(MOVBreg x:(ANDconst [c] y)) && c >= 0 && int64(int8(c)) == c => x
+(MOVHreg x:(ANDconst [c] y)) && c >= 0 && int64(int16(c)) == c => x
+(MOVWreg x:(ANDconst [c] y)) && c >= 0 && int64(int32(c)) == c => x
+(MOVBUreg x:(ANDconst [c] y)) && c >= 0 && int64(uint8(c)) == c => x
+(MOVHUreg x:(ANDconst [c] y)) && c >= 0 && int64(uint16(c)) == c => x
+(MOVWUreg x:(ANDconst [c] y)) && c >= 0 && int64(uint32(c)) == c => x
+
// Prefetch instructions (hint specified using aux field)
// For PRELD{,X} A value of hint indicates:
// hint=0 is defined as load prefetch to L1-cache
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
index 5ef304b4f9..0e204c4a3c 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
@@ -130,10 +130,7 @@ func init() {
gpspsbg = gpspg | buildReg("SB")
fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
- r1 = buildReg("R20")
- r2 = buildReg("R21")
- r3 = buildReg("R23")
- r4 = buildReg("R24")
+ first16 = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19")
)
// Common regInfo
var (
@@ -563,12 +560,15 @@ func init() {
// Do data barrier. arg0=memorys
{name: "LoweredPubBarrier", argLength: 1, asm: "DBAR", hasSideEffects: true},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // the RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{first16, first16}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
// Prefetch instruction
// Do prefetch arg0 address with option aux. arg0=addr, arg1=memory, aux=option.
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64latelower.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64latelower.rules
new file mode 100644
index 0000000000..95844381c2
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64latelower.rules
@@ -0,0 +1,6 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Prefer addition when shifting left by one.
+(SLLVconst [1] x) => (ADDV x x)
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS.rules b/src/cmd/compile/internal/ssa/_gen/MIPS.rules
index a9bac5fabe..80bf9017f5 100644
--- a/src/cmd/compile/internal/ssa/_gen/MIPS.rules
+++ b/src/cmd/compile/internal/ssa/_gen/MIPS.rules
@@ -423,13 +423,17 @@
// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
-(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
-(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
-(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(PanicExtend ...) => (LoweredPanicExtendRR ...)
+
+(LoweredPanicBoundsRR [kind] x (MOVWconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:int64(c)}} mem)
+(LoweredPanicBoundsRR [kind] (MOVWconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:int64(c)}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVWconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:int64(c), Cy:p.C}} mem)
+
+(LoweredPanicExtendRR [kind] hi lo (MOVWconst [c]) mem) => (LoweredPanicExtendRC [kind] hi lo {PanicBoundsC{C:int64(c)}} mem)
+(LoweredPanicExtendRR [kind] (MOVWconst [hi]) (MOVWconst [lo]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:int64(hi)<<32 + int64(uint32(lo))}} y mem)
+(LoweredPanicExtendRC [kind] {p} (MOVWconst [hi]) (MOVWconst [lo]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:int64(hi)<<32+int64(uint32(lo)), Cy:p.C}} mem)
// Optimizations
@@ -607,13 +611,13 @@
(Select0 (MULTU (MOVWconst [1]) _ )) => (MOVWconst [0])
(Select1 (MULTU (MOVWconst [-1]) x )) => (NEG <x.Type> x)
(Select0 (MULTU (MOVWconst [-1]) x )) => (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
-(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
-(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(int64(uint32(c))) => (SRLconst [int32(32-log2uint32(int64(c)))] x)
+(Select1 (MULTU (MOVWconst [c]) x )) && isUnsignedPowerOfTwo(uint32(c)) => (SLLconst [int32(log32u(uint32(c)))] x)
+(Select0 (MULTU (MOVWconst [c]) x )) && isUnsignedPowerOfTwo(uint32(c)) => (SRLconst [int32(32-log32u(uint32(c)))] x)
(MUL (MOVWconst [0]) _ ) => (MOVWconst [0])
(MUL (MOVWconst [1]) x ) => x
(MUL (MOVWconst [-1]) x ) => (NEG x)
-(MUL (MOVWconst [c]) x ) && isPowerOfTwo(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
+(MUL (MOVWconst [c]) x ) && isUnsignedPowerOfTwo(uint32(c)) => (SLLconst [int32(log32u(uint32(c)))] x)
// generic simplifications
(ADD x (NEG y)) => (SUB x y)
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS64.rules b/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
index 8e484f4a3d..bb862e282b 100644
--- a/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
@@ -479,9 +479,11 @@
// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVVconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVVconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVVconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVVconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
// Optimizations
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go b/src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go
index 6c04a1aea2..14f6c5c990 100644
--- a/src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go
@@ -136,10 +136,7 @@ func init() {
lo = buildReg("LO")
hi = buildReg("HI")
callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g
- r1 = buildReg("R1")
- r2 = buildReg("R2")
- r3 = buildReg("R3")
- r4 = buildReg("R4")
+ first16 = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16")
)
// Common regInfo
var (
@@ -469,12 +466,15 @@ func init() {
// Do data barrier. arg0=memorys
{name: "LoweredPubBarrier", argLength: 1, asm: "SYNC", hasSideEffects: true},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // the RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{first16, first16}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
}
blocks := []blockData{
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPSOps.go b/src/cmd/compile/internal/ssa/_gen/MIPSOps.go
index 62c35ed49f..a340775c07 100644
--- a/src/cmd/compile/internal/ssa/_gen/MIPSOps.go
+++ b/src/cmd/compile/internal/ssa/_gen/MIPSOps.go
@@ -120,11 +120,8 @@ func init() {
lo = buildReg("LO")
hi = buildReg("HI")
callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g
- r1 = buildReg("R1")
- r2 = buildReg("R2")
- r3 = buildReg("R3")
- r4 = buildReg("R4")
- r5 = buildReg("R5")
+ first16 = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16")
+ first4 = buildReg("R1 R2 R3 R4")
)
// Common regInfo
var (
@@ -411,16 +408,19 @@ func init() {
// Do data barrier. arg0=memorys
{name: "LoweredPubBarrier", argLength: 1, asm: "SYNC", hasSideEffects: true},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- // Extend ops are the same as Bounds ops except the indexes are 64-bit.
- {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r3, r4}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
- {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r2, r3}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
- {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r1, r2}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // the RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{first16, first16}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
+
+ // Same as above, but the x value is 64 bits.
+ {name: "LoweredPanicExtendRR", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{first4, first4, first16}}, typ: "Mem", call: true}, // arg0=x_hi, arg1=x_lo, arg2=y, arg3=mem, returns memory.
+ {name: "LoweredPanicExtendRC", argLength: 3, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first4, first4}}, typ: "Mem", call: true}, // arg0=x_hi, arg1=x_lo, arg2=mem, returns memory.
}
blocks := []blockData{
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
index dc1cc97fb3..a99a16adff 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
@@ -407,9 +407,11 @@
// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
// Small moves
(Move [0] _ _ mem) => mem
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
index 8cb042a604..c12bc47621 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
@@ -49,7 +49,7 @@ func riscv64RegName(r int) string {
func init() {
var regNamesRISCV64 []string
- var gpMask, fpMask, gpgMask, gpspMask, gpspsbMask, gpspsbgMask regMask
+ var gpMask, fpMask, gpgMask, gpspMask, gpspsbMask, gpspsbgMask, first16Mask regMask
regNamed := make(map[string]regMask)
// Build the list of register names, creating an appropriately indexed
@@ -93,6 +93,9 @@ func init() {
gpspMask |= mask
gpspsbMask |= mask
gpspsbgMask |= mask
+ if r >= 5 && r < 5+16 {
+ first16Mask |= mask
+ }
}
}
@@ -429,12 +432,15 @@ func init() {
// Do data barrier. arg0=memorys
{name: "LoweredPubBarrier", argLength: 1, asm: "FENCE", hasSideEffects: true},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X7"], regNamed["X28"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X6"], regNamed["X7"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // the RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{first16Mask, first16Mask}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16Mask}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first16Mask}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
// F extension.
{name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true, typ: "Float32"}, // arg0 + arg1
diff --git a/src/cmd/compile/internal/ssa/_gen/S390X.rules b/src/cmd/compile/internal/ssa/_gen/S390X.rules
index 231ad0615d..664bf4a89c 100644
--- a/src/cmd/compile/internal/ssa/_gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/_gen/S390X.rules
@@ -145,6 +145,9 @@
(Sqrt32 ...) => (FSQRTS ...)
+(Max(64|32)F ...) => (WFMAX(D|S)B ...)
+(Min(64|32)F ...) => (WFMIN(D|S)B ...)
+
// Atomic loads and stores.
// The SYNC instruction (fast-BCR-serialization) prevents store-load
// reordering. Other sequences of memory operations (load-load,
@@ -455,9 +458,11 @@
// Write barrier.
(WB ...) => (LoweredWB ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
// ***************************
// Above: lowering rules
diff --git a/src/cmd/compile/internal/ssa/_gen/S390XOps.go b/src/cmd/compile/internal/ssa/_gen/S390XOps.go
index 2f57d12630..c002d5bcc3 100644
--- a/src/cmd/compile/internal/ssa/_gen/S390XOps.go
+++ b/src/cmd/compile/internal/ssa/_gen/S390XOps.go
@@ -114,6 +114,7 @@ func init() {
sb = buildReg("SB")
r0 = buildReg("R0")
tmp = buildReg("R11") // R11 is used as a temporary in a small number of instructions.
+ lr = buildReg("R14")
// R10 is reserved by the assembler.
gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14")
@@ -222,6 +223,12 @@ func init() {
{name: "LNDFR", argLength: 1, reg: fp11, asm: "LNDFR"}, // fp64/fp32 clear sign bit
{name: "CPSDR", argLength: 2, reg: fp21, asm: "CPSDR"}, // fp64/fp32 copy arg1 sign bit to arg0
+ // Single element vector floating point min / max instructions
+ {name: "WFMAXDB", argLength: 2, reg: fp21, asm: "WFMAXDB", typ: "Float64"}, // max[float64](arg0, arg1)
+ {name: "WFMAXSB", argLength: 2, reg: fp21, asm: "WFMAXSB", typ: "Float32"}, // max[float32](arg0, arg1)
+ {name: "WFMINDB", argLength: 2, reg: fp21, asm: "WFMINDB", typ: "Float64"}, // min[float64](arg0, arg1)
+ {name: "WFMINSB", argLength: 2, reg: fp21, asm: "WFMINSB", typ: "Float32"}, // min[float32](arg0, arg1)
+
// Round to integer, float64 only.
//
// aux | rounding mode
@@ -512,12 +519,15 @@ func init() {
// Returns a pointer to a write barrier buffer in R9.
{name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R14") | r1, outputs: []regMask{r9}}, clobberFlags: true, aux: "Int64"},
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // the RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, as prove derives the fact that a [0] bounds
+ // failure means the length must have also been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{gp &^ lr, gp &^ lr}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{gp &^ lr}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{gp &^ lr}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
// Constant condition code values. The condition code can be 0, 1, 2 or 3.
{name: "FlagEQ"}, // CC=0 (equal)
diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules
index 1077921f93..00e8fcbe32 100644
--- a/src/cmd/compile/internal/ssa/_gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/_gen/generic.rules
@@ -295,6 +295,10 @@
(Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Neq16 (Const16 <t> [c-d]) x)
(Neq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Neq8 (Const8 <t> [c-d]) x)
+(CondSelect x _ (ConstBool [true ])) => x
+(CondSelect _ y (ConstBool [false])) => y
+(CondSelect x x _) => x
+
// signed integer range: ( c <= x && x (<|<=) d ) -> ( unsigned(x-c) (<|<=) unsigned(d-c) )
(AndB (Leq64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
(AndB (Leq32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
@@ -1010,11 +1014,10 @@
// See ../magic.go for a detailed description of these algorithms.
// Unsigned divide by power of 2. Strength reduce to a shift.
-(Div8u n (Const8 [c])) && isPowerOfTwo(c) => (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
-(Div16u n (Const16 [c])) && isPowerOfTwo(c) => (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
-(Div32u n (Const32 [c])) && isPowerOfTwo(c) => (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
-(Div64u n (Const64 [c])) && isPowerOfTwo(c) => (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
-(Div64u n (Const64 [-1<<63])) => (Rsh64Ux64 n (Const64 <typ.UInt64> [63]))
+(Div8u n (Const8 [c])) && isUnsignedPowerOfTwo(uint8(c)) => (Rsh8Ux64 n (Const64 <typ.UInt64> [log8u(uint8(c))]))
+(Div16u n (Const16 [c])) && isUnsignedPowerOfTwo(uint16(c)) => (Rsh16Ux64 n (Const64 <typ.UInt64> [log16u(uint16(c))]))
+(Div32u n (Const32 [c])) && isUnsignedPowerOfTwo(uint32(c)) => (Rsh32Ux64 n (Const64 <typ.UInt64> [log32u(uint32(c))]))
+(Div64u n (Const64 [c])) && isUnsignedPowerOfTwo(uint64(c)) => (Rsh64Ux64 n (Const64 <typ.UInt64> [log64u(uint64(c))]))
// Signed non-negative divide by power of 2.
(Div8 n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
@@ -1290,11 +1293,10 @@
(Const64 <typ.UInt64> [63])))
// Unsigned mod by power of 2 constant.
-(Mod8u <t> n (Const8 [c])) && isPowerOfTwo(c) => (And8 n (Const8 <t> [c-1]))
-(Mod16u <t> n (Const16 [c])) && isPowerOfTwo(c) => (And16 n (Const16 <t> [c-1]))
-(Mod32u <t> n (Const32 [c])) && isPowerOfTwo(c) => (And32 n (Const32 <t> [c-1]))
-(Mod64u <t> n (Const64 [c])) && isPowerOfTwo(c) => (And64 n (Const64 <t> [c-1]))
-(Mod64u <t> n (Const64 [-1<<63])) => (And64 n (Const64 <t> [1<<63-1]))
+(Mod8u <t> n (Const8 [c])) && isUnsignedPowerOfTwo(uint8(c)) => (And8 n (Const8 <t> [c-1]))
+(Mod16u <t> n (Const16 [c])) && isUnsignedPowerOfTwo(uint16(c)) => (And16 n (Const16 <t> [c-1]))
+(Mod32u <t> n (Const32 [c])) && isUnsignedPowerOfTwo(uint32(c)) => (And32 n (Const32 <t> [c-1]))
+(Mod64u <t> n (Const64 [c])) && isUnsignedPowerOfTwo(uint64(c)) => (And64 n (Const64 <t> [c-1]))
// Signed non-negative mod by power of 2 constant.
(Mod8 <t> n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo(c) => (And8 n (Const8 <t> [c-1]))
@@ -2053,9 +2055,7 @@
(Select1 (MakeTuple x y)) => y
// for rewriting results of some late-expanded rewrites (below)
-(SelectN [0] (MakeResult x ___)) => x
-(SelectN [1] (MakeResult x y ___)) => y
-(SelectN [2] (MakeResult x y z ___)) => z
+(SelectN [n] m:(MakeResult ___)) => m.Args[n]
// for late-expanded calls, recognize newobject and remove zeroing and nilchecks
(Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
@@ -2843,3 +2843,19 @@
&& clobber(sbts)
&& clobber(key)
=> (StaticLECall {f} [argsize] dict_ (StringMake <typ.String> ptr len) mem)
+
+// Transform some CondSelect into math operations.
+// if b { x++ } => x += b // but not on arm64 because it has CSINC
+(CondSelect (Add8 <t> x (Const8 [1])) x bool) && config.arch != "arm64" => (Add8 x (CvtBoolToUint8 <t> bool))
+(CondSelect (Add(64|32|16) <t> x (Const(64|32|16) [1])) x bool) && config.arch != "arm64" => (Add(64|32|16) x (ZeroExt8to(64|32|16) <t> (CvtBoolToUint8 <types.Types[types.TUINT8]> bool)))
+
+// if b { x-- } => x -= b
+(CondSelect (Add8 <t> x (Const8 [-1])) x bool) => (Sub8 x (CvtBoolToUint8 <t> bool))
+(CondSelect (Add(64|32|16) <t> x (Const(64|32|16) [-1])) x bool) => (Sub(64|32|16) x (ZeroExt8to(64|32|16) <t> (CvtBoolToUint8 <types.Types[types.TUINT8]> bool)))
+
+// if b { x <<= 1 } => x <<= b
+(CondSelect (Lsh(64|32|16|8)x64 x (Const64 [1])) x bool) => (Lsh(64|32|16|8)x8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+
+// if b { x >>= 1 } => x >>= b
+(CondSelect (Rsh(64|32|16|8)x64 x (Const64 [1])) x bool) => (Rsh(64|32|16|8)x8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+(CondSelect (Rsh(64|32|16|8)Ux64 x (Const64 [1])) x bool) => (Rsh(64|32|16|8)Ux8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
diff --git a/src/cmd/compile/internal/ssa/_gen/main.go b/src/cmd/compile/internal/ssa/_gen/main.go
index 5b85cec79c..a26a4a4ea8 100644
--- a/src/cmd/compile/internal/ssa/_gen/main.go
+++ b/src/cmd/compile/internal/ssa/_gen/main.go
@@ -88,6 +88,10 @@ type regInfo struct {
// clobbers encodes the set of registers that are overwritten by
// the instruction (other than the output registers).
clobbers regMask
+ // Instruction clobbers the register containing input 0.
+ clobbersArg0 bool
+ // Instruction clobbers the register containing input 1.
+ clobbersArg1 bool
// outputs[i] encodes the set of registers allowed for the i'th output.
outputs []regMask
}
@@ -294,7 +298,7 @@ func genOp() {
fmt.Fprintf(w, "argLen: %d,\n", v.argLength)
if v.rematerializeable {
- if v.reg.clobbers != 0 {
+ if v.reg.clobbers != 0 || v.reg.clobbersArg0 || v.reg.clobbersArg1 {
log.Fatalf("%s is rematerializeable and clobbers registers", v.name)
}
if v.clobberFlags {
@@ -403,6 +407,12 @@ func genOp() {
if v.reg.clobbers > 0 {
fmt.Fprintf(w, "clobbers: %d,%s\n", v.reg.clobbers, a.regMaskComment(v.reg.clobbers))
}
+ if v.reg.clobbersArg0 {
+ fmt.Fprintf(w, "clobbersArg0: true,\n")
+ }
+ if v.reg.clobbersArg1 {
+ fmt.Fprintf(w, "clobbersArg1: true,\n")
+ }
// reg outputs
s = s[:0]
diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go
index 5e66398927..57fd2b0594 100644
--- a/src/cmd/compile/internal/ssa/_gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go
@@ -549,6 +549,13 @@ func (u *unusedInspector) node(node ast.Node) {
}
}
case *ast.BasicLit:
+ case *ast.CompositeLit:
+ for _, e := range node.Elts {
+ u.node(e)
+ }
+ case *ast.KeyValueExpr:
+ u.node(node.Key)
+ u.node(node.Value)
case *ast.ValueSpec:
u.exprs(node.Values)
default:
@@ -1440,7 +1447,8 @@ func parseValue(val string, arch arch, loc string) (op opData, oparch, typ, auxi
func opHasAuxInt(op opData) bool {
switch op.aux {
case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "UInt8", "Float32", "Float64",
- "SymOff", "CallOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop":
+ "SymOff", "CallOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop",
+ "PanicBoundsC", "PanicBoundsCC":
return true
}
return false
@@ -1449,7 +1457,7 @@ func opHasAuxInt(op opData) bool {
func opHasAux(op opData) bool {
switch op.aux {
case "String", "Sym", "SymOff", "Call", "CallOff", "SymValAndOff", "Typ", "TypSize",
- "S390XCCMask", "S390XRotateParams":
+ "S390XCCMask", "S390XRotateParams", "PanicBoundsC", "PanicBoundsCC":
return true
}
return false
@@ -1804,6 +1812,10 @@ func (op opData) auxType() string {
return "s390x.CCMask"
case "S390XRotateParams":
return "s390x.RotateParams"
+ case "PanicBoundsC":
+ return "PanicBoundsC"
+ case "PanicBoundsCC":
+ return "PanicBoundsCC"
default:
return "invalid"
}
@@ -1844,6 +1856,8 @@ func (op opData) auxIntType() string {
return "flagConstant"
case "ARM64BitField":
return "arm64BitField"
+ case "PanicBoundsC", "PanicBoundsCC":
+ return "int64"
default:
return "invalid"
}
diff --git a/src/cmd/compile/internal/ssa/biasedsparsemap.go b/src/cmd/compile/internal/ssa/biasedsparsemap.go
index 948aef9a9b..3032309b7a 100644
--- a/src/cmd/compile/internal/ssa/biasedsparsemap.go
+++ b/src/cmd/compile/internal/ssa/biasedsparsemap.go
@@ -56,19 +56,23 @@ func (s *biasedSparseMap) contains(x uint) bool {
return s.s.contains(ID(int(x) - s.first))
}
-// get returns the value s maps for key x, or -1 if
-// x is not mapped or is out of range for s.
-func (s *biasedSparseMap) get(x uint) int32 {
+// get returns the value s maps for key x and true, or
+// 0/false if x is not mapped or is out of range for s.
+func (s *biasedSparseMap) get(x uint) (int32, bool) {
if s == nil || s.s == nil {
- return -1
+ return 0, false
}
if int(x) < s.first {
- return -1
+ return 0, false
}
if int(x) >= s.cap() {
- return -1
+ return 0, false
}
- return s.s.get(ID(int(x) - s.first))
+ k := ID(int(x) - s.first)
+ if !s.s.contains(k) {
+ return 0, false
+ }
+ return s.s.get(k)
}
// getEntry returns the i'th key and value stored in s,
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
index cb6788cd95..f33c9bc87b 100644
--- a/src/cmd/compile/internal/ssa/check.go
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -215,6 +215,9 @@ func checkFunc(f *Func) {
f.Fatalf("bad FlagConstant AuxInt value for %v", v)
}
canHaveAuxInt = true
+ case auxPanicBoundsC, auxPanicBoundsCC:
+ canHaveAux = true
+ canHaveAuxInt = true
default:
f.Fatalf("unknown aux type for %s", v.Op)
}
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
index e9500a24ed..1f47362583 100644
--- a/src/cmd/compile/internal/ssa/compile.go
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -473,11 +473,11 @@ var passes = [...]pass{
{name: "expand calls", fn: expandCalls, required: true},
{name: "decompose builtin", fn: postExpandCallsDecompose, required: true},
{name: "softfloat", fn: softfloat, required: true},
+ {name: "branchelim", fn: branchelim},
{name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
{name: "dead auto elim", fn: elimDeadAutosGeneric},
{name: "sccp", fn: sccp},
{name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
- {name: "branchelim", fn: branchelim},
{name: "late fuse", fn: fuseLate},
{name: "check bce", fn: checkbce},
{name: "dse", fn: dse},
@@ -583,6 +583,10 @@ var passOrder = [...]constraint{
{"late fuse", "memcombine"},
// memcombine is a arch-independent pass.
{"memcombine", "lower"},
+ // late opt transforms some CondSelects into math.
+ {"branchelim", "late opt"},
+ // branchelim is an arch-independent pass.
+ {"branchelim", "lower"},
}
func init() {
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 0299e808c6..1ee94b4ee2 100644
--- a/src/cmd/compile/internal/ssa/config.go
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -291,6 +291,8 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo
c.RegSize = 8
c.lowerBlock = rewriteBlockLOONG64
c.lowerValue = rewriteValueLOONG64
+ c.lateLowerBlock = rewriteBlockLOONG64latelower
+ c.lateLowerValue = rewriteValueLOONG64latelower
c.registers = registersLOONG64[:]
c.gpRegMask = gpRegMaskLOONG64
c.fpRegMask = fpRegMaskLOONG64
@@ -570,6 +572,43 @@ func (c *Config) buildRecipes(arch string) {
return m.Block.NewValue2I(m.Pos, OpARM64SUBshiftLL, m.Type, int64(i), x, y)
})
}
+ case "loong64":
+ // - multiply is 4 cycles.
+ // - add/sub/shift are 1 cycle.
+ // On loong64, using a multiply also needs to load the constant into a register.
+ // TODO: figure out a happy medium.
+ mulCost = 45
+
+ // add
+ r(1, 1, 10,
+ func(m, x, y *Value) *Value {
+ return m.Block.NewValue2(m.Pos, OpLOONG64ADDV, m.Type, x, y)
+ })
+ // neg
+ r(-1, 0, 10,
+ func(m, x, y *Value) *Value {
+ return m.Block.NewValue1(m.Pos, OpLOONG64NEGV, m.Type, x)
+ })
+ // sub
+ r(1, -1, 10,
+ func(m, x, y *Value) *Value {
+ return m.Block.NewValue2(m.Pos, OpLOONG64SUBV, m.Type, x, y)
+ })
+
+ // regular shifts
+ for i := 1; i < 64; i++ {
+ c := 10
+ if i == 1 {
+ // Prefer x<<1 over x+x.
+ // Note that we eventually reverse this decision in LOONG64latelower.rules,
+ // but this makes shift combining rules in LOONG64.rules simpler.
+ c--
+ }
+ r(1<<i, 0, c,
+ func(m, x, y *Value) *Value {
+ return m.Block.NewValue1I(m.Pos, OpLOONG64SLLVconst, m.Type, int64(i), x)
+ })
+ }
}
c.mulRecipes = map[int64]mulRecipe{}
@@ -636,17 +675,58 @@ func (c *Config) buildRecipes(arch string) {
}
}
+ // Currently we only process 3 linear combination instructions for loong64.
+ if arch == "loong64" {
+ // Three-instruction recipes.
+ // D: The first and the second are all single-instruction recipes, and they are also the third's inputs.
+ // E: The first single-instruction is the second's input, and the second is the third's input.
+
+ // D
+ for _, first := range linearCombos {
+ for _, second := range linearCombos {
+ for _, third := range linearCombos {
+ x := third.a*(first.a+first.b) + third.b*(second.a+second.b)
+ cost := first.cost + second.cost + third.cost
+ old := c.mulRecipes[x]
+ if (old.build == nil || cost < old.cost) && cost < mulCost {
+ c.mulRecipes[x] = mulRecipe{cost: cost, build: func(m, v *Value) *Value {
+ v1 := first.build(m, v, v)
+ v2 := second.build(m, v, v)
+ return third.build(m, v1, v2)
+ }}
+ }
+ }
+ }
+ }
+
+ // E
+ for _, first := range linearCombos {
+ for _, second := range linearCombos {
+ for _, third := range linearCombos {
+ x := third.a*(second.a*(first.a+first.b)+second.b) + third.b
+ cost := first.cost + second.cost + third.cost
+ old := c.mulRecipes[x]
+ if (old.build == nil || cost < old.cost) && cost < mulCost {
+ c.mulRecipes[x] = mulRecipe{cost: cost, build: func(m, v *Value) *Value {
+ v1 := first.build(m, v, v)
+ v2 := second.build(m, v1, v)
+ return third.build(m, v2, v)
+ }}
+ }
+ }
+ }
+ }
+ }
+
// These cases should be handled specially by rewrite rules.
// (Otherwise v * 1 == (neg (neg v)))
delete(c.mulRecipes, 0)
delete(c.mulRecipes, 1)
- // Currently we assume that it doesn't help to do 3 linear
- // combination instructions.
-
// Currently:
// len(c.mulRecipes) == 5984 on arm64
// 680 on amd64
+ // 5984 on loong64
// This function takes ~2.5ms on arm64.
//println(len(c.mulRecipes))
}
diff --git a/src/cmd/compile/internal/ssa/deadcode.go b/src/cmd/compile/internal/ssa/deadcode.go
index 1aa0f9e296..aa85097c29 100644
--- a/src/cmd/compile/internal/ssa/deadcode.go
+++ b/src/cmd/compile/internal/ssa/deadcode.go
@@ -257,7 +257,7 @@ func deadcode(f *Func) {
// Find new homes for lost lines -- require earliest in data flow with same line that is also in same block
for i := len(order) - 1; i >= 0; i-- {
w := order[i]
- if j := pendingLines.get(w.Pos); j > -1 && f.Blocks[j] == w.Block {
+ if j, ok := pendingLines.get(w.Pos); ok && f.Blocks[j] == w.Block {
w.Pos = w.Pos.WithIsStmt()
pendingLines.remove(w.Pos)
}
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
index f8c69dc698..9e67e83399 100644
--- a/src/cmd/compile/internal/ssa/deadstore.go
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -118,7 +118,8 @@ func dse(f *Func) {
ptr = la
}
}
- sr := shadowRange(shadowed.get(ptr.ID))
+ srNum, _ := shadowed.get(ptr.ID)
+ sr := shadowRange(srNum)
if sr.contains(off, off+sz) {
// Modify the store/zero into a copy of the memory state,
// effectively eliding the store operation.
@@ -156,9 +157,7 @@ func dse(f *Func) {
// A shadowRange encodes a set of byte offsets [lo():hi()] from
// a given pointer that will be written to later in the block.
-// A zero shadowRange encodes an empty shadowed range (and so
-// does a -1 shadowRange, which is what sparsemap.get returns
-// on a failed lookup).
+// A zero shadowRange encodes an empty shadowed range.
type shadowRange int32
func (sr shadowRange) lo() int64 {
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index aa503eda87..e92b37fb7b 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -41,6 +41,9 @@ type FuncDebug struct {
RegOutputParams []*ir.Name
// Variable declarations that were removed during optimization
OptDcl []*ir.Name
+ // The ssa.Func.EntryID value, used to build location lists for
+ // return values promoted to heap in later DWARF generation.
+ EntryID ID
// Filled in by the user. Translates Block and Value ID to PC.
//
@@ -1645,13 +1648,13 @@ func readPtr(ctxt *obj.Link, buf []byte) uint64 {
}
-// setupLocList creates the initial portion of a location list for a
+// SetupLocList creates the initial portion of a location list for a
// user variable. It emits the encoded start/end of the range and a
// placeholder for the size. Return value is the new list plus the
// slot in the list holding the size (to be updated later).
-func setupLocList(ctxt *obj.Link, f *Func, list []byte, st, en ID) ([]byte, int) {
- start, startOK := encodeValue(ctxt, f.Entry.ID, st)
- end, endOK := encodeValue(ctxt, f.Entry.ID, en)
+func SetupLocList(ctxt *obj.Link, entryID ID, list []byte, st, en ID) ([]byte, int) {
+ start, startOK := encodeValue(ctxt, entryID, st)
+ end, endOK := encodeValue(ctxt, entryID, en)
if !startOK || !endOK {
// This could happen if someone writes a function that uses
// >65K values on a 32-bit platform. Hopefully a degraded debugging
@@ -1800,7 +1803,6 @@ func isNamedRegParam(p abi.ABIParamAssignment) bool {
// appropriate for the ".closureptr" compiler-synthesized variable
// needed by the debugger for range func bodies.
func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32, rval *FuncDebug) {
-
needCloCtx := f.CloSlot != nil
pri := f.ABISelf.ABIAnalyzeFuncType(f.Type)
@@ -1911,7 +1913,7 @@ func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, sta
// Param is arriving in one or more registers. We need a 2-element
// location expression for it. First entry in location list
// will correspond to lifetime in input registers.
- list, sizeIdx := setupLocList(ctxt, f, rval.LocationLists[pidx],
+ list, sizeIdx := SetupLocList(ctxt, f.Entry.ID, rval.LocationLists[pidx],
BlockStart.ID, afterPrologVal)
if list == nil {
pidx++
@@ -1961,7 +1963,7 @@ func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, sta
// Second entry in the location list will be the stack home
// of the param, once it has been spilled. Emit that now.
- list, sizeIdx = setupLocList(ctxt, f, list,
+ list, sizeIdx = SetupLocList(ctxt, f.Entry.ID, list,
afterPrologVal, FuncEnd.ID)
if list == nil {
pidx++
diff --git a/src/cmd/compile/internal/ssa/debug_lines_test.go b/src/cmd/compile/internal/ssa/debug_lines_test.go
index 857cce785f..79dbd91c2f 100644
--- a/src/cmd/compile/internal/ssa/debug_lines_test.go
+++ b/src/cmd/compile/internal/ssa/debug_lines_test.go
@@ -115,6 +115,34 @@ func TestDebugLines_53456(t *testing.T) {
testDebugLinesDefault(t, "-N -l", "b53456.go", "(*T).Inc", []int{15, 16, 17, 18}, true)
}
+func TestDebugLines_74576(t *testing.T) {
+ unixOnly(t)
+
+ switch testGoArch() {
+ default:
+ // Failed on linux/riscv64 (issue 74669), but conservatively
+ // skip many architectures like several other tests here.
+ t.Skip("skipped for many architectures")
+
+ case "arm64", "amd64", "loong64":
+ tests := []struct {
+ file string
+ wantStmts []int
+ }{
+ {"i74576a.go", []int{12, 13, 13, 14}},
+ {"i74576b.go", []int{12, 13, 13, 14}},
+ {"i74576c.go", []int{12, 13, 13, 14}},
+ }
+ t.Parallel()
+ for _, test := range tests {
+ t.Run(test.file, func(t *testing.T) {
+ t.Parallel()
+ testDebugLines(t, "-N -l", test.file, "main", test.wantStmts, false)
+ })
+ }
+ }
+}
+
func compileAndDump(t *testing.T, file, function, moreGCFlags string) []byte {
testenv.MustHaveGoBuild(t)
@@ -223,6 +251,9 @@ func testInlineStack(t *testing.T, file, function string, wantStacks [][]int) {
// then verifies that the statement-marked lines in that file are the same as those in wantStmts
// These files must all be short because this is super-fragile.
// "go build" is run in a temporary directory that is normally deleted, unless -test.v
+//
+// TODO: the tests calling this are somewhat expensive; perhaps more tests can be marked t.Parallel,
+// or perhaps the mechanism here can be made more efficient.
func testDebugLines(t *testing.T, gcflags, file, function string, wantStmts []int, ignoreRepeats bool) {
dumpBytes := compileAndDump(t, file, function, gcflags)
dump := bufio.NewScanner(bytes.NewReader(dumpBytes))
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
index 9e46182a4c..302626f902 100644
--- a/src/cmd/compile/internal/ssa/expand_calls.go
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -243,11 +243,8 @@ func (x *expandState) rewriteFuncResults(v *Value, b *Block, aux *AuxCall) {
if len(aRegs) > 0 {
result = &allResults
} else {
- if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
- addr := a.Args[0]
- if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.NameOfResult(i) {
- continue // Self move to output parameter
- }
+ if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr && a.Args[0].Aux == aux.NameOfResult(i) {
+ continue // Self move to output parameter
}
}
rc.init(aRegs, aux.abiInfo, result, auxBase, auxOffset)
diff --git a/src/cmd/compile/internal/ssa/likelyadjust.go b/src/cmd/compile/internal/ssa/likelyadjust.go
index 1dfb53d355..06b9414a3f 100644
--- a/src/cmd/compile/internal/ssa/likelyadjust.go
+++ b/src/cmd/compile/internal/ssa/likelyadjust.go
@@ -12,18 +12,15 @@ type loop struct {
header *Block // The header node of this (reducible) loop
outer *loop // loop containing this loop
- // By default, children, exits, and depth are not initialized.
- children []*loop // loops nested directly within this loop. Initialized by assembleChildren().
- exits []*Block // exits records blocks reached by exits from this loop. Initialized by findExits().
-
// Next three fields used by regalloc and/or
// aid in computation of inner-ness and list of blocks.
nBlocks int32 // Number of blocks in this loop but not within inner loops
- depth int16 // Nesting depth of the loop; 1 is outermost. Initialized by calculateDepths().
+ depth int16 // Nesting depth of the loop; 1 is outermost.
isInner bool // True if never discovered to contain a loop
- // register allocation uses this.
- containsUnavoidableCall bool // True if all paths through the loop have a call
+ // True if all paths through the loop have a call.
+ // Computed and used by regalloc; stored here for convenience.
+ containsUnavoidableCall bool
}
// outerinner records that outer contains inner
@@ -49,18 +46,6 @@ func (sdom SparseTree) outerinner(outer, inner *loop) {
outer.isInner = false
}
-func checkContainsCall(bb *Block) bool {
- if bb.Kind == BlockDefer {
- return true
- }
- for _, v := range bb.Values {
- if opcodeTable[v.Op].call {
- return true
- }
- }
- return false
-}
-
type loopnest struct {
f *Func
b2l []*loop
@@ -68,9 +53,6 @@ type loopnest struct {
sdom SparseTree
loops []*loop
hasIrreducible bool // TODO current treatment of irreducible loops is very flaky, if accurate loops are needed, must punt at function level.
-
- // Record which of the lazily initialized fields have actually been initialized.
- initializedChildren, initializedDepth, initializedExits bool
}
const (
@@ -355,91 +337,59 @@ func loopnestfor(f *Func) *loopnest {
visited[b.ID] = true
}
- ln := &loopnest{f: f, b2l: b2l, po: po, sdom: sdom, loops: loops, hasIrreducible: sawIrred}
-
- // Calculate containsUnavoidableCall for regalloc
- dominatedByCall := f.Cache.allocBoolSlice(f.NumBlocks())
- defer f.Cache.freeBoolSlice(dominatedByCall)
- for _, b := range po {
- if checkContainsCall(b) {
- dominatedByCall[b.ID] = true
- }
- }
- // Run dfs to find path through the loop that avoids all calls.
- // Such path either escapes loop or return back to header.
- // It isn't enough to have exit not dominated by any call, for example:
- // ... some loop
- // call1 call2
- // \ /
- // exit
- // ...
- // exit is not dominated by any call, but we don't have call-free path to it.
+ // Compute depths.
for _, l := range loops {
- // Header contains call.
- if dominatedByCall[l.header.ID] {
- l.containsUnavoidableCall = true
+ if l.depth != 0 {
+ // Already computed because it is an ancestor of
+ // a previous loop.
continue
}
- callfreepath := false
- tovisit := make([]*Block, 0, len(l.header.Succs))
- // Push all non-loop non-exit successors of header onto toVisit.
- for _, s := range l.header.Succs {
- nb := s.Block()
- // This corresponds to loop with zero iterations.
- if !l.iterationEnd(nb, b2l) {
- tovisit = append(tovisit, nb)
+ // Find depth by walking up the loop tree.
+ d := int16(0)
+ for x := l; x != nil; x = x.outer {
+ if x.depth != 0 {
+ d += x.depth
+ break
}
+ d++
}
- for len(tovisit) > 0 {
- cur := tovisit[len(tovisit)-1]
- tovisit = tovisit[:len(tovisit)-1]
- if dominatedByCall[cur.ID] {
- continue
- }
- // Record visited in dominatedByCall.
- dominatedByCall[cur.ID] = true
- for _, s := range cur.Succs {
- nb := s.Block()
- if l.iterationEnd(nb, b2l) {
- callfreepath = true
- }
- if !dominatedByCall[nb.ID] {
- tovisit = append(tovisit, nb)
- }
-
- }
- if callfreepath {
+ // Set depth for every ancestor.
+ for x := l; x != nil; x = x.outer {
+ if x.depth != 0 {
break
}
+ x.depth = d
+ d--
+ }
+ }
+ // Double-check depths.
+ for _, l := range loops {
+ want := int16(1)
+ if l.outer != nil {
+ want = l.outer.depth + 1
}
- if !callfreepath {
- l.containsUnavoidableCall = true
+ if l.depth != want {
+ l.header.Fatalf("bad depth calculation for loop %s: got %d want %d", l.header, l.depth, want)
}
}
+ ln := &loopnest{f: f, b2l: b2l, po: po, sdom: sdom, loops: loops, hasIrreducible: sawIrred}
+
// Curious about the loopiness? "-d=ssa/likelyadjust/stats"
if f.pass != nil && f.pass.stats > 0 && len(loops) > 0 {
- ln.assembleChildren()
- ln.calculateDepths()
- ln.findExits()
// Note stats for non-innermost loops are slightly flawed because
// they don't account for inner loop exits that span multiple levels.
for _, l := range loops {
- x := len(l.exits)
- cf := 0
- if !l.containsUnavoidableCall {
- cf = 1
- }
inner := 0
if l.isInner {
inner++
}
- f.LogStat("loopstats:",
- l.depth, "depth", x, "exits",
- inner, "is_inner", cf, "always_calls", l.nBlocks, "n_blocks")
+ f.LogStat("loopstats in "+f.Name+":",
+ l.depth, "depth",
+ inner, "is_inner", l.nBlocks, "n_blocks")
}
}
@@ -465,62 +415,6 @@ func loopnestfor(f *Func) *loopnest {
return ln
}
-// assembleChildren initializes the children field of each
-// loop in the nest. Loop A is a child of loop B if A is
-// directly nested within B (based on the reducible-loops
-// detection above)
-func (ln *loopnest) assembleChildren() {
- if ln.initializedChildren {
- return
- }
- for _, l := range ln.loops {
- if l.outer != nil {
- l.outer.children = append(l.outer.children, l)
- }
- }
- ln.initializedChildren = true
-}
-
-// calculateDepths uses the children field of loops
-// to determine the nesting depth (outer=1) of each
-// loop. This is helpful for finding exit edges.
-func (ln *loopnest) calculateDepths() {
- if ln.initializedDepth {
- return
- }
- ln.assembleChildren()
- for _, l := range ln.loops {
- if l.outer == nil {
- l.setDepth(1)
- }
- }
- ln.initializedDepth = true
-}
-
-// findExits uses loop depth information to find the
-// exits from a loop.
-func (ln *loopnest) findExits() {
- if ln.initializedExits {
- return
- }
- ln.calculateDepths()
- b2l := ln.b2l
- for _, b := range ln.po {
- l := b2l[b.ID]
- if l != nil && len(b.Succs) == 2 {
- sl := b2l[b.Succs[0].b.ID]
- if recordIfExit(l, sl, b.Succs[0].b) {
- continue
- }
- sl = b2l[b.Succs[1].b.ID]
- if recordIfExit(l, sl, b.Succs[1].b) {
- continue
- }
- }
- }
- ln.initializedExits = true
-}
-
// depth returns the loop nesting level of block b.
func (ln *loopnest) depth(b ID) int16 {
if l := ln.b2l[b]; l != nil {
@@ -528,39 +422,3 @@ func (ln *loopnest) depth(b ID) int16 {
}
return 0
}
-
-// recordIfExit checks sl (the loop containing b) to see if it
-// is outside of loop l, and if so, records b as an exit block
-// from l and returns true.
-func recordIfExit(l, sl *loop, b *Block) bool {
- if sl != l {
- if sl == nil || sl.depth <= l.depth {
- l.exits = append(l.exits, b)
- return true
- }
- // sl is not nil, and is deeper than l
- // it's possible for this to be a goto into an irreducible loop made from gotos.
- for sl.depth > l.depth {
- sl = sl.outer
- }
- if sl != l {
- l.exits = append(l.exits, b)
- return true
- }
- }
- return false
-}
-
-func (l *loop) setDepth(d int16) {
- l.depth = d
- for _, c := range l.children {
- c.setDepth(d + 1)
- }
-}
-
-// iterationEnd checks if block b ends iteration of loop l.
-// Ending iteration means either escaping to outer loop/code or
-// going back to header
-func (l *loop) iterationEnd(b *Block, b2l []*loop) bool {
- return b == l.header || b2l[b.ID] == nil || (b2l[b.ID] != l && b2l[b.ID].depth <= l.depth)
-}
diff --git a/src/cmd/compile/internal/ssa/loopbce.go b/src/cmd/compile/internal/ssa/loopbce.go
index dd1f39dbef..aa6cc48cac 100644
--- a/src/cmd/compile/internal/ssa/loopbce.go
+++ b/src/cmd/compile/internal/ssa/loopbce.go
@@ -37,19 +37,20 @@ type indVar struct {
// - the minimum bound
// - the increment value
// - the "next" value (SSA value that is Phi'd into the induction variable every loop)
+// - the header's edge returning from the body
//
// Currently, we detect induction variables that match (Phi min nxt),
// with nxt being (Add inc ind).
// If it can't parse the induction variable correctly, it returns (nil, nil, nil).
-func parseIndVar(ind *Value) (min, inc, nxt *Value) {
+func parseIndVar(ind *Value) (min, inc, nxt *Value, loopReturn Edge) {
if ind.Op != OpPhi {
return
}
if n := ind.Args[0]; (n.Op == OpAdd64 || n.Op == OpAdd32 || n.Op == OpAdd16 || n.Op == OpAdd8) && (n.Args[0] == ind || n.Args[1] == ind) {
- min, nxt = ind.Args[1], n
+ min, nxt, loopReturn = ind.Args[1], n, ind.Block.Preds[0]
} else if n := ind.Args[1]; (n.Op == OpAdd64 || n.Op == OpAdd32 || n.Op == OpAdd16 || n.Op == OpAdd8) && (n.Args[0] == ind || n.Args[1] == ind) {
- min, nxt = ind.Args[0], n
+ min, nxt, loopReturn = ind.Args[0], n, ind.Block.Preds[1]
} else {
// Not a recognized induction variable.
return
@@ -111,13 +112,13 @@ func findIndVar(f *Func) []indVar {
// See if this is really an induction variable
less := true
- init, inc, nxt := parseIndVar(ind)
+ init, inc, nxt, loopReturn := parseIndVar(ind)
if init == nil {
// We failed to parse the induction variable. Before punting, we want to check
// whether the control op was written with the induction variable on the RHS
// instead of the LHS. This happens for the downwards case, like:
// for i := len(n)-1; i >= 0; i--
- init, inc, nxt = parseIndVar(limit)
+ init, inc, nxt, loopReturn = parseIndVar(limit)
if init == nil {
// No recognized induction variable on either operand
continue
@@ -145,6 +146,20 @@ func findIndVar(f *Func) []indVar {
continue
}
+ // startBody is the edge that eventually returns to the loop header.
+ var startBody Edge
+ switch {
+ case sdom.IsAncestorEq(b.Succs[0].b, loopReturn.b):
+ startBody = b.Succs[0]
+ case sdom.IsAncestorEq(b.Succs[1].b, loopReturn.b):
+ // if x { goto exit } else { goto entry } is identical to if !x { goto entry } else { goto exit }
+ startBody = b.Succs[1]
+ less = !less
+ inclusive = !inclusive
+ default:
+ continue
+ }
+
// Increment sign must match comparison direction.
// When incrementing, the termination comparison must be ind </<= limit.
// When decrementing, the termination comparison must be ind >/>= limit.
@@ -172,14 +187,14 @@ func findIndVar(f *Func) []indVar {
// First condition: loop entry has a single predecessor, which
// is the header block. This implies that b.Succs[0] is
// reached iff ind < limit.
- if len(b.Succs[0].b.Preds) != 1 {
- // b.Succs[1] must exit the loop.
+ if len(startBody.b.Preds) != 1 {
+ // the other successor must exit the loop.
continue
}
- // Second condition: b.Succs[0] dominates nxt so that
+ // Second condition: startBody.b dominates nxt so that
// nxt is computed when inc < limit.
- if !sdom.IsAncestorEq(b.Succs[0].b, nxt.Block) {
+ if !sdom.IsAncestorEq(startBody.b, nxt.Block) {
// inc+ind can only be reached through the branch that enters the loop.
continue
}
@@ -298,7 +313,7 @@ func findIndVar(f *Func) []indVar {
nxt: nxt,
min: min,
max: max,
- entry: b.Succs[0].b,
+ entry: startBody.b,
flags: flags,
})
b.Logf("found induction variable %v (inc = %v, min = %v, max = %v)\n", ind, inc, min, max)
diff --git a/src/cmd/compile/internal/ssa/looprotate.go b/src/cmd/compile/internal/ssa/looprotate.go
index f32125576f..e97321019b 100644
--- a/src/cmd/compile/internal/ssa/looprotate.go
+++ b/src/cmd/compile/internal/ssa/looprotate.go
@@ -4,6 +4,10 @@
package ssa
+import (
+ "slices"
+)
+
// loopRotate converts loops with a check-loop-condition-at-beginning
// to loops with a check-loop-condition-at-end.
// This helps loops avoid extra unnecessary jumps.
@@ -41,10 +45,64 @@ func loopRotate(f *Func) {
// Map from block ID to the moving blocks that should
// come right after it.
+ // If a block, which has its ID present in keys of the 'after' map,
+ // occurs in some other block's 'after' list, that represents whole
+ // nested loop, e.g. consider an inner loop I nested into an outer
+ // loop O. It and Ot are the corresponding top blocks for these loops
+ // chosen by our algorithm, and It is in the Ot's 'after' list.
+ //
+ // Before: After:
+ //
+ // e e
+ // │ │
+ // │ │Ot ◄───┐
+ // ▼ ▼▼ │
+ // ┌───Oh ◄────┐ ┌─┬─Oh │
+ // │ │ │ │ │ │
+ // │ │ │ │ │ It◄───┐ │
+ // │ ▼ │ │ │ ▼ │ │
+ // │ ┌─Ih◄───┐ │ │ └►Ih │ │
+ // │ │ │ │ │ │ ┌─┤ │ │
+ // │ │ ▼ │ │ │ │ ▼ │ │
+ // │ │ Ib │ │ │ │ Ib │ │
+ // │ │ └─►It─┘ │ │ │ └─────┘ │
+ // │ │ │ │ │ │
+ // │ └►Ie │ │ └►Ie │
+ // │ └─►Ot───┘ │ └───────┘
+ // │ │
+ // └──►Oe └──►Oe
+ //
+ // We build the 'after' lists for each of the top blocks Ot and It:
+ // after[Ot]: Oh, It, Ie
+ // after[It]: Ih, Ib
after := map[ID][]*Block{}
+ // Map from loop header ID to the new top block for the loop.
+ tops := map[ID]*Block{}
+
+ // Order loops to rotate any child loop before adding its top block
+ // to the parent loop's 'after' list.
+ loopOrder := f.Cache.allocIntSlice(len(loopnest.loops))
+ for i := range loopOrder {
+ loopOrder[i] = i
+ }
+ defer f.Cache.freeIntSlice(loopOrder)
+ slices.SortFunc(loopOrder, func(i, j int) int {
+ di := loopnest.loops[i].depth
+ dj := loopnest.loops[j].depth
+ switch {
+ case di > dj:
+ return -1
+ case di < dj:
+ return 1
+ default:
+ return 0
+ }
+ })
+
// Check each loop header and decide if we want to move it.
- for _, loop := range loopnest.loops {
+ for _, loopIdx := range loopOrder {
+ loop := loopnest.loops[loopIdx]
b := loop.header
var p *Block // b's in-loop predecessor
for _, e := range b.Preds {
@@ -59,6 +117,7 @@ func loopRotate(f *Func) {
if p == nil {
continue
}
+ tops[loop.header.ID] = p
p.Hotness |= HotInitial
if f.IsPgoHot {
p.Hotness |= HotPgo
@@ -80,8 +139,10 @@ func loopRotate(f *Func) {
if nextb == p { // original loop predecessor is next
break
}
- if loopnest.b2l[nextb.ID] == loop {
- after[p.ID] = append(after[p.ID], nextb)
+ if bloop := loopnest.b2l[nextb.ID]; bloop != nil {
+ if bloop == loop || bloop.outer == loop && tops[bloop.header.ID] == nextb {
+ after[p.ID] = append(after[p.ID], nextb)
+ }
}
b = nextb
}
@@ -90,7 +151,7 @@ func loopRotate(f *Func) {
f.Blocks[idToIdx[p.ID]] = loop.header
idToIdx[loop.header.ID], idToIdx[p.ID] = idToIdx[p.ID], idToIdx[loop.header.ID]
- // Place b after p.
+ // Place loop blocks after p.
for _, b := range after[p.ID] {
move[b.ID] = struct{}{}
}
@@ -107,16 +168,23 @@ func loopRotate(f *Func) {
oldOrder := f.Cache.allocBlockSlice(len(f.Blocks))
defer f.Cache.freeBlockSlice(oldOrder)
copy(oldOrder, f.Blocks)
+ var moveBlocks func(bs []*Block)
+ moveBlocks = func(blocks []*Block) {
+ for _, a := range blocks {
+ f.Blocks[j] = a
+ j++
+ if nextBlocks, ok := after[a.ID]; ok {
+ moveBlocks(nextBlocks)
+ }
+ }
+ }
for _, b := range oldOrder {
if _, ok := move[b.ID]; ok {
continue
}
f.Blocks[j] = b
j++
- for _, a := range after[b.ID] {
- f.Blocks[j] = a
- j++
- }
+ moveBlocks(after[b.ID])
}
if j != len(oldOrder) {
f.Fatalf("bad reordering in looprotate")
diff --git a/src/cmd/compile/internal/ssa/looprotate_test.go b/src/cmd/compile/internal/ssa/looprotate_test.go
new file mode 100644
index 0000000000..8e7cfc343f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/looprotate_test.go
@@ -0,0 +1,65 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestLoopRotateNested(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("constTrue", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto("outerHeader")),
+ Bloc("outerHeader",
+ If("constTrue", "outerBody", "outerExit")),
+ Bloc("outerBody",
+ Goto("innerHeader")),
+ Bloc("innerHeader",
+ If("constTrue", "innerBody", "innerExit")),
+ Bloc("innerBody",
+ Goto("innerTop")),
+ Bloc("innerTop",
+ Goto("innerHeader")),
+ Bloc("innerExit",
+ Goto("outerTop")),
+ Bloc("outerTop",
+ Goto("outerHeader")),
+ Bloc("outerExit",
+ Exit("mem")))
+
+ blockName := make([]string, len(fun.f.Blocks)+1)
+ for name, block := range fun.blocks {
+ blockName[block.ID] = name
+ }
+
+ CheckFunc(fun.f)
+ loopRotate(fun.f)
+ CheckFunc(fun.f)
+
+ // Verify the resulting block order
+ expected := []string{
+ "entry",
+ "outerTop",
+ "outerHeader",
+ "outerBody",
+ "innerTop",
+ "innerHeader",
+ "innerBody",
+ "innerExit",
+ "outerExit",
+ }
+ if len(expected) != len(fun.f.Blocks) {
+ t.Fatalf("expected %d blocks, found %d", len(expected), len(fun.f.Blocks))
+ }
+ for i, b := range fun.f.Blocks {
+ if expected[i] != blockName[b.ID] {
+ t.Errorf("position %d: expected %s, found %s", i, expected[i], blockName[b.ID])
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
index 9d43ec1991..467c7514ee 100644
--- a/src/cmd/compile/internal/ssa/nilcheck.go
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -221,7 +221,8 @@ func nilcheckelim2(f *Func) {
// Iteration order means that first nilcheck in the chain wins, others
// are bumped into the ordinary statement preservation algorithm.
- u := b.Values[unnecessary.get(v.Args[0].ID)]
+ uid, _ := unnecessary.get(v.Args[0].ID)
+ u := b.Values[uid]
if !u.Type.IsMemory() && !u.Pos.SameFileAndLine(v.Pos) {
if u.Pos.IsStmt() == src.PosIsStmt {
pendingLines.add(u.Pos)
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
index 2a3356bc5c..d5c7394a26 100644
--- a/src/cmd/compile/internal/ssa/op.go
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -6,10 +6,12 @@ package ssa
import (
"cmd/compile/internal/abi"
+ "cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"fmt"
+ rtabi "internal/abi"
"strings"
)
@@ -68,6 +70,10 @@ type regInfo struct {
// clobbers encodes the set of registers that are overwritten by
// the instruction (other than the output registers).
clobbers regMask
+ // Instruction clobbers the register containing input 0.
+ clobbersArg0 bool
+ // Instruction clobbers the register containing input 1.
+ clobbersArg1 bool
// outputs is the same as inputs, but for the outputs of the instruction.
outputs []outputInfo
}
@@ -365,6 +371,9 @@ const (
auxCall // aux is a *ssa.AuxCall
auxCallOff // aux is a *ssa.AuxCall, AuxInt is int64 param (in+out) size
+ auxPanicBoundsC // constant for a bounds failure
+ auxPanicBoundsCC // two constants for a bounds failure
+
// architecture specific aux types
auxARM64BitField // aux is an arm64 bitfield lsb and width packed into auxInt
auxS390XRotateParams // aux is a s390x rotate parameters object encoding start bit, end bit and rotate amount
@@ -523,6 +532,50 @@ func boundsABI(b int64) int {
}
}
+// Returns the bounds error code needed by the runtime, and
+// whether the x field is signed.
+func (b BoundsKind) Code() (rtabi.BoundsErrorCode, bool) {
+ switch b {
+ case BoundsIndex:
+ return rtabi.BoundsIndex, true
+ case BoundsIndexU:
+ return rtabi.BoundsIndex, false
+ case BoundsSliceAlen:
+ return rtabi.BoundsSliceAlen, true
+ case BoundsSliceAlenU:
+ return rtabi.BoundsSliceAlen, false
+ case BoundsSliceAcap:
+ return rtabi.BoundsSliceAcap, true
+ case BoundsSliceAcapU:
+ return rtabi.BoundsSliceAcap, false
+ case BoundsSliceB:
+ return rtabi.BoundsSliceB, true
+ case BoundsSliceBU:
+ return rtabi.BoundsSliceB, false
+ case BoundsSlice3Alen:
+ return rtabi.BoundsSlice3Alen, true
+ case BoundsSlice3AlenU:
+ return rtabi.BoundsSlice3Alen, false
+ case BoundsSlice3Acap:
+ return rtabi.BoundsSlice3Acap, true
+ case BoundsSlice3AcapU:
+ return rtabi.BoundsSlice3Acap, false
+ case BoundsSlice3B:
+ return rtabi.BoundsSlice3B, true
+ case BoundsSlice3BU:
+ return rtabi.BoundsSlice3B, false
+ case BoundsSlice3C:
+ return rtabi.BoundsSlice3C, true
+ case BoundsSlice3CU:
+ return rtabi.BoundsSlice3C, false
+ case BoundsConvert:
+ return rtabi.BoundsConvert, false
+ default:
+ base.Fatalf("bad bounds kind %d", b)
+ return 0, false
+ }
+}
+
// arm64BitField is the GO type of ARM64BitField auxInt.
// if x is an ARM64BitField, then width=x&0xff, lsb=(x>>8)&0xff, and
// width+lsb<64 for 64-bit variant, width+lsb<32 for 32-bit variant.
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 6dcbec2573..a69612f28a 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -569,12 +569,12 @@ const (
Op386LoweredGetCallerSP
Op386LoweredNilCheck
Op386LoweredWB
- Op386LoweredPanicBoundsA
- Op386LoweredPanicBoundsB
- Op386LoweredPanicBoundsC
- Op386LoweredPanicExtendA
- Op386LoweredPanicExtendB
- Op386LoweredPanicExtendC
+ Op386LoweredPanicBoundsRR
+ Op386LoweredPanicBoundsRC
+ Op386LoweredPanicBoundsCR
+ Op386LoweredPanicBoundsCC
+ Op386LoweredPanicExtendRR
+ Op386LoweredPanicExtendRC
Op386FlagEQ
Op386FlagLT_ULT
Op386FlagLT_UGT
@@ -1051,7 +1051,8 @@ const (
OpAMD64MOVLstoreconstidx4
OpAMD64MOVQstoreconstidx1
OpAMD64MOVQstoreconstidx8
- OpAMD64DUFFZERO
+ OpAMD64LoweredZero
+ OpAMD64LoweredZeroLoop
OpAMD64REPSTOSQ
OpAMD64CALLstatic
OpAMD64CALLtail
@@ -1067,9 +1068,10 @@ const (
OpAMD64LoweredNilCheck
OpAMD64LoweredWB
OpAMD64LoweredHasCPUFeature
- OpAMD64LoweredPanicBoundsA
- OpAMD64LoweredPanicBoundsB
- OpAMD64LoweredPanicBoundsC
+ OpAMD64LoweredPanicBoundsRR
+ OpAMD64LoweredPanicBoundsRC
+ OpAMD64LoweredPanicBoundsCR
+ OpAMD64LoweredPanicBoundsCC
OpAMD64FlagEQ
OpAMD64FlagLT_ULT
OpAMD64FlagLT_UGT
@@ -2478,12 +2480,12 @@ const (
OpARMLoweredGetClosurePtr
OpARMLoweredGetCallerSP
OpARMLoweredGetCallerPC
- OpARMLoweredPanicBoundsA
- OpARMLoweredPanicBoundsB
- OpARMLoweredPanicBoundsC
- OpARMLoweredPanicExtendA
- OpARMLoweredPanicExtendB
- OpARMLoweredPanicExtendC
+ OpARMLoweredPanicBoundsRR
+ OpARMLoweredPanicBoundsRC
+ OpARMLoweredPanicBoundsCR
+ OpARMLoweredPanicBoundsCC
+ OpARMLoweredPanicExtendRR
+ OpARMLoweredPanicExtendRC
OpARMFlagConstant
OpARMInvertFlags
OpARMLoweredWB
@@ -2814,9 +2816,10 @@ const (
OpARM64LoweredAtomicAnd32Variant
OpARM64LoweredAtomicOr32Variant
OpARM64LoweredWB
- OpARM64LoweredPanicBoundsA
- OpARM64LoweredPanicBoundsB
- OpARM64LoweredPanicBoundsC
+ OpARM64LoweredPanicBoundsRR
+ OpARM64LoweredPanicBoundsRC
+ OpARM64LoweredPanicBoundsCR
+ OpARM64LoweredPanicBoundsCC
OpARM64PRFM
OpARM64DMB
OpARM64ZERO
@@ -3018,9 +3021,10 @@ const (
OpLOONG64LoweredGetCallerPC
OpLOONG64LoweredWB
OpLOONG64LoweredPubBarrier
- OpLOONG64LoweredPanicBoundsA
- OpLOONG64LoweredPanicBoundsB
- OpLOONG64LoweredPanicBoundsC
+ OpLOONG64LoweredPanicBoundsRR
+ OpLOONG64LoweredPanicBoundsRC
+ OpLOONG64LoweredPanicBoundsCR
+ OpLOONG64LoweredPanicBoundsCC
OpLOONG64PRELD
OpLOONG64PRELDX
@@ -3134,12 +3138,12 @@ const (
OpMIPSLoweredGetCallerPC
OpMIPSLoweredWB
OpMIPSLoweredPubBarrier
- OpMIPSLoweredPanicBoundsA
- OpMIPSLoweredPanicBoundsB
- OpMIPSLoweredPanicBoundsC
- OpMIPSLoweredPanicExtendA
- OpMIPSLoweredPanicExtendB
- OpMIPSLoweredPanicExtendC
+ OpMIPSLoweredPanicBoundsRR
+ OpMIPSLoweredPanicBoundsRC
+ OpMIPSLoweredPanicBoundsCR
+ OpMIPSLoweredPanicBoundsCC
+ OpMIPSLoweredPanicExtendRR
+ OpMIPSLoweredPanicExtendRC
OpMIPS64ADDV
OpMIPS64ADDVconst
@@ -3266,9 +3270,10 @@ const (
OpMIPS64LoweredGetCallerPC
OpMIPS64LoweredWB
OpMIPS64LoweredPubBarrier
- OpMIPS64LoweredPanicBoundsA
- OpMIPS64LoweredPanicBoundsB
- OpMIPS64LoweredPanicBoundsC
+ OpMIPS64LoweredPanicBoundsRR
+ OpMIPS64LoweredPanicBoundsRC
+ OpMIPS64LoweredPanicBoundsCR
+ OpMIPS64LoweredPanicBoundsCC
OpPPC64ADD
OpPPC64ADDCC
@@ -3637,9 +3642,10 @@ const (
OpRISCV64LoweredGetCallerPC
OpRISCV64LoweredWB
OpRISCV64LoweredPubBarrier
- OpRISCV64LoweredPanicBoundsA
- OpRISCV64LoweredPanicBoundsB
- OpRISCV64LoweredPanicBoundsC
+ OpRISCV64LoweredPanicBoundsRR
+ OpRISCV64LoweredPanicBoundsRC
+ OpRISCV64LoweredPanicBoundsCR
+ OpRISCV64LoweredPanicBoundsCC
OpRISCV64FADDS
OpRISCV64FSUBS
OpRISCV64FMULS
@@ -3708,6 +3714,10 @@ const (
OpS390XLPDFR
OpS390XLNDFR
OpS390XCPSDR
+ OpS390XWFMAXDB
+ OpS390XWFMAXSB
+ OpS390XWFMINDB
+ OpS390XWFMINSB
OpS390XFIDBR
OpS390XFMOVSload
OpS390XFMOVDload
@@ -3890,9 +3900,10 @@ const (
OpS390XLoweredRound32F
OpS390XLoweredRound64F
OpS390XLoweredWB
- OpS390XLoweredPanicBoundsA
- OpS390XLoweredPanicBoundsB
- OpS390XLoweredPanicBoundsC
+ OpS390XLoweredPanicBoundsRR
+ OpS390XLoweredPanicBoundsRC
+ OpS390XLoweredPanicBoundsCR
+ OpS390XLoweredPanicBoundsCC
OpS390XFlagEQ
OpS390XFlagLT
OpS390XFlagGT
@@ -9499,77 +9510,68 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4}, // DX
- {1, 8}, // BX
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 2}, // CX
- {1, 4}, // DX
+ {0, 239}, // AX CX DX BX BP SI DI
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1}, // AX
- {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
},
},
},
{
- name: "LoweredPanicExtendA",
- auxType: auxInt64,
- argLen: 4,
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
call: true,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 64}, // SI
- {1, 4}, // DX
- {2, 8}, // BX
- },
- },
+ reg: regInfo{},
},
{
- name: "LoweredPanicExtendB",
+ name: "LoweredPanicExtendRR",
auxType: auxInt64,
argLen: 4,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 64}, // SI
- {1, 2}, // CX
- {2, 4}, // DX
+ {0, 15}, // AX CX DX BX
+ {1, 15}, // AX CX DX BX
+ {2, 239}, // AX CX DX BX BP SI DI
},
},
},
{
- name: "LoweredPanicExtendC",
- auxType: auxInt64,
- argLen: 4,
+ name: "LoweredPanicExtendRC",
+ auxType: auxPanicBoundsC,
+ argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 64}, // SI
- {1, 1}, // AX
- {2, 2}, // CX
+ {0, 15}, // AX CX DX BX
+ {1, 15}, // AX CX DX BX
},
},
},
@@ -16796,15 +16798,28 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "DUFFZERO",
- auxType: auxInt64,
- argLen: 2,
- unsafePoint: true,
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 128}, // DI
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
},
- clobbers: 128, // DI
+ },
+ },
+ {
+ name: "LoweredZeroLoop",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ needIntTemp: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbersArg0: true,
},
},
{
@@ -16977,42 +16992,47 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4}, // DX
- {1, 8}, // BX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 2}, // CX
- {1, 4}, // DX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1}, // AX
- {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
},
},
},
{
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
+ call: true,
+ reg: regInfo{},
+ },
+ {
name: "FlagEQ",
argLen: 0,
reg: regInfo{},
@@ -37876,77 +37896,68 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4}, // R2
- {1, 8}, // R3
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
+ {1, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 2}, // R1
- {1, 4}, // R2
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1}, // R0
- {1, 2}, // R1
+ {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12
},
},
},
{
- name: "LoweredPanicExtendA",
- auxType: auxInt64,
- argLen: 4,
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
call: true,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 16}, // R4
- {1, 4}, // R2
- {2, 8}, // R3
- },
- },
+ reg: regInfo{},
},
{
- name: "LoweredPanicExtendB",
+ name: "LoweredPanicExtendRR",
auxType: auxInt64,
argLen: 4,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 16}, // R4
- {1, 2}, // R1
- {2, 4}, // R2
+ {0, 15}, // R0 R1 R2 R3
+ {1, 15}, // R0 R1 R2 R3
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
},
},
},
{
- name: "LoweredPanicExtendC",
- auxType: auxInt64,
- argLen: 4,
+ name: "LoweredPanicExtendRC",
+ auxType: auxPanicBoundsC,
+ argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 16}, // R4
- {1, 1}, // R0
- {2, 2}, // R1
+ {0, 15}, // R0 R1 R2 R3
+ {1, 15}, // R0 R1 R2 R3
},
},
},
@@ -42441,42 +42452,47 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4}, // R2
- {1, 8}, // R3
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 2}, // R1
- {1, 4}, // R2
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1}, // R0
- {1, 2}, // R1
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
},
},
},
{
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
+ call: true,
+ reg: regInfo{},
+ },
+ {
name: "PRFM",
auxType: auxInt64,
argLen: 2,
@@ -45208,42 +45224,47 @@ var opcodeTable = [...]opInfo{
reg: regInfo{},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4194304}, // R23
- {1, 8388608}, // R24
+ {0, 524280}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19
+ {1, 524280}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1048576}, // R21
- {1, 4194304}, // R23
+ {0, 524280}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 524288}, // R20
- {1, 1048576}, // R21
+ {0, 524280}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19
},
},
},
{
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
+ call: true,
+ reg: regInfo{},
+ },
+ {
name: "PRELD",
auxType: auxInt64,
argLen: 2,
@@ -46756,77 +46777,68 @@ var opcodeTable = [...]opInfo{
reg: regInfo{},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 8}, // R3
- {1, 16}, // R4
+ {0, 131070}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16
+ {1, 131070}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4}, // R2
- {1, 8}, // R3
+ {0, 131070}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 2}, // R1
- {1, 4}, // R2
+ {0, 131070}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16
},
},
},
{
- name: "LoweredPanicExtendA",
- auxType: auxInt64,
- argLen: 4,
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
call: true,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 32}, // R5
- {1, 8}, // R3
- {2, 16}, // R4
- },
- },
+ reg: regInfo{},
},
{
- name: "LoweredPanicExtendB",
+ name: "LoweredPanicExtendRR",
auxType: auxInt64,
argLen: 4,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 32}, // R5
- {1, 4}, // R2
- {2, 8}, // R3
+ {0, 30}, // R1 R2 R3 R4
+ {1, 30}, // R1 R2 R3 R4
+ {2, 131070}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16
},
},
},
{
- name: "LoweredPanicExtendC",
- auxType: auxInt64,
- argLen: 4,
+ name: "LoweredPanicExtendRC",
+ auxType: auxPanicBoundsC,
+ argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 32}, // R5
- {1, 2}, // R1
- {2, 4}, // R2
+ {0, 30}, // R1 R2 R3 R4
+ {1, 30}, // R1 R2 R3 R4
},
},
},
@@ -48529,41 +48541,46 @@ var opcodeTable = [...]opInfo{
reg: regInfo{},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 8}, // R3
- {1, 16}, // R4
+ {0, 131070}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16
+ {1, 131070}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4}, // R2
- {1, 8}, // R3
+ {0, 131070}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 2}, // R1
- {1, 4}, // R2
+ {0, 131070}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16
},
},
},
+ {
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
+ call: true,
+ reg: regInfo{},
+ },
{
name: "ADD",
@@ -53530,42 +53547,47 @@ var opcodeTable = [...]opInfo{
reg: regInfo{},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 64}, // X7
- {1, 134217728}, // X28
+ {0, 1048560}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20
+ {1, 1048560}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 32}, // X6
- {1, 64}, // X7
+ {0, 1048560}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 16}, // X5
- {1, 32}, // X6
+ {0, 1048560}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20
},
},
},
{
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
+ call: true,
+ reg: regInfo{},
+ },
+ {
name: "FADDS",
argLen: 2,
commutative: true,
@@ -54542,6 +54564,62 @@ var opcodeTable = [...]opInfo{
},
},
{
+ name: "WFMAXDB",
+ argLen: 2,
+ asm: s390x.AWFMAXDB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "WFMAXSB",
+ argLen: 2,
+ asm: s390x.AWFMAXSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "WFMINDB",
+ argLen: 2,
+ asm: s390x.AWFMINDB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "WFMINSB",
+ argLen: 2,
+ asm: s390x.AWFMINSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
name: "FIDBR",
auxType: auxInt8,
argLen: 1,
@@ -57211,42 +57289,47 @@ var opcodeTable = [...]opInfo{
},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 4}, // R2
- {1, 8}, // R3
+ {0, 7167}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12
+ {1, 7167}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 2}, // R1
- {1, 4}, // R2
+ {0, 7167}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 1}, // R0
- {1, 2}, // R1
+ {0, 7167}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12
},
},
},
{
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
+ call: true,
+ reg: regInfo{},
+ },
+ {
name: "FlagEQ",
argLen: 0,
reg: regInfo{},
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go
index 93bd525c38..b8c952ef33 100644
--- a/src/cmd/compile/internal/ssa/prove.go
+++ b/src/cmd/compile/internal/ssa/prove.go
@@ -5,6 +5,7 @@
package ssa
import (
+ "cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"math"
@@ -2132,6 +2133,41 @@ func addRestrictions(parent *Block, ft *factsTable, t domain, v, w *Value, r rel
}
}
+func unsignedAddOverflows(a, b uint64, t *types.Type) bool {
+ switch t.Size() {
+ case 8:
+ return a+b < a
+ case 4:
+ return a+b > math.MaxUint32
+ case 2:
+ return a+b > math.MaxUint16
+ case 1:
+ return a+b > math.MaxUint8
+ default:
+ panic("unreachable")
+ }
+}
+
+func signedAddOverflowsOrUnderflows(a, b int64, t *types.Type) bool {
+ r := a + b
+ switch t.Size() {
+ case 8:
+ return (a >= 0 && b >= 0 && r < 0) || (a < 0 && b < 0 && r >= 0)
+ case 4:
+ return r < math.MinInt32 || math.MaxInt32 < r
+ case 2:
+ return r < math.MinInt16 || math.MaxInt16 < r
+ case 1:
+ return r < math.MinInt8 || math.MaxInt8 < r
+ default:
+ panic("unreachable")
+ }
+}
+
+func unsignedSubUnderflows(a, b uint64) bool {
+ return a < b
+}
+
func addLocalFacts(ft *factsTable, b *Block) {
// Propagate constant ranges among values in this block.
// We do this before the second loop so that we have the
@@ -2151,6 +2187,60 @@ func addLocalFacts(ft *factsTable, b *Block) {
// FIXME(go.dev/issue/68857): this loop only set up limits properly when b.Values is in topological order.
// flowLimit can also depend on limits given by this loop which right now is not handled.
switch v.Op {
+ case OpAdd64, OpAdd32, OpAdd16, OpAdd8:
+ x := ft.limits[v.Args[0].ID]
+ y := ft.limits[v.Args[1].ID]
+ if !unsignedAddOverflows(x.umax, y.umax, v.Type) {
+ r := gt
+ if !x.nonzero() {
+ r |= eq
+ }
+ ft.update(b, v, v.Args[1], unsigned, r)
+ r = gt
+ if !y.nonzero() {
+ r |= eq
+ }
+ ft.update(b, v, v.Args[0], unsigned, r)
+ }
+ if x.min >= 0 && !signedAddOverflowsOrUnderflows(x.max, y.max, v.Type) {
+ r := gt
+ if !x.nonzero() {
+ r |= eq
+ }
+ ft.update(b, v, v.Args[1], signed, r)
+ }
+ if y.min >= 0 && !signedAddOverflowsOrUnderflows(x.max, y.max, v.Type) {
+ r := gt
+ if !y.nonzero() {
+ r |= eq
+ }
+ ft.update(b, v, v.Args[0], signed, r)
+ }
+ if x.max <= 0 && !signedAddOverflowsOrUnderflows(x.min, y.min, v.Type) {
+ r := lt
+ if !x.nonzero() {
+ r |= eq
+ }
+ ft.update(b, v, v.Args[1], signed, r)
+ }
+ if y.max <= 0 && !signedAddOverflowsOrUnderflows(x.min, y.min, v.Type) {
+ r := lt
+ if !y.nonzero() {
+ r |= eq
+ }
+ ft.update(b, v, v.Args[0], signed, r)
+ }
+ case OpSub64, OpSub32, OpSub16, OpSub8:
+ x := ft.limits[v.Args[0].ID]
+ y := ft.limits[v.Args[1].ID]
+ if !unsignedSubUnderflows(x.umin, y.umax) {
+ r := lt
+ if !y.nonzero() {
+ r |= eq
+ }
+ ft.update(b, v, v.Args[0], unsigned, r)
+ }
+ // FIXME: we could also do signed facts but the overflow checks are much trickier and I don't need it yet.
case OpAnd64, OpAnd32, OpAnd16, OpAnd8:
ft.update(b, v, v.Args[0], unsigned, lt|eq)
ft.update(b, v, v.Args[1], unsigned, lt|eq)
@@ -2177,6 +2267,10 @@ func addLocalFacts(ft *factsTable, b *Block) {
// the mod instruction executes (and thus panics if the
// modulus is 0). See issue 67625.
ft.update(b, v, v.Args[1], unsigned, lt)
+ case OpStringLen:
+ if v.Args[0].Op == OpStringMake {
+ ft.update(b, v, v.Args[0].Args[1], signed, eq)
+ }
case OpSliceLen:
if v.Args[0].Op == OpSliceMake {
ft.update(b, v, v.Args[0].Args[1], signed, eq)
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
index d4ce7a815b..0fd0b9173c 100644
--- a/src/cmd/compile/internal/ssa/regalloc.go
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -1591,6 +1591,12 @@ func (s *regAllocState) regalloc(f *Func) {
mask &^= desired.avoid
}
}
+ if mask&s.values[v.Args[i.idx].ID].regs&(1<<s.SPReg) != 0 {
+ // Prefer SP register. This ensures that local variables
+ // use SP as their base register (instead of a copy of the
+ // stack pointer living in another register). See issue 74836.
+ mask = 1 << s.SPReg
+ }
args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos)
}
@@ -1694,8 +1700,38 @@ func (s *regAllocState) regalloc(f *Func) {
}
}
}
-
ok:
+ for i := 0; i < 2; i++ {
+ if !(i == 0 && regspec.clobbersArg0 || i == 1 && regspec.clobbersArg1) {
+ continue
+ }
+ if !s.liveAfterCurrentInstruction(v.Args[i]) {
+ // arg is dead. We can clobber its register.
+ continue
+ }
+ if s.values[v.Args[i].ID].rematerializeable {
+ // We can rematerialize the input, don't worry about clobbering it.
+ continue
+ }
+ if countRegs(s.values[v.Args[i].ID].regs) >= 2 {
+ // We have at least 2 copies of arg. We can afford to clobber one.
+ continue
+ }
+ // Possible new registers to copy into.
+ m := s.compatRegs(v.Args[i].Type) &^ s.used
+ if m == 0 {
+ // No free registers. In this case we'll just clobber the
+ // input and future uses of that input must use a restore.
+ // TODO(khr): We should really do this like allocReg does it,
+ // spilling the value with the most distant next use.
+ continue
+ }
+ // Copy input to a new clobberable register.
+ c := s.allocValToReg(v.Args[i], m, true, v.Pos)
+ s.copies[c] = false
+ args[i] = c
+ }
+
// Pick a temporary register if needed.
// It should be distinct from all the input registers, so we
// allocate it after all the input registers, but before
@@ -1717,6 +1753,13 @@ func (s *regAllocState) regalloc(f *Func) {
s.tmpused |= regMask(1) << tmpReg
}
+ if regspec.clobbersArg0 {
+ s.freeReg(register(s.f.getHome(args[0].ID).(*Register).num))
+ }
+ if regspec.clobbersArg1 {
+ s.freeReg(register(s.f.getHome(args[1].ID).(*Register).num))
+ }
+
// Now that all args are in regs, we're ready to issue the value itself.
// Before we pick a register for the output value, allow input registers
// to be deallocated. We do this here so that the output can use the
@@ -2743,7 +2786,7 @@ func (s *regAllocState) computeLive() {
// out to all of them.
po := f.postorder()
s.loopnest = f.loopnest()
- s.loopnest.calculateDepths()
+ s.loopnest.computeUnavoidableCalls()
for {
changed := false
@@ -3050,3 +3093,72 @@ func (d *desiredState) merge(x *desiredState) {
d.addList(e.ID, e.regs)
}
}
+
+// computeUnavoidableCalls computes the containsUnavoidableCall fields in the loop nest.
+func (loopnest *loopnest) computeUnavoidableCalls() {
+ f := loopnest.f
+
+ hasCall := f.Cache.allocBoolSlice(f.NumBlocks())
+ defer f.Cache.freeBoolSlice(hasCall)
+ for _, b := range f.Blocks {
+ if b.containsCall() {
+ hasCall[b.ID] = true
+ }
+ }
+ found := f.Cache.allocSparseSet(f.NumBlocks())
+ defer f.Cache.freeSparseSet(found)
+ // Run dfs to find path through the loop that avoids all calls.
+ // Such path either escapes the loop or returns back to the header.
+ // It isn't enough to have exit not dominated by any call, for example:
+ // ... some loop
+ // call1 call2
+ // \ /
+ // block
+ // ...
+ // block is not dominated by any single call, but we don't have call-free path to it.
+loopLoop:
+ for _, l := range loopnest.loops {
+ found.clear()
+ tovisit := make([]*Block, 0, 8)
+ tovisit = append(tovisit, l.header)
+ for len(tovisit) > 0 {
+ cur := tovisit[len(tovisit)-1]
+ tovisit = tovisit[:len(tovisit)-1]
+ if hasCall[cur.ID] {
+ continue
+ }
+ for _, s := range cur.Succs {
+ nb := s.Block()
+ if nb == l.header {
+ // Found a call-free path around the loop.
+ continue loopLoop
+ }
+ if found.contains(nb.ID) {
+ // Already found via another path.
+ continue
+ }
+ nl := loopnest.b2l[nb.ID]
+ if nl == nil || (nl.depth <= l.depth && nl != l) {
+ // Left the loop.
+ continue
+ }
+ tovisit = append(tovisit, nb)
+ found.add(nb.ID)
+ }
+ }
+ // No call-free path was found.
+ l.containsUnavoidableCall = true
+ }
+}
+
+func (b *Block) containsCall() bool {
+ if b.Kind == BlockDefer {
+ return true
+ }
+ for _, v := range b.Values {
+ if opcodeTable[v.Op].call {
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
index 7d804a0d30..e7ed416c50 100644
--- a/src/cmd/compile/internal/ssa/regalloc_test.go
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -6,6 +6,7 @@ package ssa
import (
"cmd/compile/internal/types"
+ "fmt"
"testing"
)
@@ -218,10 +219,37 @@ func TestSpillMove2(t *testing.T) {
}
+func TestClobbersArg0(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("ptr", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
+ Valu("dst", OpArg, c.config.Types.Int64.PtrTo().PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo().PtrTo())),
+ Valu("zero", OpAMD64LoweredZeroLoop, types.TypeMem, 256, nil, "ptr", "mem"),
+ Valu("store", OpAMD64MOVQstore, types.TypeMem, 0, nil, "dst", "ptr", "zero"),
+ Exit("store")))
+ flagalloc(f.f)
+ regalloc(f.f)
+ checkFunc(f.f)
+ // LoweredZeroLoop clobbers its argument, so there must be a copy of "ptr" somewhere
+ // so we still have that value available at "store".
+ if n := numCopies(f.blocks["entry"]); n != 1 {
+ fmt.Printf("%s\n", f.f.String())
+ t.Errorf("got %d copies, want 1", n)
+ }
+}
+
func numSpills(b *Block) int {
+ return numOps(b, OpStoreReg)
+}
+func numCopies(b *Block) int {
+ return numOps(b, OpCopy)
+}
+func numOps(b *Block, op Op) int {
n := 0
for _, v := range b.Values {
- if v.Op == OpStoreReg {
+ if v.Op == op {
n++
}
}
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index eb2c3b31b8..f9a35deecc 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -29,6 +29,8 @@ type deadValueChoice bool
const (
leaveDeadValues deadValueChoice = false
removeDeadValues = true
+
+ repZeroThreshold = 1408 // size beyond which we use REP STOS for zeroing
)
// deadcode indicates whether rewrite should try to remove any values that become dead.
@@ -199,16 +201,18 @@ func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValu
f.freeValue(v)
continue
}
- if v.Pos.IsStmt() != src.PosNotStmt && !notStmtBoundary(v.Op) && pendingLines.get(vl) == int32(b.ID) {
- pendingLines.remove(vl)
- v.Pos = v.Pos.WithIsStmt()
+ if v.Pos.IsStmt() != src.PosNotStmt && !notStmtBoundary(v.Op) {
+ if pl, ok := pendingLines.get(vl); ok && pl == int32(b.ID) {
+ pendingLines.remove(vl)
+ v.Pos = v.Pos.WithIsStmt()
+ }
}
if i != j {
b.Values[j] = v
}
j++
}
- if pendingLines.get(b.Pos) == int32(b.ID) {
+ if pl, ok := pendingLines.get(b.Pos); ok && pl == int32(b.ID) {
b.Pos = b.Pos.WithIsStmt()
pendingLines.remove(b.Pos)
}
@@ -301,7 +305,6 @@ func canMergeLoadClobber(target, load, x *Value) bool {
return false
}
loopnest := x.Block.Func.loopnest()
- loopnest.calculateDepths()
if loopnest.depth(target.Block.ID) > loopnest.depth(x.Block.ID) {
return false
}
@@ -479,30 +482,28 @@ func nto(x int64) int64 {
// logX returns logarithm of n base 2.
// n must be a positive power of 2 (isPowerOfTwoX returns true).
-func log8(n int8) int64 {
- return int64(bits.Len8(uint8(n))) - 1
-}
-func log16(n int16) int64 {
- return int64(bits.Len16(uint16(n))) - 1
-}
-func log32(n int32) int64 {
- return int64(bits.Len32(uint32(n))) - 1
-}
-func log64(n int64) int64 {
- return int64(bits.Len64(uint64(n))) - 1
-}
+func log8(n int8) int64 { return log8u(uint8(n)) }
+func log16(n int16) int64 { return log16u(uint16(n)) }
+func log32(n int32) int64 { return log32u(uint32(n)) }
+func log64(n int64) int64 { return log64u(uint64(n)) }
-// log2uint32 returns logarithm in base 2 of uint32(n), with log2(0) = -1.
-// Rounds down.
-func log2uint32(n int64) int64 {
- return int64(bits.Len32(uint32(n))) - 1
-}
+// logXu returns the logarithm of n base 2.
+// n must be a power of 2 (isUnsignedPowerOfTwo returns true)
+func log8u(n uint8) int64 { return int64(bits.Len8(n)) - 1 }
+func log16u(n uint16) int64 { return int64(bits.Len16(n)) - 1 }
+func log32u(n uint32) int64 { return int64(bits.Len32(n)) - 1 }
+func log64u(n uint64) int64 { return int64(bits.Len64(n)) - 1 }
// isPowerOfTwoX functions report whether n is a power of 2.
func isPowerOfTwo[T int8 | int16 | int32 | int64](n T) bool {
return n > 0 && n&(n-1) == 0
}
+// isUnsignedPowerOfTwo reports whether n is an unsigned power of 2.
+func isUnsignedPowerOfTwo[T uint8 | uint16 | uint32 | uint64](n T) bool {
+ return n != 0 && n&(n-1) == 0
+}
+
// isUint64PowerOfTwo reports whether uint64(n) is a power of 2.
func isUint64PowerOfTwo(in int64) bool {
n := uint64(in)
@@ -2671,3 +2672,32 @@ func flagify(v *Value) bool {
v.AddArg(inner)
return true
}
+
+// PanicBoundsC contains a constant for a bounds failure.
+type PanicBoundsC struct {
+ C int64
+}
+
+// PanicBoundsCC contains 2 constants for a bounds failure.
+type PanicBoundsCC struct {
+ Cx int64
+ Cy int64
+}
+
+func (p PanicBoundsC) CanBeAnSSAAux() {
+}
+func (p PanicBoundsCC) CanBeAnSSAAux() {
+}
+
+func auxToPanicBoundsC(i Aux) PanicBoundsC {
+ return i.(PanicBoundsC)
+}
+func auxToPanicBoundsCC(i Aux) PanicBoundsCC {
+ return i.(PanicBoundsCC)
+}
+func panicBoundsCToAux(p PanicBoundsC) Aux {
+ return p
+}
+func panicBoundsCCToAux(p PanicBoundsCC) Aux {
+ return p
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index 9ece0e4eb7..0495438710 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -75,6 +75,14 @@ func rewriteValue386(v *Value) bool {
return rewriteValue386_Op386LEAL4(v)
case Op386LEAL8:
return rewriteValue386_Op386LEAL8(v)
+ case Op386LoweredPanicBoundsRC:
+ return rewriteValue386_Op386LoweredPanicBoundsRC(v)
+ case Op386LoweredPanicBoundsRR:
+ return rewriteValue386_Op386LoweredPanicBoundsRR(v)
+ case Op386LoweredPanicExtendRC:
+ return rewriteValue386_Op386LoweredPanicExtendRC(v)
+ case Op386LoweredPanicExtendRR:
+ return rewriteValue386_Op386LoweredPanicExtendRR(v)
case Op386MOVBLSX:
return rewriteValue386_Op386MOVBLSX(v)
case Op386MOVBLSXload:
@@ -558,9 +566,11 @@ func rewriteValue386(v *Value) bool {
v.Op = Op386ORL
return true
case OpPanicBounds:
- return rewriteValue386_OpPanicBounds(v)
+ v.Op = Op386LoweredPanicBoundsRR
+ return true
case OpPanicExtend:
- return rewriteValue386_OpPanicExtend(v)
+ v.Op = Op386LoweredPanicExtendRR
+ return true
case OpRotateLeft16:
v.Op = Op386ROLW
return true
@@ -3398,6 +3408,135 @@ func rewriteValue386_Op386LEAL8(v *Value) bool {
}
return false
}
+func rewriteValue386_Op386LoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVLconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:int64(c), Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ mem := v_1
+ v.reset(Op386LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: int64(c), Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVLconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:int64(c)}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386LoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: int64(c)})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVLconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:int64(c)}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(Op386LoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: int64(c)})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LoweredPanicExtendRC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicExtendRC [kind] {p} (MOVLconst [hi]) (MOVLconst [lo]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:int64(hi)<<32+int64(uint32(lo)), Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ hi := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ lo := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: int64(hi)<<32 + int64(uint32(lo)), Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LoweredPanicExtendRR(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicExtendRR [kind] hi lo (MOVLconst [c]) mem)
+ // result: (LoweredPanicExtendRC [kind] hi lo {PanicBoundsC{C:int64(c)}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ if v_2.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ mem := v_3
+ v.reset(Op386LoweredPanicExtendRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: int64(c)})
+ v.AddArg3(hi, lo, mem)
+ return true
+ }
+ // match: (LoweredPanicExtendRR [kind] (MOVLconst [hi]) (MOVLconst [lo]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:int64(hi)<<32 + int64(uint32(lo))}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ hi := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ lo := auxIntToInt32(v_1.AuxInt)
+ y := v_2
+ mem := v_3
+ v.reset(Op386LoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: int64(hi)<<32 + int64(uint32(lo))})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValue386_Op386MOVBLSX(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -9313,118 +9452,6 @@ func rewriteValue386_OpOffPtr(v *Value) bool {
return true
}
}
-func rewriteValue386_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(Op386LoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(Op386LoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(Op386LoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
-func rewriteValue386_OpPanicExtend(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicExtend [kind] hi lo y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicExtendA [kind] hi lo y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- hi := v_0
- lo := v_1
- y := v_2
- mem := v_3
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(Op386LoweredPanicExtendA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg4(hi, lo, y, mem)
- return true
- }
- // match: (PanicExtend [kind] hi lo y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicExtendB [kind] hi lo y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- hi := v_0
- lo := v_1
- y := v_2
- mem := v_3
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(Op386LoweredPanicExtendB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg4(hi, lo, y, mem)
- return true
- }
- // match: (PanicExtend [kind] hi lo y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicExtendC [kind] hi lo y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- hi := v_0
- lo := v_1
- y := v_2
- mem := v_3
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(Op386LoweredPanicExtendC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg4(hi, lo, y, mem)
- return true
- }
- return false
-}
func rewriteValue386_OpRsh16Ux16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index a3a7ba7ed6..f0b25d3c5d 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -215,6 +215,12 @@ func rewriteValueAMD64(v *Value) bool {
return rewriteValueAMD64_OpAMD64LEAQ4(v)
case OpAMD64LEAQ8:
return rewriteValueAMD64_OpAMD64LEAQ8(v)
+ case OpAMD64LoweredPanicBoundsCR:
+ return rewriteValueAMD64_OpAMD64LoweredPanicBoundsCR(v)
+ case OpAMD64LoweredPanicBoundsRC:
+ return rewriteValueAMD64_OpAMD64LoweredPanicBoundsRC(v)
+ case OpAMD64LoweredPanicBoundsRR:
+ return rewriteValueAMD64_OpAMD64LoweredPanicBoundsRR(v)
case OpAMD64MOVBELstore:
return rewriteValueAMD64_OpAMD64MOVBELstore(v)
case OpAMD64MOVBEQstore:
@@ -3431,7 +3437,8 @@ func rewriteValueAMD64(v *Value) bool {
case OpPairDotProdMaskedInt16x8:
return rewriteValueAMD64_OpPairDotProdMaskedInt16x8(v)
case OpPanicBounds:
- return rewriteValueAMD64_OpPanicBounds(v)
+ v.Op = OpAMD64LoweredPanicBoundsRR
+ return true
case OpPermute2Float32x16:
v.Op = OpAMD64VPERMI2PS512
return true
@@ -14127,6 +14134,86 @@ func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
}
return false
}
+func rewriteValueAMD64_OpAMD64LoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVQconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpAMD64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVQconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpAMD64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVQconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64LoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVQconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpAMD64LoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -45567,60 +45654,6 @@ func rewriteValueAMD64_OpPairDotProdMaskedInt16x8(v *Value) bool {
return true
}
}
-func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpAMD64LoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpAMD64LoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpAMD64LoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValueAMD64_OpPermute2MaskedFloat32x16(v *Value) bool {
v_3 := v.Args[3]
v_2 := v.Args[2]
@@ -55499,119 +55532,64 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
return true
}
// match: (Zero [s] destptr mem)
- // cond: s%16 != 0 && s > 16
- // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+ // cond: s >= 16 && s < 192
+ // result: (LoweredZero [s] destptr mem)
for {
s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
- if !(s%16 != 0 && s > 16) {
+ if !(s >= 16 && s < 192) {
break
}
- v.reset(OpZero)
- v.AuxInt = int64ToAuxInt(s - s%16)
- v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
- v0.AuxInt = int64ToAuxInt(s % 16)
- v0.AddArg(destptr)
- v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
- v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
- v1.AddArg2(destptr, mem)
- v.AddArg2(v0, v1)
- return true
- }
- // match: (Zero [16] destptr mem)
- // result: (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
- for {
- if auxIntToInt64(v.AuxInt) != 16 {
- break
- }
- destptr := v_0
- mem := v_1
- v.reset(OpAMD64MOVOstoreconst)
- v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v.reset(OpAMD64LoweredZero)
+ v.AuxInt = int64ToAuxInt(s)
v.AddArg2(destptr, mem)
return true
}
- // match: (Zero [32] destptr mem)
- // result: (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
- for {
- if auxIntToInt64(v.AuxInt) != 32 {
- break
- }
- destptr := v_0
- mem := v_1
- v.reset(OpAMD64MOVOstoreconst)
- v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
- v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
- v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
- v0.AddArg2(destptr, mem)
- v.AddArg2(destptr, v0)
- return true
- }
- // match: (Zero [48] destptr mem)
- // result: (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
+ // match: (Zero [s] destptr mem)
+ // cond: s >= 192 && s <= repZeroThreshold
+ // result: (LoweredZeroLoop [s] destptr mem)
for {
- if auxIntToInt64(v.AuxInt) != 48 {
- break
- }
+ s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
- v.reset(OpAMD64MOVOstoreconst)
- v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
- v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
- v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
- v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
- v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
- v1.AddArg2(destptr, mem)
- v0.AddArg2(destptr, v1)
- v.AddArg2(destptr, v0)
- return true
- }
- // match: (Zero [64] destptr mem)
- // result: (MOVOstoreconst [makeValAndOff(0,48)] destptr (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
- for {
- if auxIntToInt64(v.AuxInt) != 64 {
+ if !(s >= 192 && s <= repZeroThreshold) {
break
}
- destptr := v_0
- mem := v_1
- v.reset(OpAMD64MOVOstoreconst)
- v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48))
- v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
- v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
- v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
- v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
- v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
- v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
- v2.AddArg2(destptr, mem)
- v1.AddArg2(destptr, v2)
- v0.AddArg2(destptr, v1)
- v.AddArg2(destptr, v0)
+ v.reset(OpAMD64LoweredZeroLoop)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(destptr, mem)
return true
}
// match: (Zero [s] destptr mem)
- // cond: s > 64 && s <= 1024 && s%16 == 0
- // result: (DUFFZERO [s] destptr mem)
+ // cond: s > repZeroThreshold && s%8 != 0
+ // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
for {
s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
- if !(s > 64 && s <= 1024 && s%16 == 0) {
+ if !(s > repZeroThreshold && s%8 != 0) {
break
}
- v.reset(OpAMD64DUFFZERO)
- v.AuxInt = int64ToAuxInt(s)
- v.AddArg2(destptr, mem)
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 8)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
return true
}
// match: (Zero [s] destptr mem)
- // cond: s > 1024 && s%8 == 0
+ // cond: s > repZeroThreshold && s%8 == 0
// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
for {
s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
- if !(s > 1024 && s%8 == 0) {
+ if !(s > repZeroThreshold && s%8 == 0) {
break
}
v.reset(OpAMD64REPSTOSQ)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index 00517da4a1..44380cf8f5 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -151,6 +151,14 @@ func rewriteValueARM(v *Value) bool {
return rewriteValueARM_OpARMLessThan(v)
case OpARMLessThanU:
return rewriteValueARM_OpARMLessThanU(v)
+ case OpARMLoweredPanicBoundsRC:
+ return rewriteValueARM_OpARMLoweredPanicBoundsRC(v)
+ case OpARMLoweredPanicBoundsRR:
+ return rewriteValueARM_OpARMLoweredPanicBoundsRR(v)
+ case OpARMLoweredPanicExtendRC:
+ return rewriteValueARM_OpARMLoweredPanicExtendRC(v)
+ case OpARMLoweredPanicExtendRR:
+ return rewriteValueARM_OpARMLoweredPanicExtendRR(v)
case OpARMMOVBUload:
return rewriteValueARM_OpARMMOVBUload(v)
case OpARMMOVBUloadidx:
@@ -745,9 +753,11 @@ func rewriteValueARM(v *Value) bool {
v.Op = OpARMOR
return true
case OpPanicBounds:
- return rewriteValueARM_OpPanicBounds(v)
+ v.Op = OpARMLoweredPanicBoundsRR
+ return true
case OpPanicExtend:
- return rewriteValueARM_OpPanicExtend(v)
+ v.Op = OpARMLoweredPanicExtendRR
+ return true
case OpRotateLeft16:
return rewriteValueARM_OpRotateLeft16(v)
case OpRotateLeft32:
@@ -4548,6 +4558,135 @@ func rewriteValueARM_OpARMLessThanU(v *Value) bool {
}
return false
}
+func rewriteValueARM_OpARMLoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVWconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:int64(c), Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpARMLoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: int64(c), Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVWconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:int64(c)}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMLoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: int64(c)})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVWconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:int64(c)}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpARMLoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: int64(c)})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLoweredPanicExtendRC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicExtendRC [kind] {p} (MOVWconst [hi]) (MOVWconst [lo]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:int64(hi)<<32+int64(uint32(lo)), Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ hi := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ lo := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMLoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: int64(hi)<<32 + int64(uint32(lo)), Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLoweredPanicExtendRR(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicExtendRR [kind] hi lo (MOVWconst [c]) mem)
+ // result: (LoweredPanicExtendRC [kind] hi lo {PanicBoundsC{C:int64(c)}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ mem := v_3
+ v.reset(OpARMLoweredPanicExtendRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: int64(c)})
+ v.AddArg3(hi, lo, mem)
+ return true
+ }
+ // match: (LoweredPanicExtendRR [kind] (MOVWconst [hi]) (MOVWconst [lo]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:int64(hi)<<32 + int64(uint32(lo))}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ hi := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ lo := auxIntToInt32(v_1.AuxInt)
+ y := v_2
+ mem := v_3
+ v.reset(OpARMLoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: int64(hi)<<32 + int64(uint32(lo))})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -14969,118 +15108,6 @@ func rewriteValueARM_OpOffPtr(v *Value) bool {
return true
}
}
-func rewriteValueARM_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpARMLoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpARMLoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpARMLoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
-func rewriteValueARM_OpPanicExtend(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicExtend [kind] hi lo y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicExtendA [kind] hi lo y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- hi := v_0
- lo := v_1
- y := v_2
- mem := v_3
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpARMLoweredPanicExtendA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg4(hi, lo, y, mem)
- return true
- }
- // match: (PanicExtend [kind] hi lo y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicExtendB [kind] hi lo y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- hi := v_0
- lo := v_1
- y := v_2
- mem := v_3
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpARMLoweredPanicExtendB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg4(hi, lo, y, mem)
- return true
- }
- // match: (PanicExtend [kind] hi lo y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicExtendC [kind] hi lo y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- hi := v_0
- lo := v_1
- y := v_2
- mem := v_3
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpARMLoweredPanicExtendC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg4(hi, lo, y, mem)
- return true
- }
- return false
-}
func rewriteValueARM_OpRotateLeft16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 792967c001..32f0f55434 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -180,6 +180,12 @@ func rewriteValueARM64(v *Value) bool {
return rewriteValueARM64_OpARM64LessThanNoov(v)
case OpARM64LessThanU:
return rewriteValueARM64_OpARM64LessThanU(v)
+ case OpARM64LoweredPanicBoundsCR:
+ return rewriteValueARM64_OpARM64LoweredPanicBoundsCR(v)
+ case OpARM64LoweredPanicBoundsRC:
+ return rewriteValueARM64_OpARM64LoweredPanicBoundsRC(v)
+ case OpARM64LoweredPanicBoundsRR:
+ return rewriteValueARM64_OpARM64LoweredPanicBoundsRR(v)
case OpARM64MADD:
return rewriteValueARM64_OpARM64MADD(v)
case OpARM64MADDW:
@@ -936,7 +942,8 @@ func rewriteValueARM64(v *Value) bool {
v.Op = OpARM64OR
return true
case OpPanicBounds:
- return rewriteValueARM64_OpPanicBounds(v)
+ v.Op = OpARM64LoweredPanicBoundsRR
+ return true
case OpPopCount16:
return rewriteValueARM64_OpPopCount16(v)
case OpPopCount32:
@@ -1592,6 +1599,66 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
}
break
}
+ // match: (ADD x0 x1:(ANDshiftRA x2:(SLLconst [sl] y) z [63]))
+ // cond: x1.Uses == 1 && x2.Uses == 1
+ // result: (ADDshiftLL x0 (ANDshiftRA <y.Type> y z [63]) [sl])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64ANDshiftRA || auxIntToInt64(x1.AuxInt) != 63 {
+ continue
+ }
+ z := x1.Args[1]
+ x2 := x1.Args[0]
+ if x2.Op != OpARM64SLLconst {
+ continue
+ }
+ sl := auxIntToInt64(x2.AuxInt)
+ y := x2.Args[0]
+ if !(x1.Uses == 1 && x2.Uses == 1) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(sl)
+ v0 := b.NewValue0(v.Pos, OpARM64ANDshiftRA, y.Type)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg2(y, z)
+ v.AddArg2(x0, v0)
+ return true
+ }
+ break
+ }
+ // match: (ADD x0 x1:(ANDshiftLL x2:(SRAconst [63] z) y [sl]))
+ // cond: x1.Uses == 1 && x2.Uses == 1
+ // result: (ADDshiftLL x0 (ANDshiftRA <y.Type> y z [63]) [sl])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64ANDshiftLL {
+ continue
+ }
+ sl := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[1]
+ x2 := x1.Args[0]
+ if x2.Op != OpARM64SRAconst || auxIntToInt64(x2.AuxInt) != 63 {
+ continue
+ }
+ z := x2.Args[0]
+ if !(x1.Uses == 1 && x2.Uses == 1) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(sl)
+ v0 := b.NewValue0(v.Pos, OpARM64ANDshiftRA, y.Type)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg2(y, z)
+ v.AddArg2(x0, v0)
+ return true
+ }
+ break
+ }
return false
}
func rewriteValueARM64_OpARM64ADDSflags(v *Value) bool {
@@ -6982,6 +7049,86 @@ func rewriteValueARM64_OpARM64LessThanU(v *Value) bool {
}
return false
}
+func rewriteValueARM64_OpARM64LoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpARM64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpARM64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARM64LoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpARM64LoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValueARM64_OpARM64MADD(v *Value) bool {
v_2 := v.Args[2]
v_1 := v.Args[1]
@@ -19790,60 +19937,6 @@ func rewriteValueARM64_OpOffPtr(v *Value) bool {
return true
}
}
-func rewriteValueARM64_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpARM64LoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpARM64LoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpARM64LoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValueARM64_OpPopCount16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -25045,6 +25138,37 @@ func rewriteBlockARM64(b *Block) bool {
b.resetWithControl(BlockARM64FGE, cc)
return true
}
+ // match: (NZ sub:(SUB x y))
+ // cond: sub.Uses == 1
+ // result: (NE (CMP x y))
+ for b.Controls[0].Op == OpARM64SUB {
+ sub := b.Controls[0]
+ y := sub.Args[1]
+ x := sub.Args[0]
+ if !(sub.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(sub.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NZ sub:(SUBconst [c] y))
+ // cond: sub.Uses == 1
+ // result: (NE (CMPconst [c] y))
+ for b.Controls[0].Op == OpARM64SUBconst {
+ sub := b.Controls[0]
+ c := auxIntToInt64(sub.AuxInt)
+ y := sub.Args[0]
+ if !(sub.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(sub.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
// match: (NZ (ANDconst [c] x) yes no)
// cond: oneBit(c)
// result: (TBNZ [int64(ntz64(c))] x yes no)
@@ -25083,6 +25207,37 @@ func rewriteBlockARM64(b *Block) bool {
return true
}
case BlockARM64NZW:
+ // match: (NZW sub:(SUB x y))
+ // cond: sub.Uses == 1
+ // result: (NE (CMPW x y))
+ for b.Controls[0].Op == OpARM64SUB {
+ sub := b.Controls[0]
+ y := sub.Args[1]
+ x := sub.Args[0]
+ if !(sub.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(sub.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NZW sub:(SUBconst [c] y))
+ // cond: sub.Uses == 1
+ // result: (NE (CMPWconst [int32(c)] y))
+ for b.Controls[0].Op == OpARM64SUBconst {
+ sub := b.Controls[0]
+ c := auxIntToInt64(sub.AuxInt)
+ y := sub.Args[0]
+ if !(sub.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(sub.Pos, OpARM64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
// match: (NZW (ANDconst [c] x) yes no)
// cond: oneBit(int64(uint32(c)))
// result: (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
@@ -25312,6 +25467,34 @@ func rewriteBlockARM64(b *Block) bool {
return true
}
case BlockARM64UGT:
+ // match: (UGT (CMPconst [0] x))
+ // result: (NE (CMPconst [0] x))
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (UGT (CMPWconst [0] x))
+ // result: (NE (CMPWconst [0] x))
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
// match: (UGT (FlagConstant [fc]) yes no)
// cond: fc.ugt()
// result: (First yes no)
@@ -25346,6 +25529,34 @@ func rewriteBlockARM64(b *Block) bool {
return true
}
case BlockARM64ULE:
+ // match: (ULE (CMPconst [0] x))
+ // result: (EQ (CMPconst [0] x))
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (ULE (CMPWconst [0] x))
+ // result: (EQ (CMPWconst [0] x))
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
// match: (ULE (FlagConstant [fc]) yes no)
// cond: fc.ule()
// result: (First yes no)
@@ -25414,6 +25625,37 @@ func rewriteBlockARM64(b *Block) bool {
return true
}
case BlockARM64Z:
+ // match: (Z sub:(SUB x y))
+ // cond: sub.Uses == 1
+ // result: (EQ (CMP x y))
+ for b.Controls[0].Op == OpARM64SUB {
+ sub := b.Controls[0]
+ y := sub.Args[1]
+ x := sub.Args[0]
+ if !(sub.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(sub.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (Z sub:(SUBconst [c] y))
+ // cond: sub.Uses == 1
+ // result: (EQ (CMPconst [c] y))
+ for b.Controls[0].Op == OpARM64SUBconst {
+ sub := b.Controls[0]
+ c := auxIntToInt64(sub.AuxInt)
+ y := sub.Args[0]
+ if !(sub.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(sub.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
// match: (Z (ANDconst [c] x) yes no)
// cond: oneBit(c)
// result: (TBZ [int64(ntz64(c))] x yes no)
@@ -25452,6 +25694,37 @@ func rewriteBlockARM64(b *Block) bool {
return true
}
case BlockARM64ZW:
+ // match: (ZW sub:(SUB x y))
+ // cond: sub.Uses == 1
+ // result: (EQ (CMPW x y))
+ for b.Controls[0].Op == OpARM64SUB {
+ sub := b.Controls[0]
+ y := sub.Args[1]
+ x := sub.Args[0]
+ if !(sub.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(sub.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (ZW sub:(SUBconst [c] y))
+ // cond: sub.Uses == 1
+ // result: (EQ (CMPWconst [int32(c)] y))
+ for b.Controls[0].Op == OpARM64SUBconst {
+ sub := b.Controls[0]
+ c := auxIntToInt64(sub.AuxInt)
+ y := sub.Args[0]
+ if !(sub.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(sub.Pos, OpARM64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
// match: (ZW (ANDconst [c] x) yes no)
// cond: oneBit(int64(uint32(c)))
// result: (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
index 7c46ed7727..83242413f0 100644
--- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go
+++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
@@ -330,6 +330,12 @@ func rewriteValueLOONG64(v *Value) bool {
return rewriteValueLOONG64_OpLOONG64DIVV(v)
case OpLOONG64DIVVU:
return rewriteValueLOONG64_OpLOONG64DIVVU(v)
+ case OpLOONG64LoweredPanicBoundsCR:
+ return rewriteValueLOONG64_OpLOONG64LoweredPanicBoundsCR(v)
+ case OpLOONG64LoweredPanicBoundsRC:
+ return rewriteValueLOONG64_OpLOONG64LoweredPanicBoundsRC(v)
+ case OpLOONG64LoweredPanicBoundsRR:
+ return rewriteValueLOONG64_OpLOONG64LoweredPanicBoundsRR(v)
case OpLOONG64MASKEQZ:
return rewriteValueLOONG64_OpLOONG64MASKEQZ(v)
case OpLOONG64MASKNEZ:
@@ -669,7 +675,8 @@ func rewriteValueLOONG64(v *Value) bool {
v.Op = OpLOONG64OR
return true
case OpPanicBounds:
- return rewriteValueLOONG64_OpPanicBounds(v)
+ v.Op = OpLOONG64LoweredPanicBoundsRR
+ return true
case OpPopCount16:
return rewriteValueLOONG64_OpPopCount16(v)
case OpPopCount32:
@@ -2070,6 +2077,86 @@ func rewriteValueLOONG64_OpLOONG64DIVVU(v *Value) bool {
}
return false
}
+func rewriteValueLOONG64_OpLOONG64LoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVVconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpLOONG64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64LoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVVconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpLOONG64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64LoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVVconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpLOONG64LoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVVconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpLOONG64LoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValueLOONG64_OpLOONG64MASKEQZ(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -2378,6 +2465,21 @@ func rewriteValueLOONG64_OpLOONG64MOVBUreg(v *Value) bool {
v.AddArg(x)
return true
}
+ // match: (MOVBUreg x:(ANDconst [c] y))
+ // cond: c >= 0 && int64(uint8(c)) == c
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpLOONG64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ if !(c >= 0 && int64(uint8(c)) == c) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
return false
}
func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool {
@@ -2526,6 +2628,21 @@ func rewriteValueLOONG64_OpLOONG64MOVBreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(int8(c)))
return true
}
+ // match: (MOVBreg x:(ANDconst [c] y))
+ // cond: c >= 0 && int64(int8(c)) == c
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpLOONG64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ if !(c >= 0 && int64(int8(c)) == c) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
return false
}
func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool {
@@ -3614,6 +3731,21 @@ func rewriteValueLOONG64_OpLOONG64MOVHUreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(uint16(c)))
return true
}
+ // match: (MOVHUreg x:(ANDconst [c] y))
+ // cond: c >= 0 && int64(uint16(c)) == c
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpLOONG64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ if !(c >= 0 && int64(uint16(c)) == c) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
return false
}
func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool {
@@ -3806,6 +3938,21 @@ func rewriteValueLOONG64_OpLOONG64MOVHreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(int16(c)))
return true
}
+ // match: (MOVHreg x:(ANDconst [c] y))
+ // cond: c >= 0 && int64(int16(c)) == c
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpLOONG64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ if !(c >= 0 && int64(int16(c)) == c) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
return false
}
func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool {
@@ -4821,6 +4968,21 @@ func rewriteValueLOONG64_OpLOONG64MOVWUreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(uint32(c)))
return true
}
+ // match: (MOVWUreg x:(ANDconst [c] y))
+ // cond: c >= 0 && int64(uint32(c)) == c
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpLOONG64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ if !(c >= 0 && int64(uint32(c)) == c) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
return false
}
func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool {
@@ -5046,6 +5208,21 @@ func rewriteValueLOONG64_OpLOONG64MOVWreg(v *Value) bool {
v.AuxInt = int64ToAuxInt(int64(int32(c)))
return true
}
+ // match: (MOVWreg x:(ANDconst [c] y))
+ // cond: c >= 0 && int64(int32(c)) == c
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpLOONG64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ if !(c >= 0 && int64(int32(c)) == c) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
return false
}
func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool {
@@ -5360,20 +5537,8 @@ func rewriteValueLOONG64_OpLOONG64MOVWstorezeroidx(v *Value) bool {
func rewriteValueLOONG64_OpLOONG64MULV(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
- // match: (MULV x (MOVVconst [-1]))
- // result: (NEGV x)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != -1 {
- continue
- }
- v.reset(OpLOONG64NEGV)
- v.AddArg(x)
- return true
- }
- break
- }
+ b := v.Block
+ config := b.Func.Config
// match: (MULV _ (MOVVconst [0]))
// result: (MOVVconst [0])
for {
@@ -5401,8 +5566,8 @@ func rewriteValueLOONG64_OpLOONG64MULV(v *Value) bool {
break
}
// match: (MULV x (MOVVconst [c]))
- // cond: isPowerOfTwo(c)
- // result: (SLLVconst [log64(c)] x)
+ // cond: canMulStrengthReduce(config, c)
+ // result: {mulStrengthReduce(v, x, c)}
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
x := v_0
@@ -5410,12 +5575,10 @@ func rewriteValueLOONG64_OpLOONG64MULV(v *Value) bool {
continue
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(canMulStrengthReduce(config, c)) {
continue
}
- v.reset(OpLOONG64SLLVconst)
- v.AuxInt = int64ToAuxInt(log64(c))
- v.AddArg(x)
+ v.copyOf(mulStrengthReduce(v, x, c))
return true
}
break
@@ -9016,60 +9179,6 @@ func rewriteValueLOONG64_OpOffPtr(v *Value) bool {
return true
}
}
-func rewriteValueLOONG64_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpLOONG64LoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpLOONG64LoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpLOONG64LoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValueLOONG64_OpPopCount16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64latelower.go b/src/cmd/compile/internal/ssa/rewriteLOONG64latelower.go
new file mode 100644
index 0000000000..ef9b83192c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteLOONG64latelower.go
@@ -0,0 +1,29 @@
+// Code generated from _gen/LOONG64latelower.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+func rewriteValueLOONG64latelower(v *Value) bool {
+ switch v.Op {
+ case OpLOONG64SLLVconst:
+ return rewriteValueLOONG64latelower_OpLOONG64SLLVconst(v)
+ }
+ return false
+}
+func rewriteValueLOONG64latelower_OpLOONG64SLLVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLVconst [1] x)
+ // result: (ADDV x x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ v.reset(OpLOONG64ADDV)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
+func rewriteBlockLOONG64latelower(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go
index 4c5edb8694..fda02e64d1 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go
@@ -279,6 +279,14 @@ func rewriteValueMIPS(v *Value) bool {
return rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v)
case OpMIPSLoweredAtomicStore32:
return rewriteValueMIPS_OpMIPSLoweredAtomicStore32(v)
+ case OpMIPSLoweredPanicBoundsRC:
+ return rewriteValueMIPS_OpMIPSLoweredPanicBoundsRC(v)
+ case OpMIPSLoweredPanicBoundsRR:
+ return rewriteValueMIPS_OpMIPSLoweredPanicBoundsRR(v)
+ case OpMIPSLoweredPanicExtendRC:
+ return rewriteValueMIPS_OpMIPSLoweredPanicExtendRC(v)
+ case OpMIPSLoweredPanicExtendRR:
+ return rewriteValueMIPS_OpMIPSLoweredPanicExtendRR(v)
case OpMIPSMOVBUload:
return rewriteValueMIPS_OpMIPSMOVBUload(v)
case OpMIPSMOVBUreg:
@@ -447,9 +455,11 @@ func rewriteValueMIPS(v *Value) bool {
v.Op = OpMIPSOR
return true
case OpPanicBounds:
- return rewriteValueMIPS_OpPanicBounds(v)
+ v.Op = OpMIPSLoweredPanicBoundsRR
+ return true
case OpPanicExtend:
- return rewriteValueMIPS_OpPanicExtend(v)
+ v.Op = OpMIPSLoweredPanicExtendRR
+ return true
case OpPubBarrier:
v.Op = OpMIPSLoweredPubBarrier
return true
@@ -2435,6 +2445,135 @@ func rewriteValueMIPS_OpMIPSLoweredAtomicStore32(v *Value) bool {
}
return false
}
+func rewriteValueMIPS_OpMIPSLoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVWconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:int64(c), Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpMIPSLoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: int64(c), Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSLoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVWconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:int64(c)}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpMIPSLoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: int64(c)})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVWconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:int64(c)}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpMIPSLoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: int64(c)})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSLoweredPanicExtendRC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicExtendRC [kind] {p} (MOVWconst [hi]) (MOVWconst [lo]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:int64(hi)<<32+int64(uint32(lo)), Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ hi := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ lo := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpMIPSLoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: int64(hi)<<32 + int64(uint32(lo)), Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSLoweredPanicExtendRR(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicExtendRR [kind] hi lo (MOVWconst [c]) mem)
+ // result: (LoweredPanicExtendRC [kind] hi lo {PanicBoundsC{C:int64(c)}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ if v_2.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ mem := v_3
+ v.reset(OpMIPSLoweredPanicExtendRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: int64(c)})
+ v.AddArg3(hi, lo, mem)
+ return true
+ }
+ // match: (LoweredPanicExtendRR [kind] (MOVWconst [hi]) (MOVWconst [lo]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:int64(hi)<<32 + int64(uint32(lo))}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ hi := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ lo := auxIntToInt32(v_1.AuxInt)
+ y := v_2
+ mem := v_3
+ v.reset(OpMIPSLoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: int64(hi)<<32 + int64(uint32(lo))})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -4058,8 +4197,8 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
break
}
// match: (MUL (MOVWconst [c]) x )
- // cond: isPowerOfTwo(int64(uint32(c)))
- // result: (SLLconst [int32(log2uint32(int64(c)))] x)
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (SLLconst [int32(log32u(uint32(c)))] x)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
if v_0.Op != OpMIPSMOVWconst {
@@ -4067,11 +4206,11 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
}
c := auxIntToInt32(v_0.AuxInt)
x := v_1
- if !(isPowerOfTwo(int64(uint32(c)))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
continue
}
v.reset(OpMIPSSLLconst)
- v.AuxInt = int32ToAuxInt(int32(log2uint32(int64(c))))
+ v.AuxInt = int32ToAuxInt(int32(log32u(uint32(c))))
v.AddArg(x)
return true
}
@@ -5586,118 +5725,6 @@ func rewriteValueMIPS_OpOffPtr(v *Value) bool {
return true
}
}
-func rewriteValueMIPS_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpMIPSLoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpMIPSLoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpMIPSLoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
-func rewriteValueMIPS_OpPanicExtend(v *Value) bool {
- v_3 := v.Args[3]
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicExtend [kind] hi lo y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicExtendA [kind] hi lo y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- hi := v_0
- lo := v_1
- y := v_2
- mem := v_3
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpMIPSLoweredPanicExtendA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg4(hi, lo, y, mem)
- return true
- }
- // match: (PanicExtend [kind] hi lo y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicExtendB [kind] hi lo y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- hi := v_0
- lo := v_1
- y := v_2
- mem := v_3
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpMIPSLoweredPanicExtendB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg4(hi, lo, y, mem)
- return true
- }
- // match: (PanicExtend [kind] hi lo y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicExtendC [kind] hi lo y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- hi := v_0
- lo := v_1
- y := v_2
- mem := v_3
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpMIPSLoweredPanicExtendC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg4(hi, lo, y, mem)
- return true
- }
- return false
-}
func rewriteValueMIPS_OpRotateLeft16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -6611,8 +6638,8 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool {
break
}
// match: (Select0 (MULTU (MOVWconst [c]) x ))
- // cond: isPowerOfTwo(int64(uint32(c)))
- // result: (SRLconst [int32(32-log2uint32(int64(c)))] x)
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (SRLconst [int32(32-log32u(uint32(c)))] x)
for {
if v_0.Op != OpMIPSMULTU {
break
@@ -6626,11 +6653,11 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool {
}
c := auxIntToInt32(v_0_0.AuxInt)
x := v_0_1
- if !(isPowerOfTwo(int64(uint32(c)))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
continue
}
v.reset(OpMIPSSRLconst)
- v.AuxInt = int32ToAuxInt(int32(32 - log2uint32(int64(c))))
+ v.AuxInt = int32ToAuxInt(int32(32 - log32u(uint32(c))))
v.AddArg(x)
return true
}
@@ -6807,8 +6834,8 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool {
break
}
// match: (Select1 (MULTU (MOVWconst [c]) x ))
- // cond: isPowerOfTwo(int64(uint32(c)))
- // result: (SLLconst [int32(log2uint32(int64(c)))] x)
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (SLLconst [int32(log32u(uint32(c)))] x)
for {
if v_0.Op != OpMIPSMULTU {
break
@@ -6822,11 +6849,11 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool {
}
c := auxIntToInt32(v_0_0.AuxInt)
x := v_0_1
- if !(isPowerOfTwo(int64(uint32(c)))) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
continue
}
v.reset(OpMIPSSLLconst)
- v.AuxInt = int32ToAuxInt(int32(log2uint32(int64(c))))
+ v.AuxInt = int32ToAuxInt(int32(log32u(uint32(c))))
v.AddArg(x)
return true
}
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
index c30815cefb..c270ee4d83 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
@@ -316,6 +316,12 @@ func rewriteValueMIPS64(v *Value) bool {
return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32(v)
case OpMIPS64LoweredAtomicStore64:
return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v)
+ case OpMIPS64LoweredPanicBoundsCR:
+ return rewriteValueMIPS64_OpMIPS64LoweredPanicBoundsCR(v)
+ case OpMIPS64LoweredPanicBoundsRC:
+ return rewriteValueMIPS64_OpMIPS64LoweredPanicBoundsRC(v)
+ case OpMIPS64LoweredPanicBoundsRR:
+ return rewriteValueMIPS64_OpMIPS64LoweredPanicBoundsRR(v)
case OpMIPS64MOVBUload:
return rewriteValueMIPS64_OpMIPS64MOVBUload(v)
case OpMIPS64MOVBUreg:
@@ -501,7 +507,8 @@ func rewriteValueMIPS64(v *Value) bool {
v.Op = OpMIPS64OR
return true
case OpPanicBounds:
- return rewriteValueMIPS64_OpPanicBounds(v)
+ v.Op = OpMIPS64LoweredPanicBoundsRR
+ return true
case OpPubBarrier:
v.Op = OpMIPS64LoweredPubBarrier
return true
@@ -2757,6 +2764,86 @@ func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v *Value) bool {
}
return false
}
+func rewriteValueMIPS64_OpMIPS64LoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVVconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpMIPS64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVVconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpMIPS64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVVconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpMIPS64LoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVVconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpMIPS64LoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
@@ -6364,60 +6451,6 @@ func rewriteValueMIPS64_OpOffPtr(v *Value) bool {
return true
}
}
-func rewriteValueMIPS64_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpMIPS64LoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpMIPS64LoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpMIPS64LoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValueMIPS64_OpRotateLeft16(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index 95c6489a51..bbdb817900 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -486,7 +486,8 @@ func rewriteValueRISCV64(v *Value) bool {
v.Op = OpRISCV64OR
return true
case OpPanicBounds:
- return rewriteValueRISCV64_OpPanicBounds(v)
+ v.Op = OpRISCV64LoweredPanicBoundsRR
+ return true
case OpPopCount16:
return rewriteValueRISCV64_OpPopCount16(v)
case OpPopCount32:
@@ -532,6 +533,12 @@ func rewriteValueRISCV64(v *Value) bool {
return rewriteValueRISCV64_OpRISCV64FSUBD(v)
case OpRISCV64FSUBS:
return rewriteValueRISCV64_OpRISCV64FSUBS(v)
+ case OpRISCV64LoweredPanicBoundsCR:
+ return rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsCR(v)
+ case OpRISCV64LoweredPanicBoundsRC:
+ return rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsRC(v)
+ case OpRISCV64LoweredPanicBoundsRR:
+ return rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsRR(v)
case OpRISCV64MOVBUload:
return rewriteValueRISCV64_OpRISCV64MOVBUload(v)
case OpRISCV64MOVBUreg:
@@ -3416,60 +3423,6 @@ func rewriteValueRISCV64_OpOffPtr(v *Value) bool {
return true
}
}
-func rewriteValueRISCV64_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpRISCV64LoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpRISCV64LoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpRISCV64LoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValueRISCV64_OpPopCount16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -4239,6 +4192,86 @@ func rewriteValueRISCV64_OpRISCV64FSUBS(v *Value) bool {
}
return false
}
+func rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpRISCV64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpRISCV64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64LoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpRISCV64LoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpRISCV64LoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index 2e7492501a..07dbe7bf7a 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -368,6 +368,18 @@ func rewriteValueS390X(v *Value) bool {
return rewriteValueS390X_OpLsh8x64(v)
case OpLsh8x8:
return rewriteValueS390X_OpLsh8x8(v)
+ case OpMax32F:
+ v.Op = OpS390XWFMAXSB
+ return true
+ case OpMax64F:
+ v.Op = OpS390XWFMAXDB
+ return true
+ case OpMin32F:
+ v.Op = OpS390XWFMINSB
+ return true
+ case OpMin64F:
+ v.Op = OpS390XWFMINDB
+ return true
case OpMod16:
return rewriteValueS390X_OpMod16(v)
case OpMod16u:
@@ -465,7 +477,8 @@ func rewriteValueS390X(v *Value) bool {
v.Op = OpS390XORW
return true
case OpPanicBounds:
- return rewriteValueS390X_OpPanicBounds(v)
+ v.Op = OpS390XLoweredPanicBoundsRR
+ return true
case OpPopCount16:
return rewriteValueS390X_OpPopCount16(v)
case OpPopCount32:
@@ -632,6 +645,12 @@ func rewriteValueS390X(v *Value) bool {
return rewriteValueS390X_OpS390XLTDBR(v)
case OpS390XLTEBR:
return rewriteValueS390X_OpS390XLTEBR(v)
+ case OpS390XLoweredPanicBoundsCR:
+ return rewriteValueS390X_OpS390XLoweredPanicBoundsCR(v)
+ case OpS390XLoweredPanicBoundsRC:
+ return rewriteValueS390X_OpS390XLoweredPanicBoundsRC(v)
+ case OpS390XLoweredPanicBoundsRR:
+ return rewriteValueS390X_OpS390XLoweredPanicBoundsRR(v)
case OpS390XLoweredRound32F:
return rewriteValueS390X_OpS390XLoweredRound32F(v)
case OpS390XLoweredRound64F:
@@ -3959,60 +3978,6 @@ func rewriteValueS390X_OpOffPtr(v *Value) bool {
return true
}
}
-func rewriteValueS390X_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpS390XLoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpS390XLoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpS390XLoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValueS390X_OpPopCount16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
@@ -8135,6 +8100,86 @@ func rewriteValueS390X_OpS390XLTEBR(v *Value) bool {
}
return false
}
+func rewriteValueS390X_OpS390XLoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpS390XLoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpS390XLoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpS390XLoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpS390XLoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValueS390X_OpS390XLoweredRound32F(v *Value) bool {
v_0 := v.Args[0]
// match: (LoweredRound32F x:(FMOVSconst))
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index b7a4ff95d1..fe61ceaff2 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -56,6 +56,8 @@ func rewriteValuegeneric(v *Value) bool {
return rewriteValuegeneric_OpCom64(v)
case OpCom8:
return rewriteValuegeneric_OpCom8(v)
+ case OpCondSelect:
+ return rewriteValuegeneric_OpCondSelect(v)
case OpConstInterface:
return rewriteValuegeneric_OpConstInterface(v)
case OpConstSlice:
@@ -5694,6 +5696,504 @@ func rewriteValuegeneric_OpCom8(v *Value) bool {
}
return false
}
+func rewriteValuegeneric_OpCondSelect(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (CondSelect x _ (ConstBool [true ]))
+ // result: x
+ for {
+ x := v_0
+ if v_2.Op != OpConstBool || auxIntToBool(v_2.AuxInt) != true {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CondSelect _ y (ConstBool [false]))
+ // result: y
+ for {
+ y := v_1
+ if v_2.Op != OpConstBool || auxIntToBool(v_2.AuxInt) != false {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CondSelect x x _)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CondSelect (Add8 <t> x (Const8 [1])) x bool)
+ // cond: config.arch != "arm64"
+ // result: (Add8 x (CvtBoolToUint8 <t> bool))
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst8 || auxIntToInt8(v_0_1.AuxInt) != 1 || x != v_1 {
+ continue
+ }
+ bool := v_2
+ if !(config.arch != "arm64") {
+ continue
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, t)
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (CondSelect (Add64 <t> x (Const64 [1])) x bool)
+ // cond: config.arch != "arm64"
+ // result: (Add64 x (ZeroExt8to64 <t> (CvtBoolToUint8 <types.Types[types.TUINT8]> bool)))
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ continue
+ }
+ bool := v_2
+ if !(config.arch != "arm64") {
+ continue
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, t)
+ v1 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v1.AddArg(bool)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (CondSelect (Add32 <t> x (Const32 [1])) x bool)
+ // cond: config.arch != "arm64"
+ // result: (Add32 x (ZeroExt8to32 <t> (CvtBoolToUint8 <types.Types[types.TUINT8]> bool)))
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst32 || auxIntToInt32(v_0_1.AuxInt) != 1 || x != v_1 {
+ continue
+ }
+ bool := v_2
+ if !(config.arch != "arm64") {
+ continue
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, t)
+ v1 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v1.AddArg(bool)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (CondSelect (Add16 <t> x (Const16 [1])) x bool)
+ // cond: config.arch != "arm64"
+ // result: (Add16 x (ZeroExt8to16 <t> (CvtBoolToUint8 <types.Types[types.TUINT8]> bool)))
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst16 || auxIntToInt16(v_0_1.AuxInt) != 1 || x != v_1 {
+ continue
+ }
+ bool := v_2
+ if !(config.arch != "arm64") {
+ continue
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to16, t)
+ v1 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v1.AddArg(bool)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (CondSelect (Add8 <t> x (Const8 [-1])) x bool)
+ // result: (Sub8 x (CvtBoolToUint8 <t> bool))
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst8 || auxIntToInt8(v_0_1.AuxInt) != -1 || x != v_1 {
+ continue
+ }
+ bool := v_2
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, t)
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (CondSelect (Add64 <t> x (Const64 [-1])) x bool)
+ // result: (Sub64 x (ZeroExt8to64 <t> (CvtBoolToUint8 <types.Types[types.TUINT8]> bool)))
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != -1 || x != v_1 {
+ continue
+ }
+ bool := v_2
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, t)
+ v1 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v1.AddArg(bool)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (CondSelect (Add32 <t> x (Const32 [-1])) x bool)
+ // result: (Sub32 x (ZeroExt8to32 <t> (CvtBoolToUint8 <types.Types[types.TUINT8]> bool)))
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst32 || auxIntToInt32(v_0_1.AuxInt) != -1 || x != v_1 {
+ continue
+ }
+ bool := v_2
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, t)
+ v1 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v1.AddArg(bool)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (CondSelect (Add16 <t> x (Const16 [-1])) x bool)
+ // result: (Sub16 x (ZeroExt8to16 <t> (CvtBoolToUint8 <types.Types[types.TUINT8]> bool)))
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst16 || auxIntToInt16(v_0_1.AuxInt) != -1 || x != v_1 {
+ continue
+ }
+ bool := v_2
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to16, t)
+ v1 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v1.AddArg(bool)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (CondSelect (Lsh64x64 x (Const64 [1])) x bool)
+ // result: (Lsh64x8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ break
+ }
+ bool := v_2
+ v.reset(OpLsh64x8)
+ v.AuxInt = boolToAuxInt(true)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (CondSelect (Lsh32x64 x (Const64 [1])) x bool)
+ // result: (Lsh32x8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ break
+ }
+ bool := v_2
+ v.reset(OpLsh32x8)
+ v.AuxInt = boolToAuxInt(true)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (CondSelect (Lsh16x64 x (Const64 [1])) x bool)
+ // result: (Lsh16x8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+ for {
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ break
+ }
+ bool := v_2
+ v.reset(OpLsh16x8)
+ v.AuxInt = boolToAuxInt(true)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (CondSelect (Lsh8x64 x (Const64 [1])) x bool)
+ // result: (Lsh8x8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+ for {
+ if v_0.Op != OpLsh8x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ break
+ }
+ bool := v_2
+ v.reset(OpLsh8x8)
+ v.AuxInt = boolToAuxInt(true)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (CondSelect (Rsh64x64 x (Const64 [1])) x bool)
+ // result: (Rsh64x8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+ for {
+ if v_0.Op != OpRsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ break
+ }
+ bool := v_2
+ v.reset(OpRsh64x8)
+ v.AuxInt = boolToAuxInt(true)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (CondSelect (Rsh32x64 x (Const64 [1])) x bool)
+ // result: (Rsh32x8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+ for {
+ if v_0.Op != OpRsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ break
+ }
+ bool := v_2
+ v.reset(OpRsh32x8)
+ v.AuxInt = boolToAuxInt(true)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (CondSelect (Rsh16x64 x (Const64 [1])) x bool)
+ // result: (Rsh16x8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+ for {
+ if v_0.Op != OpRsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ break
+ }
+ bool := v_2
+ v.reset(OpRsh16x8)
+ v.AuxInt = boolToAuxInt(true)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (CondSelect (Rsh8x64 x (Const64 [1])) x bool)
+ // result: (Rsh8x8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+ for {
+ if v_0.Op != OpRsh8x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ break
+ }
+ bool := v_2
+ v.reset(OpRsh8x8)
+ v.AuxInt = boolToAuxInt(true)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (CondSelect (Rsh64Ux64 x (Const64 [1])) x bool)
+ // result: (Rsh64Ux8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+ for {
+ if v_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ break
+ }
+ bool := v_2
+ v.reset(OpRsh64Ux8)
+ v.AuxInt = boolToAuxInt(true)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (CondSelect (Rsh32Ux64 x (Const64 [1])) x bool)
+ // result: (Rsh32Ux8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+ for {
+ if v_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ break
+ }
+ bool := v_2
+ v.reset(OpRsh32Ux8)
+ v.AuxInt = boolToAuxInt(true)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (CondSelect (Rsh16Ux64 x (Const64 [1])) x bool)
+ // result: (Rsh16Ux8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+ for {
+ if v_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ break
+ }
+ bool := v_2
+ v.reset(OpRsh16Ux8)
+ v.AuxInt = boolToAuxInt(true)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (CondSelect (Rsh8Ux64 x (Const64 [1])) x bool)
+ // result: (Rsh8Ux8 [true] x (CvtBoolToUint8 <types.Types[types.TUINT8]> bool))
+ for {
+ if v_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 1 || x != v_1 {
+ break
+ }
+ bool := v_2
+ v.reset(OpRsh8Ux8)
+ v.AuxInt = boolToAuxInt(true)
+ v0 := b.NewValue0(v.Pos, OpCvtBoolToUint8, types.Types[types.TUINT8])
+ v0.AddArg(bool)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
func rewriteValuegeneric_OpConstInterface(v *Value) bool {
b := v.Block
typ := &b.Func.Config.Types
@@ -6490,20 +6990,20 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool {
return true
}
// match: (Div16u n (Const16 [c]))
- // cond: isPowerOfTwo(c)
- // result: (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+ // cond: isUnsignedPowerOfTwo(uint16(c))
+ // result: (Rsh16Ux64 n (Const64 <typ.UInt64> [log16u(uint16(c))]))
for {
n := v_0
if v_1.Op != OpConst16 {
break
}
c := auxIntToInt16(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint16(c))) {
break
}
v.reset(OpRsh16Ux64)
v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v0.AuxInt = int64ToAuxInt(log16(c))
+ v0.AuxInt = int64ToAuxInt(log16u(uint16(c)))
v.AddArg2(n, v0)
return true
}
@@ -6900,20 +7400,20 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool {
return true
}
// match: (Div32u n (Const32 [c]))
- // cond: isPowerOfTwo(c)
- // result: (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+ // cond: isUnsignedPowerOfTwo(uint32(c))
+ // result: (Rsh32Ux64 n (Const64 <typ.UInt64> [log32u(uint32(c))]))
for {
n := v_0
if v_1.Op != OpConst32 {
break
}
c := auxIntToInt32(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v.reset(OpRsh32Ux64)
v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v0.AuxInt = int64ToAuxInt(log32(c))
+ v0.AuxInt = int64ToAuxInt(log32u(uint32(c)))
v.AddArg2(n, v0)
return true
}
@@ -7339,33 +7839,20 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool {
return true
}
// match: (Div64u n (Const64 [c]))
- // cond: isPowerOfTwo(c)
- // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+ // cond: isUnsignedPowerOfTwo(uint64(c))
+ // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [log64u(uint64(c))]))
for {
n := v_0
if v_1.Op != OpConst64 {
break
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
- break
- }
- v.reset(OpRsh64Ux64)
- v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v0.AuxInt = int64ToAuxInt(log64(c))
- v.AddArg2(n, v0)
- return true
- }
- // match: (Div64u n (Const64 [-1<<63]))
- // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [63]))
- for {
- n := v_0
- if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v.reset(OpRsh64Ux64)
v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v0.AuxInt = int64ToAuxInt(63)
+ v0.AuxInt = int64ToAuxInt(log64u(uint64(c)))
v.AddArg2(n, v0)
return true
}
@@ -7675,20 +8162,20 @@ func rewriteValuegeneric_OpDiv8u(v *Value) bool {
return true
}
// match: (Div8u n (Const8 [c]))
- // cond: isPowerOfTwo(c)
- // result: (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+ // cond: isUnsignedPowerOfTwo(uint8(c))
+ // result: (Rsh8Ux64 n (Const64 <typ.UInt64> [log8u(uint8(c))]))
for {
n := v_0
if v_1.Op != OpConst8 {
break
}
c := auxIntToInt8(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint8(c))) {
break
}
v.reset(OpRsh8Ux64)
v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
- v0.AuxInt = int64ToAuxInt(log8(c))
+ v0.AuxInt = int64ToAuxInt(log8u(uint8(c)))
v.AddArg2(n, v0)
return true
}
@@ -16419,7 +16906,7 @@ func rewriteValuegeneric_OpMod16u(v *Value) bool {
return true
}
// match: (Mod16u <t> n (Const16 [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isUnsignedPowerOfTwo(uint16(c))
// result: (And16 n (Const16 <t> [c-1]))
for {
t := v.Type
@@ -16428,7 +16915,7 @@ func rewriteValuegeneric_OpMod16u(v *Value) bool {
break
}
c := auxIntToInt16(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint16(c))) {
break
}
v.reset(OpAnd16)
@@ -16573,7 +17060,7 @@ func rewriteValuegeneric_OpMod32u(v *Value) bool {
return true
}
// match: (Mod32u <t> n (Const32 [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isUnsignedPowerOfTwo(uint32(c))
// result: (And32 n (Const32 <t> [c-1]))
for {
t := v.Type
@@ -16582,7 +17069,7 @@ func rewriteValuegeneric_OpMod32u(v *Value) bool {
break
}
c := auxIntToInt32(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint32(c))) {
break
}
v.reset(OpAnd32)
@@ -16738,7 +17225,7 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool {
return true
}
// match: (Mod64u <t> n (Const64 [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isUnsignedPowerOfTwo(uint64(c))
// result: (And64 n (Const64 <t> [c-1]))
for {
t := v.Type
@@ -16747,7 +17234,7 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool {
break
}
c := auxIntToInt64(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint64(c))) {
break
}
v.reset(OpAnd64)
@@ -16756,20 +17243,6 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool {
v.AddArg2(n, v0)
return true
}
- // match: (Mod64u <t> n (Const64 [-1<<63]))
- // result: (And64 n (Const64 <t> [1<<63-1]))
- for {
- t := v.Type
- n := v_0
- if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 {
- break
- }
- v.reset(OpAnd64)
- v0 := b.NewValue0(v.Pos, OpConst64, t)
- v0.AuxInt = int64ToAuxInt(1<<63 - 1)
- v.AddArg2(n, v0)
- return true
- }
// match: (Mod64u <t> x (Const64 [c]))
// cond: x.Op != OpConst64 && c > 0 && umagicOK64(c)
// result: (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
@@ -16906,7 +17379,7 @@ func rewriteValuegeneric_OpMod8u(v *Value) bool {
return true
}
// match: (Mod8u <t> n (Const8 [c]))
- // cond: isPowerOfTwo(c)
+ // cond: isUnsignedPowerOfTwo(uint8(c))
// result: (And8 n (Const8 <t> [c-1]))
for {
t := v.Type
@@ -16915,7 +17388,7 @@ func rewriteValuegeneric_OpMod8u(v *Value) bool {
break
}
c := auxIntToInt8(v_1.AuxInt)
- if !(isPowerOfTwo(c)) {
+ if !(isUnsignedPowerOfTwo(uint8(c))) {
break
}
v.reset(OpAnd8)
@@ -29386,34 +29859,15 @@ func rewriteValuegeneric_OpSelectN(v *Value) bool {
b := v.Block
config := b.Func.Config
typ := &b.Func.Config.Types
- // match: (SelectN [0] (MakeResult x ___))
- // result: x
+ // match: (SelectN [n] m:(MakeResult ___))
+ // result: m.Args[n]
for {
- if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpMakeResult || len(v_0.Args) < 1 {
- break
- }
- x := v_0.Args[0]
- v.copyOf(x)
- return true
- }
- // match: (SelectN [1] (MakeResult x y ___))
- // result: y
- for {
- if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpMakeResult || len(v_0.Args) < 2 {
- break
- }
- y := v_0.Args[1]
- v.copyOf(y)
- return true
- }
- // match: (SelectN [2] (MakeResult x y z ___))
- // result: z
- for {
- if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpMakeResult || len(v_0.Args) < 3 {
+ n := auxIntToInt64(v.AuxInt)
+ m := v_0
+ if m.Op != OpMakeResult {
break
}
- z := v_0.Args[2]
- v.copyOf(z)
+ v.copyOf(m.Args[n])
return true
}
// match: (SelectN [0] call:(StaticCall {sym} sptr (Const64 [c]) mem))
diff --git a/src/cmd/compile/internal/ssa/sparsemap.go b/src/cmd/compile/internal/ssa/sparsemap.go
index 9443c8b4b4..255a346d37 100644
--- a/src/cmd/compile/internal/ssa/sparsemap.go
+++ b/src/cmd/compile/internal/ssa/sparsemap.go
@@ -7,70 +7,60 @@ package ssa
// from https://research.swtch.com/sparse
// in turn, from Briggs and Torczon
-type sparseEntry struct {
- key ID
- val int32
+// sparseKey needs to be something we can index a slice with.
+type sparseKey interface{ ~int | ~int32 }
+
+type sparseEntry[K sparseKey, V any] struct {
+ key K
+ val V
}
-type sparseMap struct {
- dense []sparseEntry
+type genericSparseMap[K sparseKey, V any] struct {
+ dense []sparseEntry[K, V]
sparse []int32
}
-// newSparseMap returns a sparseMap that can map
-// integers between 0 and n-1 to int32s.
-func newSparseMap(n int) *sparseMap {
- return &sparseMap{dense: nil, sparse: make([]int32, n)}
+// newGenericSparseMap returns a genericSparseMap that can map
+// keys between 0 and n-1 to values of type V.
+func newGenericSparseMap[K sparseKey, V any](n int) *genericSparseMap[K, V] {
+ return &genericSparseMap[K, V]{dense: nil, sparse: make([]int32, n)}
}
-func (s *sparseMap) cap() int {
+func (s *genericSparseMap[K, V]) cap() int {
return len(s.sparse)
}
-func (s *sparseMap) size() int {
+func (s *genericSparseMap[K, V]) size() int {
return len(s.dense)
}
-func (s *sparseMap) contains(k ID) bool {
+func (s *genericSparseMap[K, V]) contains(k K) bool {
i := s.sparse[k]
return i < int32(len(s.dense)) && s.dense[i].key == k
}
-// get returns the value for key k, or -1 if k does
-// not appear in the map.
-func (s *sparseMap) get(k ID) int32 {
+// get returns the value for key k and true, or the zero
+// value of V and false if k does not appear in the map.
+func (s *genericSparseMap[K, V]) get(k K) (V, bool) {
i := s.sparse[k]
if i < int32(len(s.dense)) && s.dense[i].key == k {
- return s.dense[i].val
+ return s.dense[i].val, true
}
- return -1
+ var v V
+ return v, false
}
-func (s *sparseMap) set(k ID, v int32) {
+func (s *genericSparseMap[K, V]) set(k K, v V) {
i := s.sparse[k]
if i < int32(len(s.dense)) && s.dense[i].key == k {
s.dense[i].val = v
return
}
- s.dense = append(s.dense, sparseEntry{k, v})
- s.sparse[k] = int32(len(s.dense)) - 1
-}
-
-// setBit sets the v'th bit of k's value, where 0 <= v < 32
-func (s *sparseMap) setBit(k ID, v uint) {
- if v >= 32 {
- panic("bit index too large.")
- }
- i := s.sparse[k]
- if i < int32(len(s.dense)) && s.dense[i].key == k {
- s.dense[i].val |= 1 << v
- return
- }
- s.dense = append(s.dense, sparseEntry{k, 1 << v})
+ s.dense = append(s.dense, sparseEntry[K, V]{k, v})
s.sparse[k] = int32(len(s.dense)) - 1
}
-func (s *sparseMap) remove(k ID) {
+func (s *genericSparseMap[K, V]) remove(k K) {
i := s.sparse[k]
if i < int32(len(s.dense)) && s.dense[i].key == k {
y := s.dense[len(s.dense)-1]
@@ -80,10 +70,18 @@ func (s *sparseMap) remove(k ID) {
}
}
-func (s *sparseMap) clear() {
+func (s *genericSparseMap[K, V]) clear() {
s.dense = s.dense[:0]
}
-func (s *sparseMap) contents() []sparseEntry {
+func (s *genericSparseMap[K, V]) contents() []sparseEntry[K, V] {
return s.dense
}
+
+type sparseMap = genericSparseMap[ID, int32]
+
+// newSparseMap returns a sparseMap that can map
+// integers between 0 and n-1 to int32s.
+func newSparseMap(n int) *sparseMap {
+ return newGenericSparseMap[ID, int32](n)
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/i74576a.go b/src/cmd/compile/internal/ssa/testdata/i74576a.go
new file mode 100644
index 0000000000..40bb7b6069
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i74576a.go
@@ -0,0 +1,17 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "runtime"
+)
+
+func main() {
+ a := 1
+ runtime.Breakpoint()
+ sink = a
+}
+
+var sink any
diff --git a/src/cmd/compile/internal/ssa/testdata/i74576b.go b/src/cmd/compile/internal/ssa/testdata/i74576b.go
new file mode 100644
index 0000000000..fa89063299
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i74576b.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "runtime"
+)
+
+func main() {
+ a := 1
+ runtime.Breakpoint()
+ _ = make([]int, a)
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/i74576c.go b/src/cmd/compile/internal/ssa/testdata/i74576c.go
new file mode 100644
index 0000000000..92cacaf0d7
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i74576c.go
@@ -0,0 +1,19 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "runtime"
+)
+
+func main() {
+ s := S{1, 1}
+ runtime.Breakpoint()
+ sink = s
+}
+
+type S struct{ a, b uint64 }
+
+var sink any
diff --git a/src/cmd/compile/internal/ssa/tighten.go b/src/cmd/compile/internal/ssa/tighten.go
index eb5007b26e..48efdb5609 100644
--- a/src/cmd/compile/internal/ssa/tighten.go
+++ b/src/cmd/compile/internal/ssa/tighten.go
@@ -82,7 +82,6 @@ func tighten(f *Func) {
// We use this to make sure we don't tighten a value into a (deeper) loop.
idom := f.Idom()
loops := f.loopnest()
- loops.calculateDepths()
changed := true
for changed {
diff --git a/src/cmd/compile/internal/ssa/xposmap.go b/src/cmd/compile/internal/ssa/xposmap.go
index 93582e1373..382f916571 100644
--- a/src/cmd/compile/internal/ssa/xposmap.go
+++ b/src/cmd/compile/internal/ssa/xposmap.go
@@ -69,10 +69,10 @@ func (m *xposmap) set(p src.XPos, v int32) {
}
// get returns the int32 associated with the file index and line of p.
-func (m *xposmap) get(p src.XPos) int32 {
+func (m *xposmap) get(p src.XPos) (int32, bool) {
s := m.mapFor(p.FileIndex())
if s == nil {
- return -1
+ return 0, false
}
return s.get(p.Line())
}
diff --git a/src/cmd/compile/internal/ssagen/arch.go b/src/cmd/compile/internal/ssagen/arch.go
index 483e45cad4..ef5d8f59d7 100644
--- a/src/cmd/compile/internal/ssagen/arch.go
+++ b/src/cmd/compile/internal/ssagen/arch.go
@@ -25,8 +25,13 @@ type ArchInfo struct {
PadFrame func(int64) int64
- // ZeroRange zeroes a range of memory on stack. It is only inserted
- // at function entry, and it is ok to clobber registers.
+ // ZeroRange zeroes a range of memory on the stack.
+ // - it is only called at function entry
+ // - it is ok to clobber (non-arg) registers.
+ // - currently used only for small things, so it can be simple.
+ // - pointers to heap-allocated return values
+ // - open-coded deferred functions
+ // (Max size in make.bash is 40 bytes.)
ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
Ginsnop func(*objw.Progs) *obj.Prog
diff --git a/src/cmd/compile/internal/ssagen/intrinsics.go b/src/cmd/compile/internal/ssagen/intrinsics.go
index d7b25f2ab1..eae754da4e 100644
--- a/src/cmd/compile/internal/ssagen/intrinsics.go
+++ b/src/cmd/compile/internal/ssagen/intrinsics.go
@@ -1509,7 +1509,7 @@ func initIntrinsics(cfg *intrinsicBuildConfig) {
// No PSIGNB, simply do byte equality with ctrlEmpty.
// Load ctrlEmpty into each byte of a control word.
- var ctrlsEmpty uint64 = abi.SwissMapCtrlEmpty
+ var ctrlsEmpty uint64 = abi.MapCtrlEmpty
e := s.constInt64(types.Types[types.TUINT64], int64(ctrlsEmpty))
// Explicit copy to fp register. See
// https://go.dev/issue/70451.
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index 3b406c0d6f..221f979996 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -94,12 +94,8 @@ func InitConfig() {
_ = types.NewPtr(types.Types[types.TINT16]) // *int16
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
- if buildcfg.Experiment.SwissMap {
- _ = types.NewPtr(reflectdata.SwissMapType()) // *internal/runtime/maps.Map
- } else {
- _ = types.NewPtr(reflectdata.OldMapType()) // *runtime.hmap
- }
- _ = types.NewPtr(deferstruct()) // *runtime._defer
+ _ = types.NewPtr(reflectdata.MapType()) // *internal/runtime/maps.Map
+ _ = types.NewPtr(deferstruct()) // *runtime._defer
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
ssaConfig.Race = base.Flag.Race
@@ -137,6 +133,8 @@ func InitConfig() {
ir.Syms.Asanwrite = typecheck.LookupRuntimeFunc("asanwrite")
ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
+ ir.Syms.PanicBounds = typecheck.LookupRuntimeFunc("panicBounds")
+ ir.Syms.PanicExtend = typecheck.LookupRuntimeFunc("panicExtend")
ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE")
ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI")
@@ -3089,13 +3087,8 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
return v
}
- // map <--> *hmap
- var mt *types.Type
- if buildcfg.Experiment.SwissMap {
- mt = types.NewPtr(reflectdata.SwissMapType())
- } else {
- mt = types.NewPtr(reflectdata.OldMapType())
- }
+ // map <--> *internal/runtime/maps.Map
+ mt := types.NewPtr(reflectdata.MapType())
if to.Kind() == types.TMAP && from == mt {
return v
}
@@ -4001,7 +3994,7 @@ func (s *state) minMax(n *ir.CallExpr) *ssa.Value {
if typ.IsFloat() {
hasIntrinsic := false
switch Arch.LinkArch.Family {
- case sys.AMD64, sys.ARM64, sys.Loong64, sys.RISCV64:
+ case sys.AMD64, sys.ARM64, sys.Loong64, sys.RISCV64, sys.S390X:
hasIntrinsic = true
case sys.PPC64:
hasIntrinsic = buildcfg.GOPPC64 >= 9
@@ -5767,13 +5760,13 @@ func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
s.startBlock(bElse)
switch n.Op() {
case ir.OLEN:
- if buildcfg.Experiment.SwissMap && n.X.Type().IsMap() {
- // length is stored in the first word.
- loadType := reflectdata.SwissMapType().Field(0).Type // uint64
+ if n.X.Type().IsMap() {
+ // length is stored in the first word, but needs conversion to int.
+ loadType := reflectdata.MapType().Field(0).Type // uint64
load := s.load(loadType, x)
s.vars[n] = s.conv(nil, load, loadType, lenType) // integer conversion doesn't need Node
} else {
- // length is stored in the first word for map/chan
+ // length is stored in the first word for chan, no conversion needed.
s.vars[n] = s.load(lenType, x)
}
case ir.OCAP:
@@ -6970,6 +6963,9 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
if base.Ctxt.Flag_locationlists {
var debugInfo *ssa.FuncDebug
debugInfo = e.curfn.DebugInfo.(*ssa.FuncDebug)
+ // Save off entry ID in case we need it later for DWARF generation
+ // for return values promoted to the heap.
+ debugInfo.EntryID = f.Entry.ID
if e.curfn.ABI == obj.ABIInternal && base.Flag.N != 0 {
ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset, debugInfo)
} else {
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go
index 091933e1e7..eda6084b48 100644
--- a/src/cmd/compile/internal/test/inl_test.go
+++ b/src/cmd/compile/internal/test/inl_test.go
@@ -6,7 +6,6 @@ package test
import (
"bufio"
- "internal/goexperiment"
"internal/testenv"
"io"
"math/bits"
@@ -47,7 +46,6 @@ func TestIntendedInlining(t *testing.T) {
"getMCache",
"heapSetTypeNoHeader",
"heapSetTypeSmallHeader",
- "isDirectIface",
"itabHashFunc",
"nextslicecap",
"noescape",
@@ -109,6 +107,7 @@ func TestIntendedInlining(t *testing.T) {
"(*Buffer).tryGrowByReslice",
},
"internal/abi": {
+ "(*Type).IsDirectIface",
"UseInterfaceSwitchCache",
},
"internal/runtime/math": {
@@ -234,15 +233,6 @@ func TestIntendedInlining(t *testing.T) {
},
}
- if !goexperiment.SwissMap {
- // Maps
- want["runtime"] = append(want["runtime"], "bucketMask")
- want["runtime"] = append(want["runtime"], "bucketShift")
- want["runtime"] = append(want["runtime"], "evacuated")
- want["runtime"] = append(want["runtime"], "tophash")
- want["runtime"] = append(want["runtime"], "(*bmap).keys")
- want["runtime"] = append(want["runtime"], "(*bmap).overflow")
- }
if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" {
// nextFreeFast calls sys.TrailingZeros64, which on 386 is implemented in asm and is not inlinable.
// We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386.
diff --git a/src/cmd/compile/internal/test/mulconst_test.go b/src/cmd/compile/internal/test/mulconst_test.go
index c4aed84432..1d1b351af1 100644
--- a/src/cmd/compile/internal/test/mulconst_test.go
+++ b/src/cmd/compile/internal/test/mulconst_test.go
@@ -143,7 +143,7 @@ func BenchmarkMulconstI32(b *testing.B) {
}
mulSinkI32 = x
})
- // -120x = 8x - 120x
+ // -120x = 8x - 128x
b.Run("-120", func(b *testing.B) {
x := int32(1)
for i := 0; i < b.N; i++ {
@@ -202,7 +202,7 @@ func BenchmarkMulconstI64(b *testing.B) {
}
mulSinkI64 = x
})
- // -120x = 8x - 120x
+ // -120x = 8x - 128x
b.Run("-120", func(b *testing.B) {
x := int64(1)
for i := 0; i < b.N; i++ {
diff --git a/src/cmd/compile/internal/typecheck/_builtin/runtime.go b/src/cmd/compile/internal/typecheck/_builtin/runtime.go
index a1397b32b3..296bfdc281 100644
--- a/src/cmd/compile/internal/typecheck/_builtin/runtime.go
+++ b/src/cmd/compile/internal/typecheck/_builtin/runtime.go
@@ -25,7 +25,7 @@ func throwinit()
func panicwrap()
func gopanic(interface{})
-func gorecover(*int32) interface{}
+func gorecover() interface{}
func goschedguarded()
// Note: these declarations are just for wasm port.
@@ -152,14 +152,12 @@ func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (v
func mapassign_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any)
func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (val *any)
func mapassign_faststr(mapType *byte, hmap map[any]any, key string) (val *any)
-func mapiterinit(mapType *byte, hmap map[any]any, hiter *any) // old maps
-func mapIterStart(mapType *byte, hmap map[any]any, hiter *any) // swiss maps
+func mapIterStart(mapType *byte, hmap map[any]any, hiter *any)
func mapdelete(mapType *byte, hmap map[any]any, key *any)
func mapdelete_fast32(mapType *byte, hmap map[any]any, key uint32)
func mapdelete_fast64(mapType *byte, hmap map[any]any, key uint64)
func mapdelete_faststr(mapType *byte, hmap map[any]any, key string)
-func mapiternext(hiter *any) // old maps
-func mapIterNext(hiter *any) // swiss maps
+func mapIterNext(hiter *any)
func mapclear(mapType *byte, hmap map[any]any)
// *byte is really *runtime.Type
diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go
index f3ab6766ec..535f0fb7e8 100644
--- a/src/cmd/compile/internal/typecheck/builtin.go
+++ b/src/cmd/compile/internal/typecheck/builtin.go
@@ -36,204 +36,202 @@ var runtimeDecls = [...]struct {
{"throwinit", funcTag, 9},
{"panicwrap", funcTag, 9},
{"gopanic", funcTag, 11},
- {"gorecover", funcTag, 14},
+ {"gorecover", funcTag, 12},
{"goschedguarded", funcTag, 9},
- {"goPanicIndex", funcTag, 16},
- {"goPanicIndexU", funcTag, 18},
- {"goPanicSliceAlen", funcTag, 16},
- {"goPanicSliceAlenU", funcTag, 18},
- {"goPanicSliceAcap", funcTag, 16},
- {"goPanicSliceAcapU", funcTag, 18},
- {"goPanicSliceB", funcTag, 16},
- {"goPanicSliceBU", funcTag, 18},
- {"goPanicSlice3Alen", funcTag, 16},
- {"goPanicSlice3AlenU", funcTag, 18},
- {"goPanicSlice3Acap", funcTag, 16},
- {"goPanicSlice3AcapU", funcTag, 18},
- {"goPanicSlice3B", funcTag, 16},
- {"goPanicSlice3BU", funcTag, 18},
- {"goPanicSlice3C", funcTag, 16},
- {"goPanicSlice3CU", funcTag, 18},
- {"goPanicSliceConvert", funcTag, 16},
- {"printbool", funcTag, 19},
- {"printfloat", funcTag, 21},
- {"printint", funcTag, 23},
- {"printhex", funcTag, 25},
- {"printuint", funcTag, 25},
- {"printcomplex", funcTag, 27},
- {"printstring", funcTag, 29},
- {"printpointer", funcTag, 30},
- {"printuintptr", funcTag, 31},
- {"printiface", funcTag, 30},
- {"printeface", funcTag, 30},
- {"printslice", funcTag, 30},
+ {"goPanicIndex", funcTag, 14},
+ {"goPanicIndexU", funcTag, 16},
+ {"goPanicSliceAlen", funcTag, 14},
+ {"goPanicSliceAlenU", funcTag, 16},
+ {"goPanicSliceAcap", funcTag, 14},
+ {"goPanicSliceAcapU", funcTag, 16},
+ {"goPanicSliceB", funcTag, 14},
+ {"goPanicSliceBU", funcTag, 16},
+ {"goPanicSlice3Alen", funcTag, 14},
+ {"goPanicSlice3AlenU", funcTag, 16},
+ {"goPanicSlice3Acap", funcTag, 14},
+ {"goPanicSlice3AcapU", funcTag, 16},
+ {"goPanicSlice3B", funcTag, 14},
+ {"goPanicSlice3BU", funcTag, 16},
+ {"goPanicSlice3C", funcTag, 14},
+ {"goPanicSlice3CU", funcTag, 16},
+ {"goPanicSliceConvert", funcTag, 14},
+ {"printbool", funcTag, 17},
+ {"printfloat", funcTag, 19},
+ {"printint", funcTag, 21},
+ {"printhex", funcTag, 23},
+ {"printuint", funcTag, 23},
+ {"printcomplex", funcTag, 25},
+ {"printstring", funcTag, 27},
+ {"printpointer", funcTag, 28},
+ {"printuintptr", funcTag, 29},
+ {"printiface", funcTag, 28},
+ {"printeface", funcTag, 28},
+ {"printslice", funcTag, 28},
{"printnl", funcTag, 9},
{"printsp", funcTag, 9},
{"printlock", funcTag, 9},
{"printunlock", funcTag, 9},
- {"concatstring2", funcTag, 34},
- {"concatstring3", funcTag, 35},
- {"concatstring4", funcTag, 36},
- {"concatstring5", funcTag, 37},
- {"concatstrings", funcTag, 39},
- {"concatbyte2", funcTag, 41},
- {"concatbyte3", funcTag, 42},
- {"concatbyte4", funcTag, 43},
- {"concatbyte5", funcTag, 44},
- {"concatbytes", funcTag, 45},
- {"cmpstring", funcTag, 46},
- {"intstring", funcTag, 49},
- {"slicebytetostring", funcTag, 50},
- {"slicebytetostringtmp", funcTag, 51},
- {"slicerunetostring", funcTag, 54},
- {"stringtoslicebyte", funcTag, 55},
- {"stringtoslicerune", funcTag, 58},
- {"slicecopy", funcTag, 59},
- {"decoderune", funcTag, 60},
- {"countrunes", funcTag, 61},
- {"convT", funcTag, 62},
- {"convTnoptr", funcTag, 62},
- {"convT16", funcTag, 64},
- {"convT32", funcTag, 66},
- {"convT64", funcTag, 67},
- {"convTstring", funcTag, 68},
- {"convTslice", funcTag, 71},
- {"assertE2I", funcTag, 72},
- {"assertE2I2", funcTag, 72},
- {"panicdottypeE", funcTag, 73},
- {"panicdottypeI", funcTag, 73},
- {"panicnildottype", funcTag, 74},
- {"typeAssert", funcTag, 72},
- {"interfaceSwitch", funcTag, 75},
- {"ifaceeq", funcTag, 77},
- {"efaceeq", funcTag, 77},
- {"panicrangestate", funcTag, 78},
- {"deferrangefunc", funcTag, 79},
- {"rand", funcTag, 80},
- {"rand32", funcTag, 81},
- {"makemap64", funcTag, 83},
- {"makemap", funcTag, 84},
- {"makemap_small", funcTag, 85},
- {"mapaccess1", funcTag, 86},
- {"mapaccess1_fast32", funcTag, 87},
- {"mapaccess1_fast64", funcTag, 88},
- {"mapaccess1_faststr", funcTag, 89},
- {"mapaccess1_fat", funcTag, 90},
- {"mapaccess2", funcTag, 91},
- {"mapaccess2_fast32", funcTag, 92},
- {"mapaccess2_fast64", funcTag, 93},
- {"mapaccess2_faststr", funcTag, 94},
- {"mapaccess2_fat", funcTag, 95},
- {"mapassign", funcTag, 86},
- {"mapassign_fast32", funcTag, 87},
- {"mapassign_fast32ptr", funcTag, 96},
- {"mapassign_fast64", funcTag, 88},
- {"mapassign_fast64ptr", funcTag, 96},
- {"mapassign_faststr", funcTag, 89},
- {"mapiterinit", funcTag, 97},
- {"mapIterStart", funcTag, 97},
- {"mapdelete", funcTag, 97},
- {"mapdelete_fast32", funcTag, 98},
- {"mapdelete_fast64", funcTag, 99},
- {"mapdelete_faststr", funcTag, 100},
- {"mapiternext", funcTag, 101},
- {"mapIterNext", funcTag, 101},
- {"mapclear", funcTag, 102},
- {"makechan64", funcTag, 104},
- {"makechan", funcTag, 105},
- {"chanrecv1", funcTag, 107},
- {"chanrecv2", funcTag, 108},
- {"chansend1", funcTag, 110},
- {"closechan", funcTag, 111},
- {"chanlen", funcTag, 112},
- {"chancap", funcTag, 112},
- {"writeBarrier", varTag, 114},
- {"typedmemmove", funcTag, 115},
- {"typedmemclr", funcTag, 116},
- {"typedslicecopy", funcTag, 117},
- {"selectnbsend", funcTag, 118},
- {"selectnbrecv", funcTag, 119},
- {"selectsetpc", funcTag, 120},
- {"selectgo", funcTag, 121},
+ {"concatstring2", funcTag, 32},
+ {"concatstring3", funcTag, 33},
+ {"concatstring4", funcTag, 34},
+ {"concatstring5", funcTag, 35},
+ {"concatstrings", funcTag, 37},
+ {"concatbyte2", funcTag, 39},
+ {"concatbyte3", funcTag, 40},
+ {"concatbyte4", funcTag, 41},
+ {"concatbyte5", funcTag, 42},
+ {"concatbytes", funcTag, 43},
+ {"cmpstring", funcTag, 44},
+ {"intstring", funcTag, 47},
+ {"slicebytetostring", funcTag, 48},
+ {"slicebytetostringtmp", funcTag, 49},
+ {"slicerunetostring", funcTag, 52},
+ {"stringtoslicebyte", funcTag, 53},
+ {"stringtoslicerune", funcTag, 56},
+ {"slicecopy", funcTag, 57},
+ {"decoderune", funcTag, 58},
+ {"countrunes", funcTag, 59},
+ {"convT", funcTag, 60},
+ {"convTnoptr", funcTag, 60},
+ {"convT16", funcTag, 62},
+ {"convT32", funcTag, 64},
+ {"convT64", funcTag, 65},
+ {"convTstring", funcTag, 66},
+ {"convTslice", funcTag, 69},
+ {"assertE2I", funcTag, 70},
+ {"assertE2I2", funcTag, 70},
+ {"panicdottypeE", funcTag, 71},
+ {"panicdottypeI", funcTag, 71},
+ {"panicnildottype", funcTag, 72},
+ {"typeAssert", funcTag, 70},
+ {"interfaceSwitch", funcTag, 73},
+ {"ifaceeq", funcTag, 75},
+ {"efaceeq", funcTag, 75},
+ {"panicrangestate", funcTag, 76},
+ {"deferrangefunc", funcTag, 12},
+ {"rand", funcTag, 77},
+ {"rand32", funcTag, 78},
+ {"makemap64", funcTag, 80},
+ {"makemap", funcTag, 81},
+ {"makemap_small", funcTag, 82},
+ {"mapaccess1", funcTag, 83},
+ {"mapaccess1_fast32", funcTag, 84},
+ {"mapaccess1_fast64", funcTag, 85},
+ {"mapaccess1_faststr", funcTag, 86},
+ {"mapaccess1_fat", funcTag, 87},
+ {"mapaccess2", funcTag, 88},
+ {"mapaccess2_fast32", funcTag, 89},
+ {"mapaccess2_fast64", funcTag, 90},
+ {"mapaccess2_faststr", funcTag, 91},
+ {"mapaccess2_fat", funcTag, 92},
+ {"mapassign", funcTag, 83},
+ {"mapassign_fast32", funcTag, 84},
+ {"mapassign_fast32ptr", funcTag, 93},
+ {"mapassign_fast64", funcTag, 85},
+ {"mapassign_fast64ptr", funcTag, 93},
+ {"mapassign_faststr", funcTag, 86},
+ {"mapIterStart", funcTag, 94},
+ {"mapdelete", funcTag, 94},
+ {"mapdelete_fast32", funcTag, 95},
+ {"mapdelete_fast64", funcTag, 96},
+ {"mapdelete_faststr", funcTag, 97},
+ {"mapIterNext", funcTag, 98},
+ {"mapclear", funcTag, 99},
+ {"makechan64", funcTag, 101},
+ {"makechan", funcTag, 102},
+ {"chanrecv1", funcTag, 104},
+ {"chanrecv2", funcTag, 105},
+ {"chansend1", funcTag, 107},
+ {"closechan", funcTag, 108},
+ {"chanlen", funcTag, 109},
+ {"chancap", funcTag, 109},
+ {"writeBarrier", varTag, 111},
+ {"typedmemmove", funcTag, 112},
+ {"typedmemclr", funcTag, 113},
+ {"typedslicecopy", funcTag, 114},
+ {"selectnbsend", funcTag, 115},
+ {"selectnbrecv", funcTag, 116},
+ {"selectsetpc", funcTag, 117},
+ {"selectgo", funcTag, 118},
{"block", funcTag, 9},
- {"makeslice", funcTag, 122},
- {"makeslice64", funcTag, 123},
- {"makeslicecopy", funcTag, 124},
- {"growslice", funcTag, 126},
- {"unsafeslicecheckptr", funcTag, 127},
+ {"makeslice", funcTag, 119},
+ {"makeslice64", funcTag, 120},
+ {"makeslicecopy", funcTag, 121},
+ {"growslice", funcTag, 123},
+ {"unsafeslicecheckptr", funcTag, 124},
{"panicunsafeslicelen", funcTag, 9},
{"panicunsafeslicenilptr", funcTag, 9},
- {"unsafestringcheckptr", funcTag, 128},
+ {"unsafestringcheckptr", funcTag, 125},
{"panicunsafestringlen", funcTag, 9},
{"panicunsafestringnilptr", funcTag, 9},
- {"memmove", funcTag, 129},
- {"memclrNoHeapPointers", funcTag, 130},
- {"memclrHasPointers", funcTag, 130},
- {"memequal", funcTag, 131},
- {"memequal0", funcTag, 132},
- {"memequal8", funcTag, 132},
- {"memequal16", funcTag, 132},
- {"memequal32", funcTag, 132},
- {"memequal64", funcTag, 132},
- {"memequal128", funcTag, 132},
- {"f32equal", funcTag, 133},
- {"f64equal", funcTag, 133},
- {"c64equal", funcTag, 133},
- {"c128equal", funcTag, 133},
- {"strequal", funcTag, 133},
- {"interequal", funcTag, 133},
- {"nilinterequal", funcTag, 133},
- {"memhash", funcTag, 134},
- {"memhash0", funcTag, 135},
- {"memhash8", funcTag, 135},
- {"memhash16", funcTag, 135},
- {"memhash32", funcTag, 135},
- {"memhash64", funcTag, 135},
- {"memhash128", funcTag, 135},
- {"f32hash", funcTag, 136},
- {"f64hash", funcTag, 136},
- {"c64hash", funcTag, 136},
- {"c128hash", funcTag, 136},
- {"strhash", funcTag, 136},
- {"interhash", funcTag, 136},
- {"nilinterhash", funcTag, 136},
- {"int64div", funcTag, 137},
- {"uint64div", funcTag, 138},
- {"int64mod", funcTag, 137},
- {"uint64mod", funcTag, 138},
- {"float64toint64", funcTag, 139},
- {"float64touint64", funcTag, 140},
- {"float64touint32", funcTag, 141},
- {"int64tofloat64", funcTag, 142},
- {"int64tofloat32", funcTag, 144},
- {"uint64tofloat64", funcTag, 145},
- {"uint64tofloat32", funcTag, 146},
- {"uint32tofloat64", funcTag, 147},
- {"complex128div", funcTag, 148},
- {"racefuncenter", funcTag, 31},
+ {"memmove", funcTag, 126},
+ {"memclrNoHeapPointers", funcTag, 127},
+ {"memclrHasPointers", funcTag, 127},
+ {"memequal", funcTag, 128},
+ {"memequal0", funcTag, 129},
+ {"memequal8", funcTag, 129},
+ {"memequal16", funcTag, 129},
+ {"memequal32", funcTag, 129},
+ {"memequal64", funcTag, 129},
+ {"memequal128", funcTag, 129},
+ {"f32equal", funcTag, 130},
+ {"f64equal", funcTag, 130},
+ {"c64equal", funcTag, 130},
+ {"c128equal", funcTag, 130},
+ {"strequal", funcTag, 130},
+ {"interequal", funcTag, 130},
+ {"nilinterequal", funcTag, 130},
+ {"memhash", funcTag, 131},
+ {"memhash0", funcTag, 132},
+ {"memhash8", funcTag, 132},
+ {"memhash16", funcTag, 132},
+ {"memhash32", funcTag, 132},
+ {"memhash64", funcTag, 132},
+ {"memhash128", funcTag, 132},
+ {"f32hash", funcTag, 133},
+ {"f64hash", funcTag, 133},
+ {"c64hash", funcTag, 133},
+ {"c128hash", funcTag, 133},
+ {"strhash", funcTag, 133},
+ {"interhash", funcTag, 133},
+ {"nilinterhash", funcTag, 133},
+ {"int64div", funcTag, 134},
+ {"uint64div", funcTag, 135},
+ {"int64mod", funcTag, 134},
+ {"uint64mod", funcTag, 135},
+ {"float64toint64", funcTag, 136},
+ {"float64touint64", funcTag, 137},
+ {"float64touint32", funcTag, 138},
+ {"int64tofloat64", funcTag, 139},
+ {"int64tofloat32", funcTag, 141},
+ {"uint64tofloat64", funcTag, 142},
+ {"uint64tofloat32", funcTag, 143},
+ {"uint32tofloat64", funcTag, 144},
+ {"complex128div", funcTag, 145},
+ {"racefuncenter", funcTag, 29},
{"racefuncexit", funcTag, 9},
- {"raceread", funcTag, 31},
- {"racewrite", funcTag, 31},
- {"racereadrange", funcTag, 149},
- {"racewriterange", funcTag, 149},
- {"msanread", funcTag, 149},
- {"msanwrite", funcTag, 149},
- {"msanmove", funcTag, 150},
- {"asanread", funcTag, 149},
- {"asanwrite", funcTag, 149},
- {"checkptrAlignment", funcTag, 151},
- {"checkptrArithmetic", funcTag, 153},
- {"libfuzzerTraceCmp1", funcTag, 154},
- {"libfuzzerTraceCmp2", funcTag, 155},
- {"libfuzzerTraceCmp4", funcTag, 156},
- {"libfuzzerTraceCmp8", funcTag, 157},
- {"libfuzzerTraceConstCmp1", funcTag, 154},
- {"libfuzzerTraceConstCmp2", funcTag, 155},
- {"libfuzzerTraceConstCmp4", funcTag, 156},
- {"libfuzzerTraceConstCmp8", funcTag, 157},
- {"libfuzzerHookStrCmp", funcTag, 158},
- {"libfuzzerHookEqualFold", funcTag, 158},
- {"addCovMeta", funcTag, 160},
+ {"raceread", funcTag, 29},
+ {"racewrite", funcTag, 29},
+ {"racereadrange", funcTag, 146},
+ {"racewriterange", funcTag, 146},
+ {"msanread", funcTag, 146},
+ {"msanwrite", funcTag, 146},
+ {"msanmove", funcTag, 147},
+ {"asanread", funcTag, 146},
+ {"asanwrite", funcTag, 146},
+ {"checkptrAlignment", funcTag, 148},
+ {"checkptrArithmetic", funcTag, 150},
+ {"libfuzzerTraceCmp1", funcTag, 151},
+ {"libfuzzerTraceCmp2", funcTag, 152},
+ {"libfuzzerTraceCmp4", funcTag, 153},
+ {"libfuzzerTraceCmp8", funcTag, 154},
+ {"libfuzzerTraceConstCmp1", funcTag, 151},
+ {"libfuzzerTraceConstCmp2", funcTag, 152},
+ {"libfuzzerTraceConstCmp4", funcTag, 153},
+ {"libfuzzerTraceConstCmp8", funcTag, 154},
+ {"libfuzzerHookStrCmp", funcTag, 155},
+ {"libfuzzerHookEqualFold", funcTag, 155},
+ {"addCovMeta", funcTag, 157},
{"x86HasPOPCNT", varTag, 6},
{"x86HasSSE41", varTag, 6},
{"x86HasFMA", varTag, 6},
@@ -243,11 +241,11 @@ var runtimeDecls = [...]struct {
{"loong64HasLAM_BH", varTag, 6},
{"loong64HasLSX", varTag, 6},
{"riscv64HasZbb", varTag, 6},
- {"asanregisterglobals", funcTag, 130},
+ {"asanregisterglobals", funcTag, 127},
}
func runtimeTypes() []*types.Type {
- var typs [161]*types.Type
+ var typs [158]*types.Type
typs[0] = types.ByteType
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[types.TANY]
@@ -260,155 +258,152 @@ func runtimeTypes() []*types.Type {
typs[9] = newSig(nil, nil)
typs[10] = types.Types[types.TINTER]
typs[11] = newSig(params(typs[10]), nil)
- typs[12] = types.Types[types.TINT32]
- typs[13] = types.NewPtr(typs[12])
- typs[14] = newSig(params(typs[13]), params(typs[10]))
- typs[15] = types.Types[types.TINT]
- typs[16] = newSig(params(typs[15], typs[15]), nil)
- typs[17] = types.Types[types.TUINT]
- typs[18] = newSig(params(typs[17], typs[15]), nil)
- typs[19] = newSig(params(typs[6]), nil)
- typs[20] = types.Types[types.TFLOAT64]
+ typs[12] = newSig(nil, params(typs[10]))
+ typs[13] = types.Types[types.TINT]
+ typs[14] = newSig(params(typs[13], typs[13]), nil)
+ typs[15] = types.Types[types.TUINT]
+ typs[16] = newSig(params(typs[15], typs[13]), nil)
+ typs[17] = newSig(params(typs[6]), nil)
+ typs[18] = types.Types[types.TFLOAT64]
+ typs[19] = newSig(params(typs[18]), nil)
+ typs[20] = types.Types[types.TINT64]
typs[21] = newSig(params(typs[20]), nil)
- typs[22] = types.Types[types.TINT64]
+ typs[22] = types.Types[types.TUINT64]
typs[23] = newSig(params(typs[22]), nil)
- typs[24] = types.Types[types.TUINT64]
+ typs[24] = types.Types[types.TCOMPLEX128]
typs[25] = newSig(params(typs[24]), nil)
- typs[26] = types.Types[types.TCOMPLEX128]
+ typs[26] = types.Types[types.TSTRING]
typs[27] = newSig(params(typs[26]), nil)
- typs[28] = types.Types[types.TSTRING]
- typs[29] = newSig(params(typs[28]), nil)
- typs[30] = newSig(params(typs[2]), nil)
- typs[31] = newSig(params(typs[5]), nil)
- typs[32] = types.NewArray(typs[0], 32)
- typs[33] = types.NewPtr(typs[32])
- typs[34] = newSig(params(typs[33], typs[28], typs[28]), params(typs[28]))
- typs[35] = newSig(params(typs[33], typs[28], typs[28], typs[28]), params(typs[28]))
- typs[36] = newSig(params(typs[33], typs[28], typs[28], typs[28], typs[28]), params(typs[28]))
- typs[37] = newSig(params(typs[33], typs[28], typs[28], typs[28], typs[28], typs[28]), params(typs[28]))
- typs[38] = types.NewSlice(typs[28])
- typs[39] = newSig(params(typs[33], typs[38]), params(typs[28]))
- typs[40] = types.NewSlice(typs[0])
- typs[41] = newSig(params(typs[33], typs[28], typs[28]), params(typs[40]))
- typs[42] = newSig(params(typs[33], typs[28], typs[28], typs[28]), params(typs[40]))
- typs[43] = newSig(params(typs[33], typs[28], typs[28], typs[28], typs[28]), params(typs[40]))
- typs[44] = newSig(params(typs[33], typs[28], typs[28], typs[28], typs[28], typs[28]), params(typs[40]))
- typs[45] = newSig(params(typs[33], typs[38]), params(typs[40]))
- typs[46] = newSig(params(typs[28], typs[28]), params(typs[15]))
- typs[47] = types.NewArray(typs[0], 4)
- typs[48] = types.NewPtr(typs[47])
- typs[49] = newSig(params(typs[48], typs[22]), params(typs[28]))
- typs[50] = newSig(params(typs[33], typs[1], typs[15]), params(typs[28]))
- typs[51] = newSig(params(typs[1], typs[15]), params(typs[28]))
- typs[52] = types.RuneType
- typs[53] = types.NewSlice(typs[52])
- typs[54] = newSig(params(typs[33], typs[53]), params(typs[28]))
- typs[55] = newSig(params(typs[33], typs[28]), params(typs[40]))
- typs[56] = types.NewArray(typs[52], 32)
- typs[57] = types.NewPtr(typs[56])
- typs[58] = newSig(params(typs[57], typs[28]), params(typs[53]))
- typs[59] = newSig(params(typs[3], typs[15], typs[3], typs[15], typs[5]), params(typs[15]))
- typs[60] = newSig(params(typs[28], typs[15]), params(typs[52], typs[15]))
- typs[61] = newSig(params(typs[28]), params(typs[15]))
- typs[62] = newSig(params(typs[1], typs[3]), params(typs[7]))
- typs[63] = types.Types[types.TUINT16]
+ typs[28] = newSig(params(typs[2]), nil)
+ typs[29] = newSig(params(typs[5]), nil)
+ typs[30] = types.NewArray(typs[0], 32)
+ typs[31] = types.NewPtr(typs[30])
+ typs[32] = newSig(params(typs[31], typs[26], typs[26]), params(typs[26]))
+ typs[33] = newSig(params(typs[31], typs[26], typs[26], typs[26]), params(typs[26]))
+ typs[34] = newSig(params(typs[31], typs[26], typs[26], typs[26], typs[26]), params(typs[26]))
+ typs[35] = newSig(params(typs[31], typs[26], typs[26], typs[26], typs[26], typs[26]), params(typs[26]))
+ typs[36] = types.NewSlice(typs[26])
+ typs[37] = newSig(params(typs[31], typs[36]), params(typs[26]))
+ typs[38] = types.NewSlice(typs[0])
+ typs[39] = newSig(params(typs[31], typs[26], typs[26]), params(typs[38]))
+ typs[40] = newSig(params(typs[31], typs[26], typs[26], typs[26]), params(typs[38]))
+ typs[41] = newSig(params(typs[31], typs[26], typs[26], typs[26], typs[26]), params(typs[38]))
+ typs[42] = newSig(params(typs[31], typs[26], typs[26], typs[26], typs[26], typs[26]), params(typs[38]))
+ typs[43] = newSig(params(typs[31], typs[36]), params(typs[38]))
+ typs[44] = newSig(params(typs[26], typs[26]), params(typs[13]))
+ typs[45] = types.NewArray(typs[0], 4)
+ typs[46] = types.NewPtr(typs[45])
+ typs[47] = newSig(params(typs[46], typs[20]), params(typs[26]))
+ typs[48] = newSig(params(typs[31], typs[1], typs[13]), params(typs[26]))
+ typs[49] = newSig(params(typs[1], typs[13]), params(typs[26]))
+ typs[50] = types.RuneType
+ typs[51] = types.NewSlice(typs[50])
+ typs[52] = newSig(params(typs[31], typs[51]), params(typs[26]))
+ typs[53] = newSig(params(typs[31], typs[26]), params(typs[38]))
+ typs[54] = types.NewArray(typs[50], 32)
+ typs[55] = types.NewPtr(typs[54])
+ typs[56] = newSig(params(typs[55], typs[26]), params(typs[51]))
+ typs[57] = newSig(params(typs[3], typs[13], typs[3], typs[13], typs[5]), params(typs[13]))
+ typs[58] = newSig(params(typs[26], typs[13]), params(typs[50], typs[13]))
+ typs[59] = newSig(params(typs[26]), params(typs[13]))
+ typs[60] = newSig(params(typs[1], typs[3]), params(typs[7]))
+ typs[61] = types.Types[types.TUINT16]
+ typs[62] = newSig(params(typs[61]), params(typs[7]))
+ typs[63] = types.Types[types.TUINT32]
typs[64] = newSig(params(typs[63]), params(typs[7]))
- typs[65] = types.Types[types.TUINT32]
- typs[66] = newSig(params(typs[65]), params(typs[7]))
- typs[67] = newSig(params(typs[24]), params(typs[7]))
- typs[68] = newSig(params(typs[28]), params(typs[7]))
- typs[69] = types.Types[types.TUINT8]
- typs[70] = types.NewSlice(typs[69])
- typs[71] = newSig(params(typs[70]), params(typs[7]))
- typs[72] = newSig(params(typs[1], typs[1]), params(typs[1]))
- typs[73] = newSig(params(typs[1], typs[1], typs[1]), nil)
- typs[74] = newSig(params(typs[1]), nil)
- typs[75] = newSig(params(typs[1], typs[1]), params(typs[15], typs[1]))
- typs[76] = types.NewPtr(typs[5])
- typs[77] = newSig(params(typs[76], typs[7], typs[7]), params(typs[6]))
- typs[78] = newSig(params(typs[15]), nil)
- typs[79] = newSig(nil, params(typs[10]))
- typs[80] = newSig(nil, params(typs[24]))
- typs[81] = newSig(nil, params(typs[65]))
- typs[82] = types.NewMap(typs[2], typs[2])
- typs[83] = newSig(params(typs[1], typs[22], typs[3]), params(typs[82]))
- typs[84] = newSig(params(typs[1], typs[15], typs[3]), params(typs[82]))
- typs[85] = newSig(nil, params(typs[82]))
- typs[86] = newSig(params(typs[1], typs[82], typs[3]), params(typs[3]))
- typs[87] = newSig(params(typs[1], typs[82], typs[65]), params(typs[3]))
- typs[88] = newSig(params(typs[1], typs[82], typs[24]), params(typs[3]))
- typs[89] = newSig(params(typs[1], typs[82], typs[28]), params(typs[3]))
- typs[90] = newSig(params(typs[1], typs[82], typs[3], typs[1]), params(typs[3]))
- typs[91] = newSig(params(typs[1], typs[82], typs[3]), params(typs[3], typs[6]))
- typs[92] = newSig(params(typs[1], typs[82], typs[65]), params(typs[3], typs[6]))
- typs[93] = newSig(params(typs[1], typs[82], typs[24]), params(typs[3], typs[6]))
- typs[94] = newSig(params(typs[1], typs[82], typs[28]), params(typs[3], typs[6]))
- typs[95] = newSig(params(typs[1], typs[82], typs[3], typs[1]), params(typs[3], typs[6]))
- typs[96] = newSig(params(typs[1], typs[82], typs[7]), params(typs[3]))
- typs[97] = newSig(params(typs[1], typs[82], typs[3]), nil)
- typs[98] = newSig(params(typs[1], typs[82], typs[65]), nil)
- typs[99] = newSig(params(typs[1], typs[82], typs[24]), nil)
- typs[100] = newSig(params(typs[1], typs[82], typs[28]), nil)
- typs[101] = newSig(params(typs[3]), nil)
- typs[102] = newSig(params(typs[1], typs[82]), nil)
- typs[103] = types.NewChan(typs[2], types.Cboth)
- typs[104] = newSig(params(typs[1], typs[22]), params(typs[103]))
- typs[105] = newSig(params(typs[1], typs[15]), params(typs[103]))
- typs[106] = types.NewChan(typs[2], types.Crecv)
+ typs[65] = newSig(params(typs[22]), params(typs[7]))
+ typs[66] = newSig(params(typs[26]), params(typs[7]))
+ typs[67] = types.Types[types.TUINT8]
+ typs[68] = types.NewSlice(typs[67])
+ typs[69] = newSig(params(typs[68]), params(typs[7]))
+ typs[70] = newSig(params(typs[1], typs[1]), params(typs[1]))
+ typs[71] = newSig(params(typs[1], typs[1], typs[1]), nil)
+ typs[72] = newSig(params(typs[1]), nil)
+ typs[73] = newSig(params(typs[1], typs[1]), params(typs[13], typs[1]))
+ typs[74] = types.NewPtr(typs[5])
+ typs[75] = newSig(params(typs[74], typs[7], typs[7]), params(typs[6]))
+ typs[76] = newSig(params(typs[13]), nil)
+ typs[77] = newSig(nil, params(typs[22]))
+ typs[78] = newSig(nil, params(typs[63]))
+ typs[79] = types.NewMap(typs[2], typs[2])
+ typs[80] = newSig(params(typs[1], typs[20], typs[3]), params(typs[79]))
+ typs[81] = newSig(params(typs[1], typs[13], typs[3]), params(typs[79]))
+ typs[82] = newSig(nil, params(typs[79]))
+ typs[83] = newSig(params(typs[1], typs[79], typs[3]), params(typs[3]))
+ typs[84] = newSig(params(typs[1], typs[79], typs[63]), params(typs[3]))
+ typs[85] = newSig(params(typs[1], typs[79], typs[22]), params(typs[3]))
+ typs[86] = newSig(params(typs[1], typs[79], typs[26]), params(typs[3]))
+ typs[87] = newSig(params(typs[1], typs[79], typs[3], typs[1]), params(typs[3]))
+ typs[88] = newSig(params(typs[1], typs[79], typs[3]), params(typs[3], typs[6]))
+ typs[89] = newSig(params(typs[1], typs[79], typs[63]), params(typs[3], typs[6]))
+ typs[90] = newSig(params(typs[1], typs[79], typs[22]), params(typs[3], typs[6]))
+ typs[91] = newSig(params(typs[1], typs[79], typs[26]), params(typs[3], typs[6]))
+ typs[92] = newSig(params(typs[1], typs[79], typs[3], typs[1]), params(typs[3], typs[6]))
+ typs[93] = newSig(params(typs[1], typs[79], typs[7]), params(typs[3]))
+ typs[94] = newSig(params(typs[1], typs[79], typs[3]), nil)
+ typs[95] = newSig(params(typs[1], typs[79], typs[63]), nil)
+ typs[96] = newSig(params(typs[1], typs[79], typs[22]), nil)
+ typs[97] = newSig(params(typs[1], typs[79], typs[26]), nil)
+ typs[98] = newSig(params(typs[3]), nil)
+ typs[99] = newSig(params(typs[1], typs[79]), nil)
+ typs[100] = types.NewChan(typs[2], types.Cboth)
+ typs[101] = newSig(params(typs[1], typs[20]), params(typs[100]))
+ typs[102] = newSig(params(typs[1], typs[13]), params(typs[100]))
+ typs[103] = types.NewChan(typs[2], types.Crecv)
+ typs[104] = newSig(params(typs[103], typs[3]), nil)
+ typs[105] = newSig(params(typs[103], typs[3]), params(typs[6]))
+ typs[106] = types.NewChan(typs[2], types.Csend)
typs[107] = newSig(params(typs[106], typs[3]), nil)
- typs[108] = newSig(params(typs[106], typs[3]), params(typs[6]))
- typs[109] = types.NewChan(typs[2], types.Csend)
- typs[110] = newSig(params(typs[109], typs[3]), nil)
- typs[111] = newSig(params(typs[109]), nil)
- typs[112] = newSig(params(typs[2]), params(typs[15]))
- typs[113] = types.NewArray(typs[0], 3)
- typs[114] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[113]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
- typs[115] = newSig(params(typs[1], typs[3], typs[3]), nil)
- typs[116] = newSig(params(typs[1], typs[3]), nil)
- typs[117] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15]))
- typs[118] = newSig(params(typs[109], typs[3]), params(typs[6]))
- typs[119] = newSig(params(typs[3], typs[106]), params(typs[6], typs[6]))
- typs[120] = newSig(params(typs[76]), nil)
- typs[121] = newSig(params(typs[1], typs[1], typs[76], typs[15], typs[15], typs[6]), params(typs[15], typs[6]))
- typs[122] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7]))
- typs[123] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7]))
- typs[124] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7]))
- typs[125] = types.NewSlice(typs[2])
- typs[126] = newSig(params(typs[3], typs[15], typs[15], typs[15], typs[1]), params(typs[125]))
- typs[127] = newSig(params(typs[1], typs[7], typs[22]), nil)
- typs[128] = newSig(params(typs[7], typs[22]), nil)
- typs[129] = newSig(params(typs[3], typs[3], typs[5]), nil)
- typs[130] = newSig(params(typs[7], typs[5]), nil)
- typs[131] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
- typs[132] = newSig(params(typs[3], typs[3]), params(typs[6]))
- typs[133] = newSig(params(typs[7], typs[7]), params(typs[6]))
- typs[134] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5]))
- typs[135] = newSig(params(typs[7], typs[5]), params(typs[5]))
- typs[136] = newSig(params(typs[3], typs[5]), params(typs[5]))
- typs[137] = newSig(params(typs[22], typs[22]), params(typs[22]))
- typs[138] = newSig(params(typs[24], typs[24]), params(typs[24]))
- typs[139] = newSig(params(typs[20]), params(typs[22]))
- typs[140] = newSig(params(typs[20]), params(typs[24]))
- typs[141] = newSig(params(typs[20]), params(typs[65]))
- typs[142] = newSig(params(typs[22]), params(typs[20]))
- typs[143] = types.Types[types.TFLOAT32]
- typs[144] = newSig(params(typs[22]), params(typs[143]))
- typs[145] = newSig(params(typs[24]), params(typs[20]))
- typs[146] = newSig(params(typs[24]), params(typs[143]))
- typs[147] = newSig(params(typs[65]), params(typs[20]))
- typs[148] = newSig(params(typs[26], typs[26]), params(typs[26]))
- typs[149] = newSig(params(typs[5], typs[5]), nil)
- typs[150] = newSig(params(typs[5], typs[5], typs[5]), nil)
- typs[151] = newSig(params(typs[7], typs[1], typs[5]), nil)
- typs[152] = types.NewSlice(typs[7])
- typs[153] = newSig(params(typs[7], typs[152]), nil)
- typs[154] = newSig(params(typs[69], typs[69], typs[17]), nil)
- typs[155] = newSig(params(typs[63], typs[63], typs[17]), nil)
- typs[156] = newSig(params(typs[65], typs[65], typs[17]), nil)
- typs[157] = newSig(params(typs[24], typs[24], typs[17]), nil)
- typs[158] = newSig(params(typs[28], typs[28], typs[17]), nil)
- typs[159] = types.NewArray(typs[0], 16)
- typs[160] = newSig(params(typs[7], typs[65], typs[159], typs[28], typs[15], typs[69], typs[69]), params(typs[65]))
+ typs[108] = newSig(params(typs[106]), nil)
+ typs[109] = newSig(params(typs[2]), params(typs[13]))
+ typs[110] = types.NewArray(typs[0], 3)
+ typs[111] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[110]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[22])})
+ typs[112] = newSig(params(typs[1], typs[3], typs[3]), nil)
+ typs[113] = newSig(params(typs[1], typs[3]), nil)
+ typs[114] = newSig(params(typs[1], typs[3], typs[13], typs[3], typs[13]), params(typs[13]))
+ typs[115] = newSig(params(typs[106], typs[3]), params(typs[6]))
+ typs[116] = newSig(params(typs[3], typs[103]), params(typs[6], typs[6]))
+ typs[117] = newSig(params(typs[74]), nil)
+ typs[118] = newSig(params(typs[1], typs[1], typs[74], typs[13], typs[13], typs[6]), params(typs[13], typs[6]))
+ typs[119] = newSig(params(typs[1], typs[13], typs[13]), params(typs[7]))
+ typs[120] = newSig(params(typs[1], typs[20], typs[20]), params(typs[7]))
+ typs[121] = newSig(params(typs[1], typs[13], typs[13], typs[7]), params(typs[7]))
+ typs[122] = types.NewSlice(typs[2])
+ typs[123] = newSig(params(typs[3], typs[13], typs[13], typs[13], typs[1]), params(typs[122]))
+ typs[124] = newSig(params(typs[1], typs[7], typs[20]), nil)
+ typs[125] = newSig(params(typs[7], typs[20]), nil)
+ typs[126] = newSig(params(typs[3], typs[3], typs[5]), nil)
+ typs[127] = newSig(params(typs[7], typs[5]), nil)
+ typs[128] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
+ typs[129] = newSig(params(typs[3], typs[3]), params(typs[6]))
+ typs[130] = newSig(params(typs[7], typs[7]), params(typs[6]))
+ typs[131] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5]))
+ typs[132] = newSig(params(typs[7], typs[5]), params(typs[5]))
+ typs[133] = newSig(params(typs[3], typs[5]), params(typs[5]))
+ typs[134] = newSig(params(typs[20], typs[20]), params(typs[20]))
+ typs[135] = newSig(params(typs[22], typs[22]), params(typs[22]))
+ typs[136] = newSig(params(typs[18]), params(typs[20]))
+ typs[137] = newSig(params(typs[18]), params(typs[22]))
+ typs[138] = newSig(params(typs[18]), params(typs[63]))
+ typs[139] = newSig(params(typs[20]), params(typs[18]))
+ typs[140] = types.Types[types.TFLOAT32]
+ typs[141] = newSig(params(typs[20]), params(typs[140]))
+ typs[142] = newSig(params(typs[22]), params(typs[18]))
+ typs[143] = newSig(params(typs[22]), params(typs[140]))
+ typs[144] = newSig(params(typs[63]), params(typs[18]))
+ typs[145] = newSig(params(typs[24], typs[24]), params(typs[24]))
+ typs[146] = newSig(params(typs[5], typs[5]), nil)
+ typs[147] = newSig(params(typs[5], typs[5], typs[5]), nil)
+ typs[148] = newSig(params(typs[7], typs[1], typs[5]), nil)
+ typs[149] = types.NewSlice(typs[7])
+ typs[150] = newSig(params(typs[7], typs[149]), nil)
+ typs[151] = newSig(params(typs[67], typs[67], typs[15]), nil)
+ typs[152] = newSig(params(typs[61], typs[61], typs[15]), nil)
+ typs[153] = newSig(params(typs[63], typs[63], typs[15]), nil)
+ typs[154] = newSig(params(typs[22], typs[22], typs[15]), nil)
+ typs[155] = newSig(params(typs[26], typs[26], typs[15]), nil)
+ typs[156] = types.NewArray(typs[0], 16)
+ typs[157] = newSig(params(typs[7], typs[63], typs[156], typs[26], typs[13], typs[67], typs[67]), params(typs[63]))
return typs[:]
}
diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go
index fc6e799e74..6a664156af 100644
--- a/src/cmd/compile/internal/typecheck/const.go
+++ b/src/cmd/compile/internal/typecheck/const.go
@@ -455,7 +455,6 @@ func callOrChan(n ir.Node) bool {
ir.OPRINTLN,
ir.OREAL,
ir.ORECOVER,
- ir.ORECOVERFP,
ir.ORECV,
ir.OUNSAFEADD,
ir.OUNSAFESLICE,
diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go
index 02e59fa360..5765731546 100644
--- a/src/cmd/compile/internal/typecheck/func.go
+++ b/src/cmd/compile/internal/typecheck/func.go
@@ -758,17 +758,7 @@ func tcRecover(n *ir.CallExpr) ir.Node {
return n
}
- // FP is equal to caller's SP plus FixedFrameSize.
- var fp ir.Node = ir.NewCallExpr(n.Pos(), ir.OGETCALLERSP, nil, nil)
- if off := base.Ctxt.Arch.FixedFrameSize; off != 0 {
- fp = ir.NewBinaryExpr(n.Pos(), ir.OADD, fp, ir.NewInt(base.Pos, off))
- }
- // TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
- fp = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
-
- n.SetOp(ir.ORECOVERFP)
n.SetType(types.Types[types.TINTER])
- n.Args = []ir.Node{Expr(fp)}
return n
}
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go
index 8d792485d8..ac49f251bb 100644
--- a/src/cmd/compile/internal/typecheck/stmt.go
+++ b/src/cmd/compile/internal/typecheck/stmt.go
@@ -319,7 +319,7 @@ func normalizeGoDeferCall(pos src.XPos, op ir.Op, call ir.Node, init *ir.Nodes)
argps = append(argps, &call.Fun.(*ir.SelectorExpr).X) // must be first for OCHECKNIL; see below
visitList(call.Args)
- case ir.OAPPEND, ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
+ case ir.OAPPEND, ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVER:
call := call.(*ir.CallExpr)
visitList(call.Args)
visit(&call.RType)
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
index 139defafe2..67e2e99f02 100644
--- a/src/cmd/compile/internal/types/fmt.go
+++ b/src/cmd/compile/internal/types/fmt.go
@@ -471,12 +471,10 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
case TSTRUCT:
if m := t.StructType().Map; m != nil {
mt := m.MapType()
- // Format the bucket struct for map[x]y as map.bucket[x]y.
+ // Format the bucket struct for map[x]y as map.group[x]y.
// This avoids a recursive print that generates very long names.
switch t {
- case mt.OldBucket:
- b.WriteString("map.bucket[")
- case mt.SwissGroup:
+ case mt.Group:
b.WriteString("map.group[")
default:
base.Fatalf("unknown internal map type")
diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go
index 3b2aeece3e..ba033ec499 100644
--- a/src/cmd/compile/internal/types/sizeof_test.go
+++ b/src/cmd/compile/internal/types/sizeof_test.go
@@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) {
}{
{Sym{}, 32, 64},
{Type{}, 60, 96},
- {Map{}, 16, 32},
+ {Map{}, 12, 24},
{Forward{}, 20, 32},
{Func{}, 32, 56},
{Struct{}, 12, 24},
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index f7b9b0f3f7..8c2f7a3b5d 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -10,7 +10,6 @@ import (
"cmd/internal/src"
"fmt"
"go/constant"
- "internal/buildcfg"
"internal/types/errors"
"sync"
)
@@ -282,17 +281,7 @@ type Map struct {
Key *Type // Key type
Elem *Type // Val (elem) type
- // Note: It would be cleaner to completely split Map into OldMap and
- // SwissMap, but 99% of the types map code doesn't care about the
- // implementation at all, so it is tons of churn to split the type.
- // Only code that looks at the bucket field can care about the
- // implementation.
-
- // GOEXPERIMENT=noswissmap fields
- OldBucket *Type // internal struct type representing a hash bucket
-
- // GOEXPERIMENT=swissmap fields
- SwissGroup *Type // internal struct type representing a slot group
+ Group *Type // internal struct type representing a slot group
}
// MapType returns t's extra map-specific fields.
@@ -1193,37 +1182,20 @@ func (t *Type) cmp(x *Type) Cmp {
// by the general code after the switch.
case TSTRUCT:
- if buildcfg.Experiment.SwissMap {
- // Is this a map group type?
- if t.StructType().Map == nil {
- if x.StructType().Map != nil {
- return CMPlt // nil < non-nil
- }
- // to the fallthrough
- } else if x.StructType().Map == nil {
- return CMPgt // nil > non-nil
- }
- // Both have non-nil Map, fallthrough to the general
- // case. Note that the map type does not directly refer
- // to the group type (it uses unsafe.Pointer). If it
- // did, this would need special handling to avoid
- // infinite recursion.
- } else {
- // Is this a map bucket type?
- if t.StructType().Map == nil {
- if x.StructType().Map != nil {
- return CMPlt // nil < non-nil
- }
- // to the fallthrough
- } else if x.StructType().Map == nil {
- return CMPgt // nil > non-nil
+ // Is this a map group type?
+ if t.StructType().Map == nil {
+ if x.StructType().Map != nil {
+ return CMPlt // nil < non-nil
}
- // Both have non-nil Map, fallthrough to the general
- // case. Note that the map type does not directly refer
- // to the bucket type (it uses unsafe.Pointer). If it
- // did, this would need special handling to avoid
- // infinite recursion.
+ // to the general case
+ } else if x.StructType().Map == nil {
+ return CMPgt // nil > non-nil
}
+ // Both have non-nil Map, fallthrough to the general
+ // case. Note that the map type does not directly refer
+ // to the group type (it uses unsafe.Pointer). If it
+ // did, this would need special handling to avoid
+ // infinite recursion.
tfs := t.Fields()
xfs := x.Fields()
diff --git a/src/cmd/compile/internal/types2/alias.go b/src/cmd/compile/internal/types2/alias.go
index 6a6b96a6e3..90dda18cc8 100644
--- a/src/cmd/compile/internal/types2/alias.go
+++ b/src/cmd/compile/internal/types2/alias.go
@@ -6,7 +6,6 @@ package types2
import (
"cmd/compile/internal/syntax"
- "fmt"
)
// An Alias represents an alias type.
@@ -50,7 +49,7 @@ type Alias struct {
}
// NewAlias creates a new Alias type with the given type name and rhs.
-// rhs must not be nil.
+// If rhs is nil, the alias is incomplete.
func NewAlias(obj *TypeName, rhs Type) *Alias {
alias := (*Checker)(nil).newAlias(obj, rhs)
// Ensure that alias.actual is set (#65455).
@@ -98,6 +97,7 @@ func (a *Alias) Rhs() Type { return a.fromRHS }
// otherwise it follows t's alias chain until it
// reaches a non-alias type which is then returned.
// Consequently, the result is never an alias type.
+// Returns nil if the alias is incomplete.
func Unalias(t Type) Type {
if a0, _ := t.(*Alias); a0 != nil {
return unalias(a0)
@@ -113,19 +113,10 @@ func unalias(a0 *Alias) Type {
for a := a0; a != nil; a, _ = t.(*Alias) {
t = a.fromRHS
}
- if t == nil {
- panic(fmt.Sprintf("non-terminated alias %s", a0.obj.name))
- }
-
- // Memoize the type only if valid.
- // In the presence of unfinished cyclic declarations, Unalias
- // would otherwise latch the invalid value (#66704).
- // TODO(adonovan): rethink, along with checker.typeDecl's use
- // of Invalid to mark unfinished aliases.
- if t != Typ[Invalid] {
- a0.actual = t
- }
+ // It's fine to memoize nil types since it's the zero value for actual.
+ // It accomplishes nothing.
+ a0.actual = t
return t
}
@@ -137,9 +128,8 @@ func asNamed(t Type) *Named {
}
// newAlias creates a new Alias type with the given type name and rhs.
-// rhs must not be nil.
+// If rhs is nil, the alias is incomplete.
func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias {
- assert(rhs != nil)
a := new(Alias)
a.obj = obj
a.orig = a
@@ -172,12 +162,6 @@ func (check *Checker) newAliasInstance(pos syntax.Pos, orig *Alias, targs []Type
func (a *Alias) cleanup() {
// Ensure a.actual is set before types are published,
- // so Unalias is a pure "getter", not a "setter".
- actual := Unalias(a)
-
- if actual == Typ[Invalid] {
- // We don't set a.actual to Typ[Invalid] during type checking,
- // as it may indicate that the RHS is not fully set up.
- a.actual = actual
- }
+ // so unalias is a pure "getter", not a "setter".
+ unalias(a)
}
diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go
index fe46b4e997..4bb2135755 100644
--- a/src/cmd/compile/internal/types2/builtins.go
+++ b/src/cmd/compile/internal/types2/builtins.go
@@ -91,22 +91,25 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// to type []byte with a second argument of string type followed by ... .
// This form appends the bytes of the string."
- // get special case out of the way
+ // Handle append(bytes, y...) special case, where
+ // the type set of y is {string} or {string, []byte}.
var sig *Signature
if nargs == 2 && hasDots(call) {
if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok {
y := args[1]
+ hasString := false
typeset(y.typ, func(_, u Type) bool {
if s, _ := u.(*Slice); s != nil && Identical(s.elem, universeByte) {
return true
}
if isString(u) {
+ hasString = true
return true
}
y = nil
return false
})
- if y != nil {
+ if y != nil && hasString {
// setting the signature also signals that we're done
sig = makeSig(x.typ, x.typ, y.typ)
sig.variadic = true
diff --git a/src/cmd/compile/internal/types2/check_test.go b/src/cmd/compile/internal/types2/check_test.go
index 35bbcec5c5..8b97a9f676 100644
--- a/src/cmd/compile/internal/types2/check_test.go
+++ b/src/cmd/compile/internal/types2/check_test.go
@@ -399,12 +399,6 @@ func TestIssue47243_TypedRHS(t *testing.T) {
}
func TestCheck(t *testing.T) {
- old := buildcfg.Experiment.RangeFunc
- defer func() {
- buildcfg.Experiment.RangeFunc = old
- }()
- buildcfg.Experiment.RangeFunc = true
-
DefPredeclaredTestFuncs()
testDirFiles(t, "../../../../internal/types/testdata/check", 50, false) // TODO(gri) narrow column tolerance
}
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
index bedcc4c015..64047aa84f 100644
--- a/src/cmd/compile/internal/types2/decl.go
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -320,11 +320,15 @@ func (check *Checker) cycleError(cycle []Object, start int) {
// If obj is a type alias, mark it as valid (not broken) in order to avoid follow-on errors.
obj := cycle[start]
tname, _ := obj.(*TypeName)
- if tname != nil && tname.IsAlias() {
- // If we use Alias nodes, it is initialized with Typ[Invalid].
- // TODO(gri) Adjust this code if we initialize with nil.
- if !check.conf.EnableAlias {
- check.validAlias(tname, Typ[Invalid])
+ if tname != nil {
+ if check.conf.EnableAlias {
+ if a, ok := tname.Type().(*Alias); ok {
+ a.fromRHS = Typ[Invalid]
+ }
+ } else {
+ if tname.IsAlias() {
+ check.validAlias(tname, Typ[Invalid])
+ }
}
}
@@ -507,17 +511,18 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN
}
if check.conf.EnableAlias {
- // TODO(gri) Should be able to use nil instead of Typ[Invalid] to mark
- // the alias as incomplete. Currently this causes problems
- // with certain cycles. Investigate.
- //
- // NOTE(adonovan): to avoid the Invalid being prematurely observed
- // by (e.g.) a var whose type is an unfinished cycle,
- // Unalias does not memoize if Invalid. Perhaps we should use a
- // special sentinel distinct from Invalid.
- alias := check.newAlias(obj, Typ[Invalid])
+ alias := check.newAlias(obj, nil)
setDefType(def, alias)
+ // If we could not type the RHS, set it to invalid. This should
+ // only ever happen if we panic before setting.
+ defer func() {
+ if alias.fromRHS == nil {
+ alias.fromRHS = Typ[Invalid]
+ unalias(alias)
+ }
+ }()
+
// handle type parameters even if not allowed (Alias type is supported)
if tparam0 != nil {
if !versionErr && !buildcfg.Experiment.AliasTypeParams {
@@ -531,8 +536,9 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN
rhs = check.definedType(tdecl.Type, obj)
assert(rhs != nil)
+
alias.fromRHS = rhs
- Unalias(alias) // resolve alias.actual
+ unalias(alias) // resolve alias.actual
} else {
if !versionErr && tparam0 != nil {
check.error(tdecl, UnsupportedFeature, "generic type alias requires GODEBUG=gotypesalias=1 or unset")
diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go
index eaa55e20c9..e5f9a1c6f7 100644
--- a/src/cmd/compile/internal/types2/expr.go
+++ b/src/cmd/compile/internal/types2/expr.go
@@ -895,6 +895,10 @@ func (check *Checker) matchTypes(x, y *operand) {
if isTyped(x.typ) && isTyped(y.typ) {
return false
}
+ // A numeric type can only convert to another numeric type.
+ if allNumeric(x.typ) != allNumeric(y.typ) {
+ return false
+ }
// An untyped operand may convert to its default type when paired with an empty interface
// TODO(gri) This should only matter for comparisons (the only binary operation that is
// valid with interfaces), but in that case the assignability check should take
diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go
index a9a27c9320..dbb1fa0b3e 100644
--- a/src/cmd/compile/internal/types2/named.go
+++ b/src/cmd/compile/internal/types2/named.go
@@ -127,8 +127,8 @@ type Named struct {
// accessed.
methods []*Func
- // loader may be provided to lazily load type parameters, underlying type, and methods.
- loader func(*Named) (tparams []*TypeParam, underlying Type, methods []*Func)
+ // loader may be provided to lazily load type parameters, underlying type, methods, and delayed functions
+ loader func(*Named) ([]*TypeParam, Type, []*Func, []func())
}
// instance holds information that is only necessary for instantiated named
@@ -143,9 +143,11 @@ type instance struct {
// namedState represents the possible states that a named type may assume.
type namedState uint32
+// Note: the order of states is relevant
const (
unresolved namedState = iota // tparams, underlying type and methods might be unavailable
- resolved // resolve has run; methods might be incomplete (for instances)
+ resolved // resolve has run; methods might be unexpanded (for instances)
+ loaded // loader has run; constraints might be unexpanded (for generic types)
complete // all data is known
)
@@ -167,7 +169,7 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
// accessible; but if n is an instantiated type, its methods may still be
// unexpanded.
func (n *Named) resolve() *Named {
- if n.state() >= resolved { // avoid locking below
+ if n.state() > unresolved { // avoid locking below
return n
}
@@ -176,7 +178,7 @@ func (n *Named) resolve() *Named {
n.mu.Lock()
defer n.mu.Unlock()
- if n.state() >= resolved {
+ if n.state() > unresolved {
return n
}
@@ -212,13 +214,20 @@ func (n *Named) resolve() *Named {
assert(n.underlying == nil)
assert(n.TypeArgs().Len() == 0) // instances are created by instantiation, in which case n.loader is nil
- tparams, underlying, methods := n.loader(n)
+ tparams, underlying, methods, delayed := n.loader(n)
+ n.loader = nil
n.tparams = bindTParams(tparams)
n.underlying = underlying
n.fromRHS = underlying // for cycle detection
n.methods = methods
- n.loader = nil
+
+ // advance state to avoid deadlock calling delayed functions
+ n.setState(loaded)
+
+ for _, f := range delayed {
+ f()
+ }
}
n.setState(complete)
diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go
index 26752c44b0..7096c55697 100644
--- a/src/cmd/compile/internal/types2/object.go
+++ b/src/cmd/compile/internal/types2/object.go
@@ -293,7 +293,7 @@ func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName
// NewTypeNameLazy returns a new defined type like NewTypeName, but it
// lazily calls resolve to finish constructing the Named object.
-func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(named *Named) (tparams []*TypeParam, underlying Type, methods []*Func)) *TypeName {
+func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(*Named) ([]*TypeParam, Type, []*Func, []func())) *TypeName {
obj := NewTypeName(pos, pkg, name, nil)
NewNamed(obj, nil, nil).loader = load
return obj
diff --git a/src/cmd/compile/internal/types2/range.go b/src/cmd/compile/internal/types2/range.go
index dc0d81d05b..b654601eaf 100644
--- a/src/cmd/compile/internal/types2/range.go
+++ b/src/cmd/compile/internal/types2/range.go
@@ -9,7 +9,6 @@ package types2
import (
"cmd/compile/internal/syntax"
"go/constant"
- "internal/buildcfg"
. "internal/types/errors"
)
@@ -237,7 +236,7 @@ func rangeKeyVal(check *Checker, orig Type, allowVersion func(goVersion) bool) (
assert(typ.dir != SendOnly)
return typ.elem, nil, "", true
case *Signature:
- if !buildcfg.Experiment.RangeFunc && allowVersion != nil && !allowVersion(go1_23) {
+ if allowVersion != nil && !allowVersion(go1_23) {
return bad("requires go1.23 or later")
}
// check iterator arity
diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go
index 84e7436103..974eb06886 100644
--- a/src/cmd/compile/internal/walk/builtin.go
+++ b/src/cmd/compile/internal/walk/builtin.go
@@ -9,7 +9,6 @@ import (
"go/constant"
"go/token"
"internal/abi"
- "internal/buildcfg"
"strings"
"cmd/compile/internal/base"
@@ -313,15 +312,8 @@ func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// walkMakeMap walks an OMAKEMAP node.
func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
- if buildcfg.Experiment.SwissMap {
- return walkMakeSwissMap(n, init)
- }
- return walkMakeOldMap(n, init)
-}
-
-func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
t := n.Type()
- mapType := reflectdata.SwissMapType()
+ mapType := reflectdata.MapType()
hint := n.Len
// var m *Map
@@ -334,28 +326,28 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
m = stackTempAddr(init, mapType)
// Allocate one group pointed to by m.dirPtr on stack if hint
- // is not larger than SwissMapGroupSlots. In case hint is
+ // is not larger than MapGroupSlots. In case hint is
// larger, runtime.makemap will allocate on the heap.
// Maximum key and elem size is 128 bytes, larger objects
// are stored with an indirection. So max bucket size is 2048+eps.
if !ir.IsConst(hint, constant.Int) ||
- constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapGroupSlots)) {
+ constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.MapGroupSlots)) {
- // In case hint is larger than SwissMapGroupSlots
+ // In case hint is larger than MapGroupSlots
// runtime.makemap will allocate on the heap, see
// #20184
//
- // if hint <= abi.SwissMapGroupSlots {
+ // if hint <= abi.MapGroupSlots {
// var gv group
// g = &gv
- // g.ctrl = abi.SwissMapCtrlEmpty
+ // g.ctrl = abi.MapCtrlEmpty
// m.dirPtr = g
// }
- nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.SwissMapGroupSlots)), nil, nil)
+ nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.MapGroupSlots)), nil, nil)
nif.Likely = true
- groupType := reflectdata.SwissMapGroupType(t)
+ groupType := reflectdata.MapGroupType(t)
// var gv group
// g = &gv
@@ -363,27 +355,27 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// Can't use ir.NewInt because bit 63 is set, which
// makes conversion to uint64 upset.
- empty := ir.NewBasicLit(base.Pos, types.UntypedInt, constant.MakeUint64(abi.SwissMapCtrlEmpty))
+ empty := ir.NewBasicLit(base.Pos, types.UntypedInt, constant.MakeUint64(abi.MapCtrlEmpty))
- // g.ctrl = abi.SwissMapCtrlEmpty
- csym := groupType.Field(0).Sym // g.ctrl see reflectdata/map_swiss.go
+ // g.ctrl = abi.MapCtrlEmpty
+ csym := groupType.Field(0).Sym // g.ctrl see reflectdata/map.go
ca := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, g, csym), empty)
nif.Body.Append(ca)
// m.dirPtr = g
- dsym := mapType.Field(2).Sym // m.dirPtr see reflectdata/map_swiss.go
+ dsym := mapType.Field(2).Sym // m.dirPtr see reflectdata/map.go
na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, m, dsym), typecheck.ConvNop(g, types.Types[types.TUNSAFEPTR]))
nif.Body.Append(na)
appendWalkStmt(init, nif)
}
}
- if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapGroupSlots)) {
+ if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.MapGroupSlots)) {
// Handling make(map[any]any) and
- // make(map[any]any, hint) where hint <= abi.SwissMapGroupSlots
+ // make(map[any]any, hint) where hint <= abi.MapGroupSlots
// specially allows for faster map initialization and
// improves binary size by using calls with fewer arguments.
- // For hint <= abi.SwissMapGroupSlots no groups will be
+ // For hint <= abi.MapGroupSlots no groups will be
// allocated by makemap. Therefore, no groups need to be
// allocated in this code path.
if n.Esc() == ir.EscNone {
@@ -391,7 +383,7 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
// m map has been allocated on the stack already.
// m.seed = uintptr(rand())
rand := mkcall("rand", types.Types[types.TUINT64], init)
- seedSym := mapType.Field(1).Sym // m.seed see reflectdata/map_swiss.go
+ seedSym := mapType.Field(1).Sym // m.seed see reflectdata/map.go
appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, m, seedSym), typecheck.Conv(rand, types.Types[types.TUINTPTR])))
return typecheck.ConvNop(m, t)
}
@@ -428,101 +420,6 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), m)
}
-func walkMakeOldMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
- t := n.Type()
- hmapType := reflectdata.OldMapType()
- hint := n.Len
-
- // var h *hmap
- var h ir.Node
- if n.Esc() == ir.EscNone {
- // Allocate hmap on stack.
-
- // var hv hmap
- // h = &hv
- h = stackTempAddr(init, hmapType)
-
- // Allocate one bucket pointed to by hmap.buckets on stack if hint
- // is not larger than BUCKETSIZE. In case hint is larger than
- // BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
- // Maximum key and elem size is 128 bytes, larger objects
- // are stored with an indirection. So max bucket size is 2048+eps.
- if !ir.IsConst(hint, constant.Int) ||
- constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.OldMapBucketCount)) {
-
- // In case hint is larger than BUCKETSIZE runtime.makemap
- // will allocate the buckets on the heap, see #20184
- //
- // if hint <= BUCKETSIZE {
- // var bv bmap
- // b = &bv
- // h.buckets = b
- // }
-
- nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.OldMapBucketCount)), nil, nil)
- nif.Likely = true
-
- // var bv bmap
- // b = &bv
- b := stackTempAddr(&nif.Body, reflectdata.OldMapBucketType(t))
-
- // h.buckets = b
- bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
- na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), typecheck.ConvNop(b, types.Types[types.TUNSAFEPTR]))
- nif.Body.Append(na)
- appendWalkStmt(init, nif)
- }
- }
-
- if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.OldMapBucketCount)) {
- // Handling make(map[any]any) and
- // make(map[any]any, hint) where hint <= BUCKETSIZE
- // special allows for faster map initialization and
- // improves binary size by using calls with fewer arguments.
- // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
- // and no buckets will be allocated by makemap. Therefore,
- // no buckets need to be allocated in this code path.
- if n.Esc() == ir.EscNone {
- // Only need to initialize h.hash0 since
- // hmap h has been allocated on the stack already.
- // h.hash0 = rand32()
- rand := mkcall("rand32", types.Types[types.TUINT32], init)
- hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
- appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
- return typecheck.ConvNop(h, t)
- }
- // Call runtime.makemap_small to allocate an
- // hmap on the heap and initialize hmap's hash0 field.
- fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem())
- return mkcall1(fn, n.Type(), init)
- }
-
- if n.Esc() != ir.EscNone {
- h = typecheck.NodNil()
- }
- // Map initialization with a variable or large hint is
- // more complicated. We therefore generate a call to
- // runtime.makemap to initialize hmap and allocate the
- // map buckets.
-
- // When hint fits into int, use makemap instead of
- // makemap64, which is faster and shorter on 32 bit platforms.
- fnname := "makemap64"
- argtype := types.Types[types.TINT64]
-
- // Type checking guarantees that TIDEAL hint is positive and fits in an int.
- // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
- // The case of hint overflow when converting TUINT or TUINTPTR to TINT
- // will be handled by the negative range checks in makemap during runtime.
- if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
- fnname = "makemap"
- argtype = types.Types[types.TINT]
- }
-
- fn := typecheck.LookupRuntime(fnname, hmapType, t.Key(), t.Elem())
- return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), h)
-}
-
// walkMakeSlice walks an OMAKESLICE node.
func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
len := n.Len
@@ -860,9 +757,9 @@ func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
return walkStmt(typecheck.Stmt(r))
}
-// walkRecoverFP walks an ORECOVERFP node.
-func walkRecoverFP(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
- return mkcall("gorecover", nn.Type(), init, walkExpr(nn.Args[0], init))
+// walkRecover walks an ORECOVER node.
+func walkRecover(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
+ return mkcall("gorecover", nn.Type(), init)
}
// walkUnsafeData walks an OUNSAFESLICEDATA or OUNSAFESTRINGDATA expression.
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
index 6775bc4fc8..fbfc56a39c 100644
--- a/src/cmd/compile/internal/walk/expr.go
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -182,8 +182,8 @@ func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
n := n.(*ir.UnaryExpr)
return mkcall("gopanic", nil, init, n.X)
- case ir.ORECOVERFP:
- return walkRecoverFP(n.(*ir.CallExpr), init)
+ case ir.ORECOVER:
+ return walkRecover(n.(*ir.CallExpr), init)
case ir.OCFUNC:
return n
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
index cb022faddf..4b0e36da85 100644
--- a/src/cmd/compile/internal/walk/order.go
+++ b/src/cmd/compile/internal/walk/order.go
@@ -8,7 +8,6 @@ import (
"fmt"
"go/constant"
"internal/abi"
- "internal/buildcfg"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@@ -846,7 +845,7 @@ func (o *orderState) stmt(n ir.Node) {
o.out = append(o.out, n)
o.popTemp(t)
- case ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
+ case ir.OPRINT, ir.OPRINTLN, ir.ORECOVER:
n := n.(*ir.CallExpr)
t := o.markTemp()
o.call(n)
@@ -968,11 +967,7 @@ func (o *orderState) stmt(n ir.Node) {
// n.Prealloc is the temp for the iterator.
// MapIterType contains pointers and needs to be zeroed.
- if buildcfg.Experiment.SwissMap {
- n.Prealloc = o.newTemp(reflectdata.SwissMapIterType(), true)
- } else {
- n.Prealloc = o.newTemp(reflectdata.OldMapIterType(), true)
- }
+ n.Prealloc = o.newTemp(reflectdata.MapIterType(), true)
}
n.Key = o.exprInPlace(n.Key)
n.Value = o.exprInPlace(n.Value)
@@ -1355,7 +1350,7 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
ir.OMIN,
ir.ONEW,
ir.OREAL,
- ir.ORECOVERFP,
+ ir.ORECOVER,
ir.OSTR2BYTES,
ir.OSTR2BYTESTMP,
ir.OSTR2RUNES:
diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go
index 3d3547b84b..139343a8ed 100644
--- a/src/cmd/compile/internal/walk/range.go
+++ b/src/cmd/compile/internal/walk/range.go
@@ -6,7 +6,6 @@ package walk
import (
"go/constant"
- "internal/buildcfg"
"unicode/utf8"
"cmd/compile/internal/base"
@@ -247,20 +246,11 @@ func walkRange(nrange *ir.RangeStmt) ir.Node {
hit := nrange.Prealloc
th := hit.Type()
// depends on layout of iterator struct.
- // See cmd/compile/internal/reflectdata/reflect.go:MapIterType
- var keysym, elemsym *types.Sym
- var iterInit, iterNext string
- if buildcfg.Experiment.SwissMap {
- keysym = th.Field(0).Sym
- elemsym = th.Field(1).Sym // ditto
- iterInit = "mapIterStart"
- iterNext = "mapIterNext"
- } else {
- keysym = th.Field(0).Sym
- elemsym = th.Field(1).Sym // ditto
- iterInit = "mapiterinit"
- iterNext = "mapiternext"
- }
+ // See cmd/compile/internal/reflectdata/map.go:MapIterType
+ keysym := th.Field(0).Sym
+ elemsym := th.Field(1).Sym // ditto
+ iterInit := "mapIterStart"
+ iterNext := "mapIterNext"
fn := typecheck.LookupRuntime(iterInit, t.Key(), t.Elem(), th)
init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit)))
diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go
index b2a226e078..2c01fd10f1 100644
--- a/src/cmd/compile/internal/walk/stmt.go
+++ b/src/cmd/compile/internal/walk/stmt.go
@@ -50,7 +50,7 @@ func walkStmt(n ir.Node) ir.Node {
ir.OPRINT,
ir.OPRINTLN,
ir.OPANIC,
- ir.ORECOVERFP,
+ ir.ORECOVER,
ir.OGETG:
if n.Typecheck() == 0 {
base.Fatalf("missing typecheck: %+v", n)
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
index 8b4381980d..25add3d804 100644
--- a/src/cmd/compile/internal/walk/walk.go
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -7,7 +7,6 @@ package walk
import (
"fmt"
"internal/abi"
- "internal/buildcfg"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
@@ -192,42 +191,7 @@ var mapassign = mkmapnames("mapassign", "ptr")
var mapdelete = mkmapnames("mapdelete", "")
func mapfast(t *types.Type) int {
- if buildcfg.Experiment.SwissMap {
- return mapfastSwiss(t)
- }
- return mapfastOld(t)
-}
-
-func mapfastSwiss(t *types.Type) int {
- if t.Elem().Size() > abi.OldMapMaxElemBytes {
- return mapslow
- }
- switch reflectdata.AlgType(t.Key()) {
- case types.AMEM32:
- if !t.Key().HasPointers() {
- return mapfast32
- }
- if types.PtrSize == 4 {
- return mapfast32ptr
- }
- base.Fatalf("small pointer %v", t.Key())
- case types.AMEM64:
- if !t.Key().HasPointers() {
- return mapfast64
- }
- if types.PtrSize == 8 {
- return mapfast64ptr
- }
- // Two-word object, at least one of which is a pointer.
- // Use the slow path.
- case types.ASTRING:
- return mapfaststr
- }
- return mapslow
-}
-
-func mapfastOld(t *types.Type) int {
- if t.Elem().Size() > abi.OldMapMaxElemBytes {
+ if t.Elem().Size() > abi.MapMaxElemBytes {
return mapslow
}
switch reflectdata.AlgType(t.Key()) {
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
index 347c5cb560..3347b38b28 100644
--- a/src/cmd/compile/internal/x86/ssa.go
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -16,6 +16,7 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
+ "internal/abi"
)
// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
@@ -740,19 +741,165 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.Op386LoweredPanicBoundsA, ssa.Op386LoweredPanicBoundsB, ssa.Op386LoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
- p.To.Type = obj.TYPE_MEM
- p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(8) // space used in callee args area by assembly stubs
+ case ssa.Op386LoweredPanicBoundsRR, ssa.Op386LoweredPanicBoundsRC, ssa.Op386LoweredPanicBoundsCR, ssa.Op386LoweredPanicBoundsCC,
+ ssa.Op386LoweredPanicExtendRR, ssa.Op386LoweredPanicExtendRC:
+ // Compute the constant we put in the PCData entry for this call.
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ extend := false
+ switch v.Op {
+ case ssa.Op386LoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - x86.REG_AX)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - x86.REG_AX)
+ case ssa.Op386LoweredPanicExtendRR:
+ extend = true
+ xIsReg = true
+ hi := int(v.Args[0].Reg() - x86.REG_AX)
+ lo := int(v.Args[1].Reg() - x86.REG_AX)
+ xVal = hi<<2 + lo // encode 2 register numbers
+ yIsReg = true
+ yVal = int(v.Args[2].Reg() - x86.REG_AX)
+ case ssa.Op386LoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - x86.REG_AX)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(yVal)
+ }
+ case ssa.Op386LoweredPanicExtendRC:
+ extend = true
+ xIsReg = true
+ hi := int(v.Args[0].Reg() - x86.REG_AX)
+ lo := int(v.Args[1].Reg() - x86.REG_AX)
+ xVal = hi<<2 + lo // encode 2 register numbers
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ for yVal == hi || yVal == lo {
+ yVal++
+ }
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(yVal)
+ }
+ case ssa.Op386LoweredPanicBoundsCR:
+ yIsReg = true
+ yVal := int(v.Args[0].Reg() - x86.REG_AX)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else if signed && int64(int32(c)) == c || !signed && int64(uint32(c)) == c {
+ // Move constant to a register
+ xIsReg = true
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(xVal)
+ } else {
+ // Move constant to two registers
+ extend = true
+ xIsReg = true
+ hi := 0
+ lo := 1
+ if hi == yVal {
+ hi = 2
+ }
+ if lo == yVal {
+ lo = 2
+ }
+ xVal = hi<<2 + lo
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c >> 32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(hi)
+ p = s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(int32(c))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(lo)
+ }
+ case ssa.Op386LoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else if signed && int64(int32(c)) == c || !signed && int64(uint32(c)) == c {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(xVal)
+ } else {
+ // Move constant to two registers
+ extend = true
+ xIsReg = true
+ hi := 0
+ lo := 1
+ xVal = hi<<2 + lo
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c >> 32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(hi)
+ p = s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(int32(c))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(lo)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 2
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
- case ssa.Op386LoweredPanicExtendA, ssa.Op386LoweredPanicExtendB, ssa.Op386LoweredPanicExtendC:
- p := s.Prog(obj.ACALL)
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
- s.UseArgs(12) // space used in callee args area by assembly stubs
+ if extend {
+ p.To.Sym = ir.Syms.PanicExtend
+ } else {
+ p.To.Sym = ir.Syms.PanicBounds
+ }
case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter:
s.Call(v)
diff --git a/src/cmd/doc/doc.go b/src/cmd/doc/doc.go
deleted file mode 100644
index ac15ad9c7d..0000000000
--- a/src/cmd/doc/doc.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Doc (usually run as go doc) accepts zero, one or two arguments.
-//
-// Zero arguments:
-//
-// go doc
-//
-// Show the documentation for the package in the current directory.
-//
-// One argument:
-//
-// go doc <pkg>
-// go doc <sym>[.<methodOrField>]
-// go doc [<pkg>.]<sym>[.<methodOrField>]
-// go doc [<pkg>.][<sym>.]<methodOrField>
-//
-// The first item in this list that succeeds is the one whose documentation
-// is printed. If there is a symbol but no package, the package in the current
-// directory is chosen. However, if the argument begins with a capital
-// letter it is always assumed to be a symbol in the current directory.
-//
-// Two arguments:
-//
-// go doc <pkg> <sym>[.<methodOrField>]
-//
-// Show the documentation for the package, symbol, and method or field. The
-// first argument must be a full package path. This is similar to the
-// command-line usage for the godoc command.
-//
-// For commands, unless the -cmd flag is present "go doc command"
-// shows only the package-level docs for the package.
-//
-// The -src flag causes doc to print the full source code for the symbol, such
-// as the body of a struct, function or method.
-//
-// The -all flag causes doc to print all documentation for the package and
-// all its visible symbols. The argument must identify a package.
-//
-// For complete documentation, run "go help doc".
-package main
-
-import (
- "cmd/internal/doc"
- "cmd/internal/telemetry/counter"
- "os"
-)
-
-func main() {
- counter.Open()
- counter.Inc("doc/invocations")
- doc.Main(os.Args[1:])
-}
diff --git a/src/cmd/go/internal/base/base.go b/src/cmd/go/internal/base/base.go
index 83cbad401e..d5d5f8d36e 100644
--- a/src/cmd/go/internal/base/base.go
+++ b/src/cmd/go/internal/base/base.go
@@ -62,11 +62,11 @@ var Go = &Command{
// Lookup returns the subcommand with the given name, if any.
// Otherwise it returns nil.
//
-// Lookup ignores subcommands that have len(c.Commands) == 0 and c.Run == nil.
+// Lookup ignores any subcommand `sub` that has len(sub.Commands) == 0 and sub.Run == nil.
// Such subcommands are only for use as arguments to "help".
func (c *Command) Lookup(name string) *Command {
for _, sub := range c.Commands {
- if sub.Name() == name && (len(c.Commands) > 0 || c.Runnable()) {
+ if sub.Name() == name && (len(sub.Commands) > 0 || sub.Runnable()) {
return sub
}
}
diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go
index 63e2dfdbc7..c6f311e026 100644
--- a/src/cmd/go/internal/clean/clean.go
+++ b/src/cmd/go/internal/clean/clean.go
@@ -120,6 +120,7 @@ func init() {
}
func runClean(ctx context.Context, cmd *base.Command, args []string) {
+ modload.InitWorkfile()
if len(args) > 0 {
cacheFlag := ""
switch {
diff --git a/src/cmd/internal/doc/dirs.go b/src/cmd/go/internal/doc/dirs.go
index 8b1670f61c..8b1670f61c 100644
--- a/src/cmd/internal/doc/dirs.go
+++ b/src/cmd/go/internal/doc/dirs.go
diff --git a/src/cmd/go/internal/doc/doc.go b/src/cmd/go/internal/doc/doc.go
index 74c70e2c7a..37501065fe 100644
--- a/src/cmd/go/internal/doc/doc.go
+++ b/src/cmd/go/internal/doc/doc.go
@@ -2,15 +2,26 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !cmd_go_bootstrap
-
// Package doc implements the “go doc” command.
package doc
import (
- "cmd/go/internal/base"
- "cmd/internal/doc"
+ "bytes"
"context"
+ "flag"
+ "fmt"
+ "go/build"
+ "go/token"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "cmd/go/internal/base"
+ "cmd/internal/telemetry/counter"
)
var CmdDoc = &base.Command{
@@ -136,5 +147,431 @@ Flags:
}
func runDoc(ctx context.Context, cmd *base.Command, args []string) {
- doc.Main(args)
+ log.SetFlags(0)
+ log.SetPrefix("doc: ")
+ dirsInit()
+ var flagSet flag.FlagSet
+ err := do(os.Stdout, &flagSet, args)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+var (
+ unexported bool // -u flag
+ matchCase bool // -c flag
+ chdir string // -C flag
+ showAll bool // -all flag
+ showCmd bool // -cmd flag
+ showSrc bool // -src flag
+ short bool // -short flag
+ serveHTTP bool // -http flag
+)
+
+// usage is a replacement usage function for the flags package.
+func usage(flagSet *flag.FlagSet) {
+ fmt.Fprintf(os.Stderr, "Usage of [go] doc:\n")
+ fmt.Fprintf(os.Stderr, "\tgo doc\n")
+ fmt.Fprintf(os.Stderr, "\tgo doc <pkg>\n")
+ fmt.Fprintf(os.Stderr, "\tgo doc <sym>[.<methodOrField>]\n")
+ fmt.Fprintf(os.Stderr, "\tgo doc [<pkg>.]<sym>[.<methodOrField>]\n")
+ fmt.Fprintf(os.Stderr, "\tgo doc [<pkg>.][<sym>.]<methodOrField>\n")
+ fmt.Fprintf(os.Stderr, "\tgo doc <pkg> <sym>[.<methodOrField>]\n")
+ fmt.Fprintf(os.Stderr, "For more information run\n")
+ fmt.Fprintf(os.Stderr, "\tgo help doc\n\n")
+ fmt.Fprintf(os.Stderr, "Flags:\n")
+ flagSet.PrintDefaults()
+ os.Exit(2)
+}
+
+// do is the workhorse, broken out of runDoc to make testing easier.
+func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) {
+ flagSet.Usage = func() { usage(flagSet) }
+ unexported = false
+ matchCase = false
+ flagSet.StringVar(&chdir, "C", "", "change to `dir` before running command")
+ flagSet.BoolVar(&unexported, "u", false, "show unexported symbols as well as exported")
+ flagSet.BoolVar(&matchCase, "c", false, "symbol matching honors case (paths not affected)")
+ flagSet.BoolVar(&showAll, "all", false, "show all documentation for package")
+ flagSet.BoolVar(&showCmd, "cmd", false, "show symbols with package docs even if package is a command")
+ flagSet.BoolVar(&showSrc, "src", false, "show source code for symbol")
+ flagSet.BoolVar(&short, "short", false, "one-line representation for each symbol")
+ flagSet.BoolVar(&serveHTTP, "http", false, "serve HTML docs over HTTP")
+ flagSet.Parse(args)
+ counter.CountFlags("doc/flag:", *flag.CommandLine)
+ if chdir != "" {
+ if err := os.Chdir(chdir); err != nil {
+ return err
+ }
+ }
+ if serveHTTP {
+ // Special case: if there are no arguments, try to go to an appropriate page
+ // depending on whether we're in a module or workspace. The pkgsite homepage
+ // is often not the most useful page.
+ if len(flagSet.Args()) == 0 {
+ mod, err := runCmd(append(os.Environ(), "GOWORK=off"), "go", "list", "-m")
+ if err == nil && mod != "" && mod != "command-line-arguments" {
+ // If there's a module, go to the module's doc page.
+ return doPkgsite(mod)
+ }
+ gowork, err := runCmd(nil, "go", "env", "GOWORK")
+ if err == nil && gowork != "" {
+ // Outside a module, but in a workspace, go to the home page
+ // with links to each of the modules' pages.
+ return doPkgsite("")
+ }
+ // Outside a module or workspace, go to the documentation for the standard library.
+ return doPkgsite("std")
+ }
+
+ // If args are provided, we need to figure out which page to open on the pkgsite
+ // instance. Run the logic below to determine a match for a symbol, method,
+ // or field, but don't actually print the documentation to the output.
+ writer = io.Discard
+ }
+ var paths []string
+ var symbol, method string
+ // Loop until something is printed.
+ dirs.Reset()
+ for i := 0; ; i++ {
+ buildPackage, userPath, sym, more := parseArgs(flagSet, flagSet.Args())
+ if i > 0 && !more { // Ignore the "more" bit on the first iteration.
+ return failMessage(paths, symbol, method)
+ }
+ if buildPackage == nil {
+ return fmt.Errorf("no such package: %s", userPath)
+ }
+
+ // The builtin package needs special treatment: its symbols are lower
+ // case but we want to see them, always.
+ if buildPackage.ImportPath == "builtin" {
+ unexported = true
+ }
+
+ symbol, method = parseSymbol(flagSet, sym)
+ pkg := parsePackage(writer, buildPackage, userPath)
+ paths = append(paths, pkg.prettyPath())
+
+ defer func() {
+ pkg.flush()
+ e := recover()
+ if e == nil {
+ return
+ }
+ pkgError, ok := e.(PackageError)
+ if ok {
+ err = pkgError
+ return
+ }
+ panic(e)
+ }()
+
+ var found bool
+ switch {
+ case symbol == "":
+ pkg.packageDoc() // The package exists, so we got some output.
+ found = true
+ case method == "":
+ if pkg.symbolDoc(symbol) {
+ found = true
+ }
+ case pkg.printMethodDoc(symbol, method):
+ found = true
+ case pkg.printFieldDoc(symbol, method):
+ found = true
+ }
+ if found {
+ if serveHTTP {
+ path, err := objectPath(userPath, pkg, symbol, method)
+ if err != nil {
+ return err
+ }
+ return doPkgsite(path)
+ }
+ return nil
+ }
+ }
+}
+
+func runCmd(env []string, cmdline ...string) (string, error) {
+ var stdout, stderr strings.Builder
+ cmd := exec.Command(cmdline[0], cmdline[1:]...)
+ cmd.Env = env
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ if err := cmd.Run(); err != nil {
+ return "", fmt.Errorf("go doc: %s: %v\n%s\n", strings.Join(cmdline, " "), err, stderr.String())
+ }
+ return strings.TrimSpace(stdout.String()), nil
+}
+
+func objectPath(userPath string, pkg *Package, symbol, method string) (string, error) {
+ var err error
+ path := pkg.build.ImportPath
+ if path == "." {
+ // go/build couldn't determine the import path, probably
+ // because this was a relative path into a module. Use
+ // go list to get the import path.
+ path, err = runCmd(nil, "go", "list", userPath)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ object := symbol
+ if symbol != "" && method != "" {
+ object = symbol + "." + method
+ }
+ if object != "" {
+ path = path + "#" + object
+ }
+ return path, nil
+}
+
// failMessage creates a nicely formatted error message when there is no
// result to show: it lists the package paths that were searched (pluralizing
// "package" when there was more than one) and names the symbol, or
// symbol.method, that could not be found.
func failMessage(paths []string, symbol, method string) error {
	var sb strings.Builder
	if len(paths) > 1 {
		sb.WriteString("s")
	}
	sb.WriteString(" ")
	sb.WriteString(strings.Join(paths, ", "))
	if method == "" {
		return fmt.Errorf("no symbol %s in package%s", symbol, sb.String())
	}
	return fmt.Errorf("no method or field %s.%s in package%s", symbol, method, sb.String())
}
+
// parseArgs analyzes the arguments (if any) and returns the package
// it represents, the part of the argument the user used to identify
// the path (or "" if it's the current package) and the symbol
// (possibly with a .method) within that package.
// parseSymbol is used to analyze the symbol itself.
// The boolean final argument reports whether it is possible that
// there may be more directories worth looking at. It will only
// be true if the package path is a partial match for some directory
// and there may be more matches. For example, if the argument
// is rand.Float64, we must scan both crypto/rand and math/rand
// to find the symbol, and the first call will return crypto/rand, true.
//
// On unrecoverable problems (too many arguments, unknown package) this
// function does not return: it calls usage or log.Fatal, which exit.
func parseArgs(flagSet *flag.FlagSet, args []string) (pkg *build.Package, path, symbol string, more bool) {
	wd, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	if len(args) == 0 {
		// Easy: current directory.
		return importDir(wd), "", "", false
	}
	arg := args[0]
	// We have an argument. If it is a directory name beginning with . or ..,
	// use the absolute path name. This discriminates "./errors" from "errors"
	// if the current directory contains a non-standard errors package.
	if isDotSlash(arg) {
		arg = filepath.Join(wd, arg)
	}
	switch len(args) {
	default:
		usage(flagSet) // Three or more arguments is always a usage error; usage exits.
	case 1:
		// Done below.
	case 2:
		// Package must be findable and importable.
		pkg, err := build.Import(args[0], wd, build.ImportComment)
		if err == nil {
			return pkg, args[0], args[1], false
		}
		// Not importable as given: scan the directory list for packages whose
		// import path ends in arg (e.g. "json" matching "encoding/json").
		for {
			packagePath, ok := findNextPackage(arg)
			if !ok {
				break
			}
			if pkg, err := build.ImportDir(packagePath, build.ImportComment); err == nil {
				return pkg, arg, args[1], true
			}
		}
		return nil, args[0], args[1], false
	}
	// Usual case: one argument.
	// If it contains slashes, it begins with either a package path
	// or an absolute directory.
	// First, is it a complete package path as it is? If so, we are done.
	// This avoids confusion over package paths that have other
	// package paths as their prefix.
	var importErr error
	if filepath.IsAbs(arg) {
		pkg, importErr = build.ImportDir(arg, build.ImportComment)
		if importErr == nil {
			return pkg, arg, "", false
		}
	} else {
		pkg, importErr = build.Import(arg, wd, build.ImportComment)
		if importErr == nil {
			return pkg, arg, "", false
		}
	}
	// Another disambiguator: If the argument starts with an upper
	// case letter, it can only be a symbol in the current directory.
	// Kills the problem caused by case-insensitive file systems
	// matching an upper case name as a package name.
	if !strings.ContainsAny(arg, `/\`) && token.IsExported(arg) {
		pkg, err := build.ImportDir(".", build.ImportComment)
		if err == nil {
			return pkg, "", arg, false
		}
	}
	// If it has a slash, it must be a package path but there is a symbol.
	// It's the last package path we care about.
	slash := strings.LastIndex(arg, "/")
	// There may be periods in the package path before or after the slash
	// and between a symbol and method.
	// Split the string at various periods to see what we find.
	// In general there may be ambiguities but this should almost always
	// work.
	var period int
	// slash+1: if there's no slash, the value is -1 and start is 0; otherwise
	// start is the byte after the slash.
	for start := slash + 1; start < len(arg); start = period + 1 {
		period = strings.Index(arg[start:], ".")
		symbol := ""
		if period < 0 {
			// No more periods: try the whole remainder as a package path.
			period = len(arg)
		} else {
			period += start
			symbol = arg[period+1:]
		}
		// Have we identified a package already?
		pkg, err := build.Import(arg[0:period], wd, build.ImportComment)
		if err == nil {
			return pkg, arg[0:period], symbol, false
		}
		// See if we have the basename or tail of a package, as in json for encoding/json
		// or ivy/value for robpike.io/ivy/value.
		pkgName := arg[:period]
		for {
			path, ok := findNextPackage(pkgName)
			if !ok {
				break
			}
			if pkg, err = build.ImportDir(path, build.ImportComment); err == nil {
				return pkg, arg[0:period], symbol, true
			}
		}
		dirs.Reset() // Next iteration of for loop must scan all the directories again.
	}
	// If it has a slash, we've failed.
	if slash >= 0 {
		// build.Import should always include the path in its error message,
		// and we should avoid repeating it. Unfortunately, build.Import doesn't
		// return a structured error. That can't easily be fixed, since it
		// invokes 'go list' and returns the error text from the loaded package.
		// TODO(golang.org/issue/34750): load using golang.org/x/tools/go/packages
		// instead of go/build.
		// NOTE(review): if the loop above never executed (e.g. arg ends in
		// "/"), period is still 0 and arg[:period] is "", making the
		// Contains check trivially true so the raw import error is printed —
		// confirm that fallback is intended.
		importErrStr := importErr.Error()
		if strings.Contains(importErrStr, arg[:period]) {
			log.Fatal(importErrStr)
		} else {
			log.Fatalf("no such package %s: %s", arg[:period], importErrStr)
		}
	}
	// Guess it's a symbol in the current directory.
	return importDir(wd), "", arg, false
}
+
// dotPaths enumerates every prefix that marks a relative reference to the
// current or parent directory, covering both Unix-like and Windows-like
// separators. We check them all, as the chance of error is minute and even
// on Windows people will use ./ sometimes.
var dotPaths = []string{
	`./`,
	`../`,
	`.\`,
	`..\`,
}

// isDotSlash reports whether the path begins with a reference
// to the local . or .. directory.
func isDotSlash(arg string) bool {
	switch arg {
	case ".", "..":
		return true
	}
	for _, prefix := range dotPaths {
		if strings.HasPrefix(arg, prefix) {
			return true
		}
	}
	return false
}
+
// importDir loads the package in dir via build.ImportDir. It is an
// error-catching wrapper: on failure it terminates the process through
// log.Fatal, so callers never observe an error value.
func importDir(dir string) *build.Package {
	p, importErr := build.ImportDir(dir, build.ImportComment)
	if importErr != nil {
		log.Fatal(importErr)
	}
	return p
}
+
+// parseSymbol breaks str apart into a symbol and method.
+// Both may be missing or the method may be missing.
+// If present, each must be a valid Go identifier.
+func parseSymbol(flagSet *flag.FlagSet, str string) (symbol, method string) {
+ if str == "" {
+ return
+ }
+ elem := strings.Split(str, ".")
+ switch len(elem) {
+ case 1:
+ case 2:
+ method = elem[1]
+ default:
+ log.Printf("too many periods in symbol specification")
+ usage(flagSet)
+ }
+ symbol = elem[0]
+ return
+}
+
+// isExported reports whether the name is an exported identifier.
+// If the unexported flag (-u) is true, isExported returns true because
+// it means that we treat the name as if it is exported.
+func isExported(name string) bool {
+ return unexported || token.IsExported(name)
+}
+
// findNextPackage returns the next full file name path that matches the
// (perhaps partial) package path pkg. The boolean reports if any match was found.
//
// Successive calls continue scanning from where the previous call stopped,
// via the package-level dirs iterator; callers use dirs.Reset to restart
// the scan from the beginning.
func findNextPackage(pkg string) (string, bool) {
	if filepath.IsAbs(pkg) {
		// An absolute path can match at most once. dirs.offset doubles as a
		// consumed flag here: 0 means first call, -1 marks the absolute
		// path as already returned.
		// NOTE(review): relies on dirs internals defined elsewhere in this
		// package — confirm offset is 0 on a fresh scan.
		if dirs.offset == 0 {
			dirs.offset = -1
			return pkg, true
		}
		return "", false
	}
	if pkg == "" || token.IsExported(pkg) { // Upper case symbol cannot be a package name.
		return "", false
	}
	pkg = path.Clean(pkg)
	pkgSuffix := "/" + pkg
	for {
		d, ok := dirs.Next()
		if !ok {
			return "", false
		}
		// Accept an exact import-path match or a path-tail match, so that
		// e.g. "json" finds "encoding/json".
		if d.importPath == pkg || strings.HasSuffix(d.importPath, pkgSuffix) {
			return d.dir, true
		}
	}
}
+
// buildCtx is the build context consulted when resolving packages; it starts
// out as a copy of build.Default.
var buildCtx = build.Default

// splitGopath splits $GOPATH into a list of roots.
func splitGopath() []string {
	gopath := buildCtx.GOPATH
	return filepath.SplitList(gopath)
}
diff --git a/src/cmd/internal/doc/doc_test.go b/src/cmd/go/internal/doc/doc_test.go
index bccace40c0..f91dcd658f 100644
--- a/src/cmd/internal/doc/doc_test.go
+++ b/src/cmd/go/internal/doc/doc_test.go
@@ -29,7 +29,7 @@ func TestMain(m *testing.M) {
buildCtx.GOROOT = testenv.GOROOT(nil)
build.Default.GOROOT = testenv.GOROOT(nil)
- // Add $GOROOT/src/cmd/doc/testdata explicitly so we can access its contents in the test.
+ // Add $GOROOT/src/cmd/go/internal/doc/testdata explicitly so we can access its contents in the test.
// Normally testdata directories are ignored, but sending it to dirs.scan directly is
// a hack that works around the check.
testdataDir, err := filepath.Abs("testdata")
@@ -90,7 +90,7 @@ type test struct {
no []string // Regular expressions that should not match.
}
-const p = "cmd/internal/doc/testdata"
+const p = "cmd/go/internal/doc/testdata"
var tests = []test{
// Sanity check.
@@ -105,7 +105,7 @@ var tests = []test{
{
"package clause",
[]string{p},
- []string{`package pkg.*cmd/internal/doc/testdata`},
+ []string{`package pkg.*cmd/go/internal/doc/testdata`},
nil,
},
diff --git a/src/cmd/internal/doc/pkg.go b/src/cmd/go/internal/doc/pkg.go
index 953b0d9a28..953b0d9a28 100644
--- a/src/cmd/internal/doc/pkg.go
+++ b/src/cmd/go/internal/doc/pkg.go
diff --git a/src/cmd/go/internal/doc/pkgsite.go b/src/cmd/go/internal/doc/pkgsite.go
new file mode 100644
index 0000000000..6769536ca5
--- /dev/null
+++ b/src/cmd/go/internal/doc/pkgsite.go
@@ -0,0 +1,93 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cmd_go_bootstrap
+
+package doc
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+ "os/exec"
+ "os/signal"
+ "path/filepath"
+ "strings"
+)
+
// pickUnusedPort asks the operating system for a currently unused TCP port
// by listening on localhost:0 (letting the kernel choose), recording the
// assigned port, and closing the listener. Another process may claim the
// port between the close and our later use, so this is inherently racy.
func pickUnusedPort() (int, error) {
	listener, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return 0, err
	}
	chosen := listener.Addr().(*net.TCPAddr).Port
	if closeErr := listener.Close(); closeErr != nil {
		return 0, closeErr
	}
	return chosen, nil
}
+
// doPkgsite serves documentation over HTTP by running pkgsite as a child
// process bound to a freshly picked localhost port, passing -open with the
// URL for urlPath (presumably so pkgsite opens it in a browser — confirm).
// It blocks until pkgsite exits; if pkgsite exits non-zero, this process
// exits with the same status.
func doPkgsite(urlPath string) error {
	port, err := pickUnusedPort()
	if err != nil {
		return fmt.Errorf("failed to find port for documentation server: %v", err)
	}
	addr := fmt.Sprintf("localhost:%d", port)
	path, err := url.JoinPath("http://"+addr, urlPath)
	if err != nil {
		return fmt.Errorf("internal error: failed to construct url: %v", err)
	}

	// Turn off the default signal handler for SIGINT (and SIGQUIT on Unix)
	// and instead wait for the child process to handle the signal and
	// exit before exiting ourselves.
	signal.Ignore(signalsToIgnore...)

	// Prepend the local download cache to GOPROXY to get around deprecation checks.
	// NOTE(review): if `go env` fails or prints an unexpected number of
	// fields, the GOPROXY rewrite is silently skipped and pkgsite runs with
	// the ambient environment — confirm best-effort is intended.
	env := os.Environ()
	vars, err := runCmd(env, goCmd(), "env", "GOPROXY", "GOMODCACHE")
	fields := strings.Fields(vars)
	if err == nil && len(fields) == 2 {
		goproxy, gomodcache := fields[0], fields[1]
		gomodcache = filepath.Join(gomodcache, "cache", "download")
		// Convert absolute path to file URL. pkgsite will not accept
		// Windows absolute paths because they look like a host:path remote.
		// TODO(golang.org/issue/32456): use url.FromFilePath when implemented.
		if strings.HasPrefix(gomodcache, "/") {
			gomodcache = "file://" + gomodcache
		} else {
			gomodcache = "file:///" + filepath.ToSlash(gomodcache)
		}
		env = append(env, "GOPROXY="+gomodcache+","+goproxy)
	}

	// Pin a specific pkgsite version so behavior doesn't drift under us.
	const version = "v0.0.0-20250714212547-01b046e81fe7"
	cmd := exec.Command(goCmd(), "run", "golang.org/x/pkgsite/cmd/internal/doc@"+version,
		"-gorepo", buildCtx.GOROOT,
		"-http", addr,
		"-open", path)
	cmd.Env = env
	// Both of the child's output streams go to our stderr.
	cmd.Stdout = os.Stderr
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		var ee *exec.ExitError
		if errors.As(err, &ee) {
			// Exit with the same exit status as pkgsite to avoid
			// printing of "exit status" error messages.
			// Any relevant messages have already been printed
			// to stdout or stderr.
			os.Exit(ee.ExitCode())
		}
		return err
	}

	return nil
}
diff --git a/src/cmd/go/internal/doc/doc_bootstrap.go b/src/cmd/go/internal/doc/pkgsite_bootstrap.go
index 8be95dc9a6..c909d6184a 100644
--- a/src/cmd/go/internal/doc/doc_bootstrap.go
+++ b/src/cmd/go/internal/doc/pkgsite_bootstrap.go
@@ -4,10 +4,8 @@
//go:build cmd_go_bootstrap
-// Don't build cmd/doc into go_bootstrap because it depends on net.
+// Don't build the pkgsite code into go_bootstrap because it depends on net.
package doc
-import "cmd/go/internal/base"
-
-var CmdDoc = &base.Command{}
+func doPkgsite(string) error { return nil }
diff --git a/src/cmd/internal/doc/signal_notunix.go b/src/cmd/go/internal/doc/signal_notunix.go
index b91a67eb5f..b91a67eb5f 100644
--- a/src/cmd/internal/doc/signal_notunix.go
+++ b/src/cmd/go/internal/doc/signal_notunix.go
diff --git a/src/cmd/internal/doc/signal_unix.go b/src/cmd/go/internal/doc/signal_unix.go
index f30612ce9d..f30612ce9d 100644
--- a/src/cmd/internal/doc/signal_unix.go
+++ b/src/cmd/go/internal/doc/signal_unix.go
diff --git a/src/cmd/internal/doc/testdata/merge/aa.go b/src/cmd/go/internal/doc/testdata/merge/aa.go
index f8ab92dfd0..f8ab92dfd0 100644
--- a/src/cmd/internal/doc/testdata/merge/aa.go
+++ b/src/cmd/go/internal/doc/testdata/merge/aa.go
diff --git a/src/cmd/internal/doc/testdata/merge/bb.go b/src/cmd/go/internal/doc/testdata/merge/bb.go
index fd8cf3c446..fd8cf3c446 100644
--- a/src/cmd/internal/doc/testdata/merge/bb.go
+++ b/src/cmd/go/internal/doc/testdata/merge/bb.go
diff --git a/src/cmd/internal/doc/testdata/nested/empty/empty.go b/src/cmd/go/internal/doc/testdata/nested/empty/empty.go
index 609cf0e0a0..609cf0e0a0 100644
--- a/src/cmd/internal/doc/testdata/nested/empty/empty.go
+++ b/src/cmd/go/internal/doc/testdata/nested/empty/empty.go
diff --git a/src/cmd/internal/doc/testdata/nested/ignore.go b/src/cmd/go/internal/doc/testdata/nested/ignore.go
index 5fa811d0a8..5fa811d0a8 100644
--- a/src/cmd/internal/doc/testdata/nested/ignore.go
+++ b/src/cmd/go/internal/doc/testdata/nested/ignore.go
diff --git a/src/cmd/internal/doc/testdata/nested/nested/real.go b/src/cmd/go/internal/doc/testdata/nested/nested/real.go
index 1e5546081c..1e5546081c 100644
--- a/src/cmd/internal/doc/testdata/nested/nested/real.go
+++ b/src/cmd/go/internal/doc/testdata/nested/nested/real.go
diff --git a/src/cmd/internal/doc/testdata/pkg.go b/src/cmd/go/internal/doc/testdata/pkg.go
index 4d269ff0a2..4d269ff0a2 100644
--- a/src/cmd/internal/doc/testdata/pkg.go
+++ b/src/cmd/go/internal/doc/testdata/pkg.go
diff --git a/src/cmd/go/internal/gover/mod.go b/src/cmd/go/internal/gover/mod.go
index d3cc17068d..3ac5ae8824 100644
--- a/src/cmd/go/internal/gover/mod.go
+++ b/src/cmd/go/internal/gover/mod.go
@@ -109,6 +109,9 @@ func ModIsPrefix(path, vers string) bool {
// The caller is assumed to have checked that ModIsValid(path, vers) is true.
func ModIsPrerelease(path, vers string) bool {
if IsToolchain(path) {
+ if path == "toolchain" {
+ return IsPrerelease(FromToolchain(vers))
+ }
return IsPrerelease(vers)
}
return semver.Prerelease(vers) != ""
diff --git a/src/cmd/go/internal/modcmd/edit.go b/src/cmd/go/internal/modcmd/edit.go
index f73269378a..aafd9752a8 100644
--- a/src/cmd/go/internal/modcmd/edit.go
+++ b/src/cmd/go/internal/modcmd/edit.go
@@ -234,7 +234,11 @@ func runEdit(ctx context.Context, cmd *base.Command, args []string) {
}
if *editModule != "" {
- if err := module.CheckImportPath(*editModule); err != nil {
+ err := module.CheckImportPath(*editModule)
+ if err == nil {
+ err = modload.CheckReservedModulePath(*editModule)
+ }
+ if err != nil {
base.Fatalf("go: invalid -module: %v", err)
}
}
diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go
index 3cf447e648..cb9d74df68 100644
--- a/src/cmd/go/internal/modload/init.go
+++ b/src/cmd/go/internal/modload/init.go
@@ -149,7 +149,7 @@ type MainModuleSet struct {
// highest replaced version of each module path; empty string for wildcard-only replacements
highestReplaced map[string]string
- indexMu sync.Mutex
+ indexMu sync.RWMutex
indices map[module.Version]*modFileIndex
}
@@ -228,8 +228,8 @@ func (mms *MainModuleSet) GetSingleIndexOrNil() *modFileIndex {
}
func (mms *MainModuleSet) Index(m module.Version) *modFileIndex {
- mms.indexMu.Lock()
- defer mms.indexMu.Unlock()
+ mms.indexMu.RLock()
+ defer mms.indexMu.RUnlock()
return mms.indices[m]
}
@@ -1122,6 +1122,16 @@ func errWorkTooOld(gomod string, wf *modfile.WorkFile, goVers string) error {
base.ShortPath(filepath.Dir(gomod)), goVers, verb, gover.FromGoWork(wf))
}
+// CheckReservedModulePath checks whether the module path is a reserved module path
+// that can't be used for a user's module.
+func CheckReservedModulePath(path string) error {
+ if gover.IsToolchain(path) {
+ return errors.New("module path is reserved")
+ }
+
+ return nil
+}
+
// CreateModFile initializes a new module by creating a go.mod file.
//
// If modPath is empty, CreateModFile will attempt to infer the path from the
@@ -1156,6 +1166,8 @@ func CreateModFile(ctx context.Context, modPath string) {
}
}
base.Fatal(err)
+ } else if err := CheckReservedModulePath(modPath); err != nil {
+ base.Fatalf(`go: invalid module path %q: `, modPath)
} else if _, _, ok := module.SplitPathVersion(modPath); !ok {
if strings.HasPrefix(modPath, "gopkg.in/") {
invalidMajorVersionMsg := fmt.Errorf("module paths beginning with gopkg.in/ must always have a major version suffix in the form of .vN:\n\tgo mod init %s", suggestGopkgIn(modPath))
diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go
index cb385c3505..04e204cc98 100644
--- a/src/cmd/go/internal/modload/modfile.go
+++ b/src/cmd/go/internal/modload/modfile.go
@@ -68,6 +68,8 @@ func ReadModFile(gomod string, fix modfile.VersionFixer) (data []byte, f *modfil
if f.Module == nil {
// No module declaration. Must add module path.
return nil, nil, fmt.Errorf("error reading %s: missing module declaration. To specify the module path:\n\tgo mod edit -module=example.com/mod", base.ShortPath(gomod))
+ } else if err := CheckReservedModulePath(f.Module.Mod.Path); err != nil {
+ return nil, nil, fmt.Errorf("error reading %s: invalid module path: %q", base.ShortPath(gomod), f.Module.Mod.Path)
}
return data, f, err
diff --git a/src/cmd/go/internal/telemetrystats/telemetrystats.go b/src/cmd/go/internal/telemetrystats/telemetrystats.go
index 950453fa95..d5b642240f 100644
--- a/src/cmd/go/internal/telemetrystats/telemetrystats.go
+++ b/src/cmd/go/internal/telemetrystats/telemetrystats.go
@@ -11,6 +11,7 @@ import (
"cmd/go/internal/cfg"
"cmd/go/internal/modload"
"cmd/internal/telemetry/counter"
+ "strings"
)
func Increment() {
@@ -48,4 +49,16 @@ func incrementConfig() {
case "wasm":
counter.Inc("go/platform/target/gowasm:" + cfg.GOWASM)
}
+
+ // Use cfg.Experiment.String instead of cfg.Experiment.Enabled
+ // because we only want to count the experiments that differ
+ // from the baseline.
+ if cfg.Experiment != nil {
+ for exp := range strings.SplitSeq(cfg.Experiment.String(), ",") {
+ if exp == "" {
+ continue
+ }
+ counter.Inc("go/goexperiment:" + exp)
+ }
+ }
}
diff --git a/src/cmd/go/internal/tool/tool.go b/src/cmd/go/internal/tool/tool.go
index 16e1a4f47f..120ef5339b 100644
--- a/src/cmd/go/internal/tool/tool.go
+++ b/src/cmd/go/internal/tool/tool.go
@@ -277,6 +277,29 @@ func loadModTool(ctx context.Context, name string) string {
return ""
}
// builtTool returns the file system path of the tool executable produced by
// the link action that runAction depends on (runAction.Deps[0] is assumed to
// be that link action). With -n it prefers the cached executable path when
// one exists; see the comment below for why.
func builtTool(runAction *work.Action) string {
	linkAction := runAction.Deps[0]
	if toolN {
		// #72824: If -n is set, use the cached path if we can.
		// This is only necessary if the binary wasn't cached
		// before this invocation of the go command: if the binary
		// was cached, BuiltTarget() will be the cached executable.
		// It's only in the "first run", where we actually do the build
		// and save the result to the cache that BuiltTarget is not
		// the cached binary. Ideally, we would set BuiltTarget
		// to the cached path even in the first run, but if we
		// copy the binary to the cached path, and try to run it
		// in the same process, we'll run into the dreaded #22315
		// resulting in occasional ETXTBSYs. Instead of getting the
		// ETXTBSY and then retrying just don't use the cached path
		// on the first run if we're going to actually run the binary.
		if cached := linkAction.CachedExecutable(); cached != "" {
			return cached
		}
	}
	return linkAction.BuiltTarget()
}
+
func buildAndRunBuiltinTool(ctx context.Context, toolName, tool string, args []string) {
// Override GOOS and GOARCH for the build to build the tool using
// the same GOOS and GOARCH as this go command.
@@ -288,7 +311,7 @@ func buildAndRunBuiltinTool(ctx context.Context, toolName, tool string, args []s
modload.RootMode = modload.NoRoot
runFunc := func(b *work.Builder, ctx context.Context, a *work.Action) error {
- cmdline := str.StringList(a.Deps[0].BuiltTarget(), a.Args)
+ cmdline := str.StringList(builtTool(a), a.Args)
return runBuiltTool(toolName, nil, cmdline)
}
@@ -300,7 +323,7 @@ func buildAndRunModtool(ctx context.Context, toolName, tool string, args []strin
// Use the ExecCmd to run the binary, as go run does. ExecCmd allows users
// to provide a runner to run the binary, for example a simulator for binaries
// that are cross-compiled to a different platform.
- cmdline := str.StringList(work.FindExecCmd(), a.Deps[0].BuiltTarget(), a.Args)
+ cmdline := str.StringList(work.FindExecCmd(), builtTool(a), a.Args)
// Use same environment go run uses to start the executable:
// the original environment with cfg.GOROOTbin added to the path.
env := slices.Clip(cfg.OrigEnv)
diff --git a/src/cmd/go/internal/work/action.go b/src/cmd/go/internal/work/action.go
index 2426720021..ecc3337131 100644
--- a/src/cmd/go/internal/work/action.go
+++ b/src/cmd/go/internal/work/action.go
@@ -97,11 +97,12 @@ type Action struct {
CacheExecutable bool // Whether to cache executables produced by link steps
// Generated files, directories.
- Objdir string // directory for intermediate objects
- Target string // goal of the action: the created package or executable
- built string // the actual created package or executable
- actionID cache.ActionID // cache ID of action input
- buildID string // build ID of action output
+ Objdir string // directory for intermediate objects
+ Target string // goal of the action: the created package or executable
+ built string // the actual created package or executable
+ cachedExecutable string // the cached executable, if CacheExecutable was set
+ actionID cache.ActionID // cache ID of action input
+ buildID string // build ID of action output
VetxOnly bool // Mode=="vet": only being called to supply info about dependencies
needVet bool // Mode=="build": need to fill in vet config
@@ -133,6 +134,10 @@ func (a *Action) BuildID() string { return a.buildID }
// from Target when the result was cached.
func (a *Action) BuiltTarget() string { return a.built }
+// CachedExecutable returns the cached executable, if CacheExecutable
+// was set and the executable could be cached, and "" otherwise.
+func (a *Action) CachedExecutable() string { return a.cachedExecutable }
+
// An actionQueue is a priority queue of actions.
type actionQueue []*Action
diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go
index 0bf9ba1781..c272131c77 100644
--- a/src/cmd/go/internal/work/buildid.go
+++ b/src/cmd/go/internal/work/buildid.go
@@ -745,8 +745,9 @@ func (b *Builder) updateBuildID(a *Action, target string) error {
}
outputID, _, err := c.PutExecutable(a.actionID, name+cfg.ExeSuffix, r)
r.Close()
+ a.cachedExecutable = c.OutputFile(outputID)
if err == nil && cfg.BuildX {
- sh.ShowCmd("", "%s # internal", joinUnambiguously(str.StringList("cp", target, c.OutputFile(outputID))))
+ sh.ShowCmd("", "%s # internal", joinUnambiguously(str.StringList("cp", target, a.cachedExecutable)))
}
}
}
diff --git a/src/cmd/go/internal/work/shell.go b/src/cmd/go/internal/work/shell.go
index 284ed26f22..e75b1c33fc 100644
--- a/src/cmd/go/internal/work/shell.go
+++ b/src/cmd/go/internal/work/shell.go
@@ -132,47 +132,11 @@ func (sh *Shell) moveOrCopyFile(dst, src string, perm fs.FileMode, force bool) e
return sh.CopyFile(dst, src, perm, force)
}
- // On Windows, always copy the file, so that we respect the NTFS
- // permissions of the parent folder. https://golang.org/issue/22343.
- // What matters here is not cfg.Goos (the system we are building
- // for) but runtime.GOOS (the system we are building on).
- if runtime.GOOS == "windows" {
- return sh.CopyFile(dst, src, perm, force)
- }
-
- // If the destination directory has the group sticky bit set,
- // we have to copy the file to retain the correct permissions.
- // https://golang.org/issue/18878
- if fi, err := os.Stat(filepath.Dir(dst)); err == nil {
- if fi.IsDir() && (fi.Mode()&fs.ModeSetgid) != 0 {
- return sh.CopyFile(dst, src, perm, force)
- }
- }
-
- // The perm argument is meant to be adjusted according to umask,
- // but we don't know what the umask is.
- // Create a dummy file to find out.
- // This avoids build tags and works even on systems like Plan 9
- // where the file mask computation incorporates other information.
- mode := perm
- f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
- if err == nil {
- fi, err := f.Stat()
- if err == nil {
- mode = fi.Mode() & 0777
- }
- name := f.Name()
- f.Close()
- os.Remove(name)
- }
-
- if err := os.Chmod(src, mode); err == nil {
- if err := os.Rename(src, dst); err == nil {
- if cfg.BuildX {
- sh.ShowCmd("", "mv %s %s", src, dst)
- }
- return nil
+ if err := sh.move(src, dst, perm); err == nil {
+ if cfg.BuildX {
+ sh.ShowCmd("", "mv %s %s", src, dst)
}
+ return nil
}
return sh.CopyFile(dst, src, perm, force)
diff --git a/src/cmd/go/internal/work/shell_nonwindows.go b/src/cmd/go/internal/work/shell_nonwindows.go
new file mode 100644
index 0000000000..c517fbe196
--- /dev/null
+++ b/src/cmd/go/internal/work/shell_nonwindows.go
@@ -0,0 +1,49 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package work
+
+import (
+ "errors"
+ "io/fs"
+ "os"
+ "path/filepath"
+)
+
// move moves a file from src to dst setting the permissions
// on the destination file to inherit the permissions from the
// destination parent directory.
// Returning an error (including errors.ErrUnsupported) tells the caller,
// moveOrCopyFile, to fall back to copying the file instead.
func (sh *Shell) move(src, dst string, perm fs.FileMode) error {
	// If the destination directory has the group sticky bit set,
	// we have to copy the file to retain the correct permissions.
	// https://golang.org/issue/18878
	if fi, err := os.Stat(filepath.Dir(dst)); err == nil {
		if fi.IsDir() && (fi.Mode()&fs.ModeSetgid) != 0 {
			return errors.ErrUnsupported
		}
	}
	// The perm argument is meant to be adjusted according to umask,
	// but we don't know what the umask is.
	// Create a dummy file to find out.
	// This works even on systems like Plan 9 where the
	// file mask computation incorporates other information.
	mode := perm
	f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
	if err == nil {
		fi, err := f.Stat()
		if err == nil {
			mode = fi.Mode() & 0777
		}
		name := f.Name()
		f.Close()
		os.Remove(name)
	}

	// Apply the discovered mode to the source before renaming, so dst ends
	// up with the permissions a freshly created file there would have.
	// Note: if the umask probe above failed, mode is simply perm unadjusted.
	if err := os.Chmod(src, mode); err != nil {
		return err
	}
	return os.Rename(src, dst)
}
diff --git a/src/cmd/go/internal/work/shell_windows.go b/src/cmd/go/internal/work/shell_windows.go
new file mode 100644
index 0000000000..9b80eab07f
--- /dev/null
+++ b/src/cmd/go/internal/work/shell_windows.go
@@ -0,0 +1,37 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+ "internal/syscall/windows"
+ "io/fs"
+ "os"
+ "unsafe"
+)
+
// move moves a file from src to dst, setting the security information
// on the destination file to inherit the permissions from the
// destination parent directory.
func (sh *Shell) move(src, dst string, perm fs.FileMode) (err error) {
	if err := os.Rename(src, dst); err != nil {
		return err
	}
	// From here on the file lives at dst; if a later step fails, remove it
	// so a failed move doesn't leave behind a file with wrong permissions.
	defer func() {
		if err != nil {
			os.Remove(dst) // clean up if we failed to set the mode or security info
		}
	}()
	if err := os.Chmod(dst, perm); err != nil {
		return err
	}
	// We need to respect the ACL permissions of the destination parent folder.
	// https://go.dev/issue/22343.
	// NOTE(review): an initialized empty DACL with
	// UNPROTECTED_DACL_SECURITY_INFORMATION appears to make dst inherit its
	// ACL entries from the parent directory — confirm against the Windows
	// SetNamedSecurityInfo documentation.
	var acl windows.ACL
	if err := windows.InitializeAcl(&acl, uint32(unsafe.Sizeof(acl)), windows.ACL_REVISION); err != nil {
		return err
	}
	secInfo := windows.DACL_SECURITY_INFORMATION | windows.UNPROTECTED_DACL_SECURITY_INFORMATION
	return windows.SetNamedSecurityInfo(dst, windows.SE_FILE_OBJECT, secInfo, nil, nil, &acl, nil)
}
diff --git a/src/cmd/go/testdata/script/mod_get_toolchain.txt b/src/cmd/go/testdata/script/mod_get_toolchain.txt
index 87e84ae15e..83cef4a0fd 100644
--- a/src/cmd/go/testdata/script/mod_get_toolchain.txt
+++ b/src/cmd/go/testdata/script/mod_get_toolchain.txt
@@ -94,12 +94,14 @@ stderr '^go: added toolchain go1.24rc1$'
grep 'go 1.22.9' go.mod # no longer implied
grep 'toolchain go1.24rc1' go.mod
-# go get toolchain@latest finds go1.999testmod.
+# go get toolchain@latest finds go1.23.9.
cp go.mod.orig go.mod
go get toolchain@latest
-stderr '^go: added toolchain go1.999testmod$'
+stderr '^go: added toolchain go1.23.9$'
grep 'go 1.21' go.mod
-grep 'toolchain go1.999testmod' go.mod
+grep 'toolchain go1.23.9' go.mod
+
+
# Bug fixes.
@@ -115,7 +117,7 @@ stderr '^go: upgraded go 1.19 => 1.21.0'
# go get toolchain@1.24rc1 is OK too.
go get toolchain@1.24rc1
-stderr '^go: downgraded toolchain go1.999testmod => go1.24rc1$'
+stderr '^go: upgraded toolchain go1.23.9 => go1.24rc1$'
# go get go@1.21 should work if we are the Go 1.21 language version,
# even though there's no toolchain for it.
diff --git a/src/cmd/go/testdata/script/mod_init_issue74784.txt b/src/cmd/go/testdata/script/mod_init_issue74784.txt
new file mode 100644
index 0000000000..f7863636e5
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_init_issue74784.txt
@@ -0,0 +1,26 @@
+# Don't allow the creation of modules with special "go" or "toolchain" paths.
+! go mod init go
+! stderr 'panic'
+stderr 'invalid module path'
+
+! go mod init toolchain
+! stderr 'panic'
+stderr 'invalid module path'
+
+# A module that contains the path element "go" is okay.
+go mod init example.com/go
+stderr 'creating new go.mod'
+
+# go mod edit won't allow a reserved module path either
+! go mod edit -module=go
+stderr 'invalid -module'
+
+# The go command should check for work modules for bad
+# names to return a proper error and avoid a panic.
+cp badmod.txt go.mod
+! go list
+! stderr panic
+stderr 'invalid module path'
+
+-- badmod.txt --
+module go
diff --git a/src/cmd/go/testdata/script/tool_n_issue72824.txt b/src/cmd/go/testdata/script/tool_n_issue72824.txt
new file mode 100644
index 0000000000..0c90fce290
--- /dev/null
+++ b/src/cmd/go/testdata/script/tool_n_issue72824.txt
@@ -0,0 +1,27 @@
+[short] skip 'does a build in using an empty cache'
+
+# Start with a fresh cache because we want to verify the behavior
+# when the tool hasn't been cached previously.
+env GOCACHE=$WORK${/}cache
+
+# Even when the tool hasn't been previously cached but was built and
+# saved to the cache in the invocation of 'go tool -n' we should return
+# its cached location.
+go tool -n foo
+stdout $GOCACHE
+
+# And of course we should also return the cached location on subsequent
+# runs.
+go tool -n foo
+stdout $GOCACHE
+
+-- go.mod --
+module example.com/foo
+
+go 1.25
+
+tool example.com/foo
+-- main.go --
+package main
+
+func main() {} \ No newline at end of file
diff --git a/src/cmd/internal/doc/main.go b/src/cmd/internal/doc/main.go
deleted file mode 100644
index 5032000cda..0000000000
--- a/src/cmd/internal/doc/main.go
+++ /dev/null
@@ -1,532 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package doc provides the implementation of the "go doc" subcommand and cmd/doc.
-package doc
-
-import (
- "bytes"
- "errors"
- "flag"
- "fmt"
- "go/build"
- "go/token"
- "io"
- "log"
- "net"
- "net/url"
- "os"
- "os/exec"
- "os/signal"
- "path"
- "path/filepath"
- "strings"
-
- "cmd/internal/telemetry/counter"
-)
-
-var (
- unexported bool // -u flag
- matchCase bool // -c flag
- chdir string // -C flag
- showAll bool // -all flag
- showCmd bool // -cmd flag
- showSrc bool // -src flag
- short bool // -short flag
- serveHTTP bool // -http flag
-)
-
-// usage is a replacement usage function for the flags package.
-func usage(flagSet *flag.FlagSet) {
- fmt.Fprintf(os.Stderr, "Usage of [go] doc:\n")
- fmt.Fprintf(os.Stderr, "\tgo doc\n")
- fmt.Fprintf(os.Stderr, "\tgo doc <pkg>\n")
- fmt.Fprintf(os.Stderr, "\tgo doc <sym>[.<methodOrField>]\n")
- fmt.Fprintf(os.Stderr, "\tgo doc [<pkg>.]<sym>[.<methodOrField>]\n")
- fmt.Fprintf(os.Stderr, "\tgo doc [<pkg>.][<sym>.]<methodOrField>\n")
- fmt.Fprintf(os.Stderr, "\tgo doc <pkg> <sym>[.<methodOrField>]\n")
- fmt.Fprintf(os.Stderr, "For more information run\n")
- fmt.Fprintf(os.Stderr, "\tgo help doc\n\n")
- fmt.Fprintf(os.Stderr, "Flags:\n")
- flagSet.PrintDefaults()
- os.Exit(2)
-}
-
-// Main is the entry point, invoked both by go doc and cmd/doc.
-func Main(args []string) {
- log.SetFlags(0)
- log.SetPrefix("doc: ")
- dirsInit()
- var flagSet flag.FlagSet
- err := do(os.Stdout, &flagSet, args)
- if err != nil {
- log.Fatal(err)
- }
-}
-
-// do is the workhorse, broken out of main to make testing easier.
-func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) {
- flagSet.Usage = func() { usage(flagSet) }
- unexported = false
- matchCase = false
- flagSet.StringVar(&chdir, "C", "", "change to `dir` before running command")
- flagSet.BoolVar(&unexported, "u", false, "show unexported symbols as well as exported")
- flagSet.BoolVar(&matchCase, "c", false, "symbol matching honors case (paths not affected)")
- flagSet.BoolVar(&showAll, "all", false, "show all documentation for package")
- flagSet.BoolVar(&showCmd, "cmd", false, "show symbols with package docs even if package is a command")
- flagSet.BoolVar(&showSrc, "src", false, "show source code for symbol")
- flagSet.BoolVar(&short, "short", false, "one-line representation for each symbol")
- flagSet.BoolVar(&serveHTTP, "http", false, "serve HTML docs over HTTP")
- flagSet.Parse(args)
- counter.CountFlags("doc/flag:", *flag.CommandLine)
- if chdir != "" {
- if err := os.Chdir(chdir); err != nil {
- return err
- }
- }
- if serveHTTP {
- // Special case: if there are no arguments, try to go to an appropriate page
- // depending on whether we're in a module or workspace. The pkgsite homepage
- // is often not the most useful page.
- if len(flagSet.Args()) == 0 {
- mod, err := runCmd(append(os.Environ(), "GOWORK=off"), "go", "list", "-m")
- if err == nil && mod != "" && mod != "command-line-arguments" {
- // If there's a module, go to the module's doc page.
- return doPkgsite(mod)
- }
- gowork, err := runCmd(nil, "go", "env", "GOWORK")
- if err == nil && gowork != "" {
- // Outside a module, but in a workspace, go to the home page
- // with links to each of the modules' pages.
- return doPkgsite("")
- }
- // Outside a module or workspace, go to the documentation for the standard library.
- return doPkgsite("std")
- }
-
- // If args are provided, we need to figure out which page to open on the pkgsite
- // instance. Run the logic below to determine a match for a symbol, method,
- // or field, but don't actually print the documentation to the output.
- writer = io.Discard
- }
- var paths []string
- var symbol, method string
- // Loop until something is printed.
- dirs.Reset()
- for i := 0; ; i++ {
- buildPackage, userPath, sym, more := parseArgs(flagSet, flagSet.Args())
- if i > 0 && !more { // Ignore the "more" bit on the first iteration.
- return failMessage(paths, symbol, method)
- }
- if buildPackage == nil {
- return fmt.Errorf("no such package: %s", userPath)
- }
-
- // The builtin package needs special treatment: its symbols are lower
- // case but we want to see them, always.
- if buildPackage.ImportPath == "builtin" {
- unexported = true
- }
-
- symbol, method = parseSymbol(flagSet, sym)
- pkg := parsePackage(writer, buildPackage, userPath)
- paths = append(paths, pkg.prettyPath())
-
- defer func() {
- pkg.flush()
- e := recover()
- if e == nil {
- return
- }
- pkgError, ok := e.(PackageError)
- if ok {
- err = pkgError
- return
- }
- panic(e)
- }()
-
- var found bool
- switch {
- case symbol == "":
- pkg.packageDoc() // The package exists, so we got some output.
- found = true
- case method == "":
- if pkg.symbolDoc(symbol) {
- found = true
- }
- case pkg.printMethodDoc(symbol, method):
- found = true
- case pkg.printFieldDoc(symbol, method):
- found = true
- }
- if found {
- if serveHTTP {
- path, err := objectPath(userPath, pkg, symbol, method)
- if err != nil {
- return err
- }
- return doPkgsite(path)
- }
- return nil
- }
- }
-}
-
-func runCmd(env []string, cmdline ...string) (string, error) {
- var stdout, stderr strings.Builder
- cmd := exec.Command(cmdline[0], cmdline[1:]...)
- cmd.Env = env
- cmd.Stdout = &stdout
- cmd.Stderr = &stderr
- if err := cmd.Run(); err != nil {
- return "", fmt.Errorf("go doc: %s: %v\n%s\n", strings.Join(cmdline, " "), err, stderr.String())
- }
- return strings.TrimSpace(stdout.String()), nil
-}
-
-func objectPath(userPath string, pkg *Package, symbol, method string) (string, error) {
- var err error
- path := pkg.build.ImportPath
- if path == "." {
- // go/build couldn't determine the import path, probably
- // because this was a relative path into a module. Use
- // go list to get the import path.
- path, err = runCmd(nil, "go", "list", userPath)
- if err != nil {
- return "", err
- }
- }
-
- object := symbol
- if symbol != "" && method != "" {
- object = symbol + "." + method
- }
- if object != "" {
- path = path + "#" + object
- }
- return path, nil
-}
-
-func doPkgsite(urlPath string) error {
- port, err := pickUnusedPort()
- if err != nil {
- return fmt.Errorf("failed to find port for documentation server: %v", err)
- }
- addr := fmt.Sprintf("localhost:%d", port)
- path, err := url.JoinPath("http://"+addr, urlPath)
- if err != nil {
- return fmt.Errorf("internal error: failed to construct url: %v", err)
- }
-
- // Turn off the default signal handler for SIGINT (and SIGQUIT on Unix)
- // and instead wait for the child process to handle the signal and
- // exit before exiting ourselves.
- signal.Ignore(signalsToIgnore...)
-
- // Prepend the local download cache to GOPROXY to get around deprecation checks.
- env := os.Environ()
- vars, err := runCmd(env, goCmd(), "env", "GOPROXY", "GOMODCACHE")
- fields := strings.Fields(vars)
- if err == nil && len(fields) == 2 {
- goproxy, gomodcache := fields[0], fields[1]
- gomodcache = filepath.Join(gomodcache, "cache", "download")
- // Convert absolute path to file URL. pkgsite will not accept
- // Windows absolute paths because they look like a host:path remote.
- // TODO(golang.org/issue/32456): use url.FromFilePath when implemented.
- if strings.HasPrefix(gomodcache, "/") {
- gomodcache = "file://" + gomodcache
- } else {
- gomodcache = "file:///" + filepath.ToSlash(gomodcache)
- }
- env = append(env, "GOPROXY="+gomodcache+","+goproxy)
- }
-
- const version = "v0.0.0-20250608123103-82c52f1754cd"
- cmd := exec.Command(goCmd(), "run", "golang.org/x/pkgsite/cmd/internal/doc@"+version,
- "-gorepo", buildCtx.GOROOT,
- "-http", addr,
- "-open", path)
- cmd.Env = env
- cmd.Stdout = os.Stderr
- cmd.Stderr = os.Stderr
-
- if err := cmd.Run(); err != nil {
- var ee *exec.ExitError
- if errors.As(err, &ee) {
- // Exit with the same exit status as pkgsite to avoid
- // printing of "exit status" error messages.
- // Any relevant messages have already been printed
- // to stdout or stderr.
- os.Exit(ee.ExitCode())
- }
- return err
- }
-
- return nil
-}
-
-// pickUnusedPort finds an unused port by trying to listen on port 0
-// and letting the OS pick a port, then closing that connection and
-// returning that port number.
-// This is inherently racy.
-func pickUnusedPort() (int, error) {
- l, err := net.Listen("tcp", "localhost:0")
- if err != nil {
- return 0, err
- }
- port := l.Addr().(*net.TCPAddr).Port
- if err := l.Close(); err != nil {
- return 0, err
- }
- return port, nil
-}
-
-// failMessage creates a nicely formatted error message when there is no result to show.
-func failMessage(paths []string, symbol, method string) error {
- var b bytes.Buffer
- if len(paths) > 1 {
- b.WriteString("s")
- }
- b.WriteString(" ")
- for i, path := range paths {
- if i > 0 {
- b.WriteString(", ")
- }
- b.WriteString(path)
- }
- if method == "" {
- return fmt.Errorf("no symbol %s in package%s", symbol, &b)
- }
- return fmt.Errorf("no method or field %s.%s in package%s", symbol, method, &b)
-}
-
-// parseArgs analyzes the arguments (if any) and returns the package
-// it represents, the part of the argument the user used to identify
-// the path (or "" if it's the current package) and the symbol
-// (possibly with a .method) within that package.
-// parseSymbol is used to analyze the symbol itself.
-// The boolean final argument reports whether it is possible that
-// there may be more directories worth looking at. It will only
-// be true if the package path is a partial match for some directory
-// and there may be more matches. For example, if the argument
-// is rand.Float64, we must scan both crypto/rand and math/rand
-// to find the symbol, and the first call will return crypto/rand, true.
-func parseArgs(flagSet *flag.FlagSet, args []string) (pkg *build.Package, path, symbol string, more bool) {
- wd, err := os.Getwd()
- if err != nil {
- log.Fatal(err)
- }
- if len(args) == 0 {
- // Easy: current directory.
- return importDir(wd), "", "", false
- }
- arg := args[0]
- // We have an argument. If it is a directory name beginning with . or ..,
- // use the absolute path name. This discriminates "./errors" from "errors"
- // if the current directory contains a non-standard errors package.
- if isDotSlash(arg) {
- arg = filepath.Join(wd, arg)
- }
- switch len(args) {
- default:
- usage(flagSet)
- case 1:
- // Done below.
- case 2:
- // Package must be findable and importable.
- pkg, err := build.Import(args[0], wd, build.ImportComment)
- if err == nil {
- return pkg, args[0], args[1], false
- }
- for {
- packagePath, ok := findNextPackage(arg)
- if !ok {
- break
- }
- if pkg, err := build.ImportDir(packagePath, build.ImportComment); err == nil {
- return pkg, arg, args[1], true
- }
- }
- return nil, args[0], args[1], false
- }
- // Usual case: one argument.
- // If it contains slashes, it begins with either a package path
- // or an absolute directory.
- // First, is it a complete package path as it is? If so, we are done.
- // This avoids confusion over package paths that have other
- // package paths as their prefix.
- var importErr error
- if filepath.IsAbs(arg) {
- pkg, importErr = build.ImportDir(arg, build.ImportComment)
- if importErr == nil {
- return pkg, arg, "", false
- }
- } else {
- pkg, importErr = build.Import(arg, wd, build.ImportComment)
- if importErr == nil {
- return pkg, arg, "", false
- }
- }
- // Another disambiguator: If the argument starts with an upper
- // case letter, it can only be a symbol in the current directory.
- // Kills the problem caused by case-insensitive file systems
- // matching an upper case name as a package name.
- if !strings.ContainsAny(arg, `/\`) && token.IsExported(arg) {
- pkg, err := build.ImportDir(".", build.ImportComment)
- if err == nil {
- return pkg, "", arg, false
- }
- }
- // If it has a slash, it must be a package path but there is a symbol.
- // It's the last package path we care about.
- slash := strings.LastIndex(arg, "/")
- // There may be periods in the package path before or after the slash
- // and between a symbol and method.
- // Split the string at various periods to see what we find.
- // In general there may be ambiguities but this should almost always
- // work.
- var period int
- // slash+1: if there's no slash, the value is -1 and start is 0; otherwise
- // start is the byte after the slash.
- for start := slash + 1; start < len(arg); start = period + 1 {
- period = strings.Index(arg[start:], ".")
- symbol := ""
- if period < 0 {
- period = len(arg)
- } else {
- period += start
- symbol = arg[period+1:]
- }
- // Have we identified a package already?
- pkg, err := build.Import(arg[0:period], wd, build.ImportComment)
- if err == nil {
- return pkg, arg[0:period], symbol, false
- }
- // See if we have the basename or tail of a package, as in json for encoding/json
- // or ivy/value for robpike.io/ivy/value.
- pkgName := arg[:period]
- for {
- path, ok := findNextPackage(pkgName)
- if !ok {
- break
- }
- if pkg, err = build.ImportDir(path, build.ImportComment); err == nil {
- return pkg, arg[0:period], symbol, true
- }
- }
- dirs.Reset() // Next iteration of for loop must scan all the directories again.
- }
- // If it has a slash, we've failed.
- if slash >= 0 {
- // build.Import should always include the path in its error message,
- // and we should avoid repeating it. Unfortunately, build.Import doesn't
- // return a structured error. That can't easily be fixed, since it
- // invokes 'go list' and returns the error text from the loaded package.
- // TODO(golang.org/issue/34750): load using golang.org/x/tools/go/packages
- // instead of go/build.
- importErrStr := importErr.Error()
- if strings.Contains(importErrStr, arg[:period]) {
- log.Fatal(importErrStr)
- } else {
- log.Fatalf("no such package %s: %s", arg[:period], importErrStr)
- }
- }
- // Guess it's a symbol in the current directory.
- return importDir(wd), "", arg, false
-}
-
-// dotPaths lists all the dotted paths legal on Unix-like and
-// Windows-like file systems. We check them all, as the chance
-// of error is minute and even on Windows people will use ./
-// sometimes.
-var dotPaths = []string{
- `./`,
- `../`,
- `.\`,
- `..\`,
-}
-
-// isDotSlash reports whether the path begins with a reference
-// to the local . or .. directory.
-func isDotSlash(arg string) bool {
- if arg == "." || arg == ".." {
- return true
- }
- for _, dotPath := range dotPaths {
- if strings.HasPrefix(arg, dotPath) {
- return true
- }
- }
- return false
-}
-
-// importDir is just an error-catching wrapper for build.ImportDir.
-func importDir(dir string) *build.Package {
- pkg, err := build.ImportDir(dir, build.ImportComment)
- if err != nil {
- log.Fatal(err)
- }
- return pkg
-}
-
-// parseSymbol breaks str apart into a symbol and method.
-// Both may be missing or the method may be missing.
-// If present, each must be a valid Go identifier.
-func parseSymbol(flagSet *flag.FlagSet, str string) (symbol, method string) {
- if str == "" {
- return
- }
- elem := strings.Split(str, ".")
- switch len(elem) {
- case 1:
- case 2:
- method = elem[1]
- default:
- log.Printf("too many periods in symbol specification")
- usage(flagSet)
- }
- symbol = elem[0]
- return
-}
-
-// isExported reports whether the name is an exported identifier.
-// If the unexported flag (-u) is true, isExported returns true because
-// it means that we treat the name as if it is exported.
-func isExported(name string) bool {
- return unexported || token.IsExported(name)
-}
-
-// findNextPackage returns the next full file name path that matches the
-// (perhaps partial) package path pkg. The boolean reports if any match was found.
-func findNextPackage(pkg string) (string, bool) {
- if filepath.IsAbs(pkg) {
- if dirs.offset == 0 {
- dirs.offset = -1
- return pkg, true
- }
- return "", false
- }
- if pkg == "" || token.IsExported(pkg) { // Upper case symbol cannot be a package name.
- return "", false
- }
- pkg = path.Clean(pkg)
- pkgSuffix := "/" + pkg
- for {
- d, ok := dirs.Next()
- if !ok {
- return "", false
- }
- if d.importPath == pkg || strings.HasSuffix(d.importPath, pkgSuffix) {
- return d.dir, true
- }
- }
-}
-
-var buildCtx = build.Default
-
-// splitGopath splits $GOPATH into a list of roots.
-func splitGopath() []string {
- return filepath.SplitList(buildCtx.GOPATH)
-}
diff --git a/src/cmd/internal/goobj/mkbuiltin.go b/src/cmd/internal/goobj/mkbuiltin.go
index 9a6ad8c8c1..6d1e7bb429 100644
--- a/src/cmd/internal/goobj/mkbuiltin.go
+++ b/src/cmd/internal/goobj/mkbuiltin.go
@@ -78,7 +78,7 @@ func mkbuiltin(w io.Writer) {
continue
}
if decl.Tok != token.VAR {
- log.Fatal("unhandled declaration kind", decl.Tok)
+ log.Fatal("unhandled declaration kind: ", decl.Tok)
}
for _, spec := range decl.Specs {
spec := spec.(*ast.ValueSpec)
@@ -92,7 +92,7 @@ func mkbuiltin(w io.Writer) {
}
}
default:
- log.Fatal("unhandled decl type", decl)
+ log.Fatal("unhandled decl type: ", decl)
}
}
diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go
index 2f04fd7316..79f08a41ea 100644
--- a/src/cmd/internal/obj/arm/obj5.go
+++ b/src/cmd/internal/obj/arm/obj5.go
@@ -350,117 +350,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
p.To.Reg = REGSP
p.Spadj = autosize
- if cursym.Func().Text.From.Sym.Wrapper() {
- // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
- //
- // MOVW g_panic(g), R1
- // CMP $0, R1
- // B.NE checkargp
- // end:
- // NOP
- // ... function ...
- // checkargp:
- // MOVW panic_argp(R1), R2
- // ADD $(autosize+4), R13, R3
- // CMP R2, R3
- // B.NE end
- // ADD $4, R13, R4
- // MOVW R4, panic_argp(R1)
- // B end
- //
- // The NOP is needed to give the jumps somewhere to land.
- // It is a liblink NOP, not an ARM NOP: it encodes to 0 instruction bytes.
-
- p = obj.Appendp(p, newprog)
- p.As = AMOVW
- p.From.Type = obj.TYPE_MEM
- p.From.Reg = REGG
- p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
- p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R1
-
- p = obj.Appendp(p, newprog)
- p.As = ACMP
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = 0
- p.Reg = REG_R1
-
- // B.NE checkargp
- bne := obj.Appendp(p, newprog)
- bne.As = ABNE
- bne.To.Type = obj.TYPE_BRANCH
-
- // end: NOP
- end := obj.Appendp(bne, newprog)
- end.As = obj.ANOP
-
- // find end of function
- var last *obj.Prog
- for last = end; last.Link != nil; last = last.Link {
- }
-
- // MOVW panic_argp(R1), R2
- mov := obj.Appendp(last, newprog)
- mov.As = AMOVW
- mov.From.Type = obj.TYPE_MEM
- mov.From.Reg = REG_R1
- mov.From.Offset = 0 // Panic.argp
- mov.To.Type = obj.TYPE_REG
- mov.To.Reg = REG_R2
-
- // B.NE branch target is MOVW above
- bne.To.SetTarget(mov)
-
- // ADD $(autosize+4), R13, R3
- p = obj.Appendp(mov, newprog)
- p.As = AADD
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(autosize) + 4
- p.Reg = REG_R13
- p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R3
-
- // CMP R2, R3
- p = obj.Appendp(p, newprog)
- p.As = ACMP
- p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R2
- p.Reg = REG_R3
-
- // B.NE end
- p = obj.Appendp(p, newprog)
- p.As = ABNE
- p.To.Type = obj.TYPE_BRANCH
- p.To.SetTarget(end)
-
- // ADD $4, R13, R4
- p = obj.Appendp(p, newprog)
- p.As = AADD
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = 4
- p.Reg = REG_R13
- p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R4
-
- // MOVW R4, panic_argp(R1)
- p = obj.Appendp(p, newprog)
- p.As = AMOVW
- p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R4
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = REG_R1
- p.To.Offset = 0 // Panic.argp
-
- // B end
- p = obj.Appendp(p, newprog)
- p.As = AB
- p.To.Type = obj.TYPE_BRANCH
- p.To.SetTarget(end)
-
- // reset for subsequent passes
- p = end
- }
-
case obj.ARET:
nocache(p)
if cursym.Func().Text.Mark&LEAF != 0 {
diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go
index 368a631ff5..6cdb72359e 100644
--- a/src/cmd/internal/obj/arm64/obj7.go
+++ b/src/cmd/internal/obj/arm64/obj7.go
@@ -733,111 +733,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q1.To.Type = obj.TYPE_REG
q1.To.Reg = REGFP
- if c.cursym.Func().Text.From.Sym.Wrapper() {
- // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
- //
- // MOV g_panic(g), RT1
- // CBNZ checkargp
- // end:
- // NOP
- // ... function body ...
- // checkargp:
- // MOV panic_argp(RT1), RT2
- // ADD $(autosize+8), RSP, R20
- // CMP RT2, R20
- // BNE end
- // ADD $8, RSP, R20
- // MOVD R20, panic_argp(RT1)
- // B end
- //
- // The NOP is needed to give the jumps somewhere to land.
- // It is a liblink NOP, not an ARM64 NOP: it encodes to 0 instruction bytes.
- q = q1
-
- // MOV g_panic(g), RT1
- q = obj.Appendp(q, c.newprog)
- q.As = AMOVD
- q.From.Type = obj.TYPE_MEM
- q.From.Reg = REGG
- q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REGRT1
-
- // CBNZ RT1, checkargp
- cbnz := obj.Appendp(q, c.newprog)
- cbnz.As = ACBNZ
- cbnz.From.Type = obj.TYPE_REG
- cbnz.From.Reg = REGRT1
- cbnz.To.Type = obj.TYPE_BRANCH
-
- // Empty branch target at the top of the function body
- end := obj.Appendp(cbnz, c.newprog)
- end.As = obj.ANOP
-
- // find the end of the function
- var last *obj.Prog
- for last = end; last.Link != nil; last = last.Link {
- }
-
- // MOV panic_argp(RT1), RT2
- mov := obj.Appendp(last, c.newprog)
- mov.As = AMOVD
- mov.From.Type = obj.TYPE_MEM
- mov.From.Reg = REGRT1
- mov.From.Offset = 0 // Panic.argp
- mov.To.Type = obj.TYPE_REG
- mov.To.Reg = REGRT2
-
- // CBNZ branches to the MOV above
- cbnz.To.SetTarget(mov)
-
- // ADD $(autosize+8), SP, R20
- q = obj.Appendp(mov, c.newprog)
- q.As = AADD
- q.From.Type = obj.TYPE_CONST
- q.From.Offset = int64(c.autosize) + 8
- q.Reg = REGSP
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R20
-
- // CMP RT2, R20
- q = obj.Appendp(q, c.newprog)
- q.As = ACMP
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REGRT2
- q.Reg = REG_R20
-
- // BNE end
- q = obj.Appendp(q, c.newprog)
- q.As = ABNE
- q.To.Type = obj.TYPE_BRANCH
- q.To.SetTarget(end)
-
- // ADD $8, SP, R20
- q = obj.Appendp(q, c.newprog)
- q.As = AADD
- q.From.Type = obj.TYPE_CONST
- q.From.Offset = 8
- q.Reg = REGSP
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R20
-
- // MOV R20, panic_argp(RT1)
- q = obj.Appendp(q, c.newprog)
- q.As = AMOVD
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R20
- q.To.Type = obj.TYPE_MEM
- q.To.Reg = REGRT1
- q.To.Offset = 0 // Panic.argp
-
- // B end
- q = obj.Appendp(q, c.newprog)
- q.As = AB
- q.To.Type = obj.TYPE_BRANCH
- q.To.SetTarget(end)
- }
-
case obj.ARET:
nocache(p)
if p.From.Type == obj.TYPE_CONST {
@@ -907,18 +802,49 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
p.To.Reg = REGFP
p.To.Offset = REGLINK
- // ADD $aoffset, RSP, RSP
- q = newprog()
- q.As = AADD
- q.From.Type = obj.TYPE_CONST
- q.From.Offset = int64(aoffset)
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REGSP
- q.Spadj = -aoffset
- q.Pos = p.Pos
- q.Link = p.Link
- p.Link = q
- p = q
+ if aoffset < 1<<12 {
+ // ADD $aoffset, RSP, RSP
+ q = newprog()
+ q.As = AADD
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = int64(aoffset)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REGSP
+ q.Spadj = -aoffset
+ q.Pos = p.Pos
+ q.Link = p.Link
+ p.Link = q
+ p = q
+ } else {
+ // Put frame size in a separate register and
+ // add it in with a single instruction,
+ // so we never have a partial frame during
+ // the epilog. See issue 73259.
+
+ // MOVD $aoffset, REGTMP
+ q = newprog()
+ q.As = AMOVD
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = int64(aoffset)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REGTMP
+ q.Pos = p.Pos
+ q.Link = p.Link
+ p.Link = q
+ p = q
+ // ADD REGTMP, RSP, RSP
+ q = newprog()
+ q.As = AADD
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REGTMP
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REGSP
+ q.Spadj = -aoffset
+ q.Pos = p.Pos
+ q.Link = p.Link
+ p.Link = q
+ p = q
+ }
}
// If enabled, this code emits 'MOV PC, R27' before every 'MOV LR, PC',
diff --git a/src/cmd/internal/obj/fips140.go b/src/cmd/internal/obj/fips140.go
index eb6ffff009..ea36849a21 100644
--- a/src/cmd/internal/obj/fips140.go
+++ b/src/cmd/internal/obj/fips140.go
@@ -384,6 +384,7 @@ func (s *LSym) checkFIPSReloc(ctxt *Link, rel Reloc) {
objabi.R_RISCV_TLS_IE,
objabi.R_RISCV_TLS_LE,
objabi.R_RISCV_GOT_HI20,
+ objabi.R_RISCV_GOT_PCREL_ITYPE,
objabi.R_RISCV_PCREL_HI20,
objabi.R_RISCV_PCREL_LO12_I,
objabi.R_RISCV_PCREL_LO12_S,
diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go
index 193993ec4d..162e36be8c 100644
--- a/src/cmd/internal/obj/loong64/a.out.go
+++ b/src/cmd/internal/obj/loong64/a.out.go
@@ -816,6 +816,31 @@ const (
AXVPCNTW
AXVPCNTV
+ AVBITCLRB
+ AVBITCLRH
+ AVBITCLRW
+ AVBITCLRV
+ AVBITSETB
+ AVBITSETH
+ AVBITSETW
+ AVBITSETV
+ AVBITREVB
+ AVBITREVH
+ AVBITREVW
+ AVBITREVV
+ AXVBITCLRB
+ AXVBITCLRH
+ AXVBITCLRW
+ AXVBITCLRV
+ AXVBITSETB
+ AXVBITSETH
+ AXVBITSETW
+ AXVBITSETV
+ AXVBITREVB
+ AXVBITREVH
+ AXVBITREVW
+ AXVBITREVV
+
// LSX and LASX integer comparison instruction
AVSEQB
AXVSEQB
diff --git a/src/cmd/internal/obj/loong64/anames.go b/src/cmd/internal/obj/loong64/anames.go
index bf9b0722cc..d9ff3b7bc9 100644
--- a/src/cmd/internal/obj/loong64/anames.go
+++ b/src/cmd/internal/obj/loong64/anames.go
@@ -327,6 +327,30 @@ var Anames = []string{
"XVPCNTH",
"XVPCNTW",
"XVPCNTV",
+ "VBITCLRB",
+ "VBITCLRH",
+ "VBITCLRW",
+ "VBITCLRV",
+ "VBITSETB",
+ "VBITSETH",
+ "VBITSETW",
+ "VBITSETV",
+ "VBITREVB",
+ "VBITREVH",
+ "VBITREVW",
+ "VBITREVV",
+ "XVBITCLRB",
+ "XVBITCLRH",
+ "XVBITCLRW",
+ "XVBITCLRV",
+ "XVBITSETB",
+ "XVBITSETH",
+ "XVBITSETW",
+ "XVBITSETV",
+ "XVBITREVB",
+ "XVBITREVH",
+ "XVBITREVW",
+ "XVBITREVV",
"VSEQB",
"XVSEQB",
"VSEQH",
diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go
index 6e09930183..2ed12698e6 100644
--- a/src/cmd/internal/obj/loong64/asm.go
+++ b/src/cmd/internal/obj/loong64/asm.go
@@ -416,8 +416,11 @@ var optab = []Optab{
{AVMOVQ, C_ELEM, C_NONE, C_NONE, C_ARNG, C_NONE, 45, 4, 0, 0},
- {APRELD, C_SOREG, C_U5CON, C_NONE, C_NONE, C_NONE, 46, 4, 0, 0},
- {APRELDX, C_SOREG, C_DCON, C_U5CON, C_NONE, C_NONE, 47, 20, 0, 0},
+ {AVMOVQ, C_SOREG, C_NONE, C_NONE, C_ARNG, C_NONE, 46, 4, 0, 0},
+ {AXVMOVQ, C_SOREG, C_NONE, C_NONE, C_ARNG, C_NONE, 46, 4, 0, 0},
+
+ {APRELD, C_SOREG, C_U5CON, C_NONE, C_NONE, C_NONE, 47, 4, 0, 0},
+ {APRELDX, C_SOREG, C_DCON, C_U5CON, C_NONE, C_NONE, 48, 20, 0, 0},
{obj.APCALIGN, C_U12CON, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0},
{obj.APCDATA, C_32CON, C_NONE, C_NONE, C_32CON, C_NONE, 0, 0, 0, 0},
@@ -1830,21 +1833,33 @@ func buildop(ctxt *obj.Link) {
opset(AVSRLB, r0)
opset(AVSRAB, r0)
opset(AVROTRB, r0)
+ opset(AVBITCLRB, r0)
+ opset(AVBITSETB, r0)
+ opset(AVBITREVB, r0)
case AXVSLLB:
opset(AXVSRLB, r0)
opset(AXVSRAB, r0)
opset(AXVROTRB, r0)
+ opset(AXVBITCLRB, r0)
+ opset(AXVBITSETB, r0)
+ opset(AXVBITREVB, r0)
case AVSLLH:
opset(AVSRLH, r0)
opset(AVSRAH, r0)
opset(AVROTRH, r0)
+ opset(AVBITCLRH, r0)
+ opset(AVBITSETH, r0)
+ opset(AVBITREVH, r0)
case AXVSLLH:
opset(AXVSRLH, r0)
opset(AXVSRAH, r0)
opset(AXVROTRH, r0)
+ opset(AXVBITCLRH, r0)
+ opset(AXVBITSETH, r0)
+ opset(AXVBITREVH, r0)
case AVSLLW:
opset(AVSRLW, r0)
@@ -1858,6 +1873,9 @@ func buildop(ctxt *obj.Link) {
opset(AVSUBHU, r0)
opset(AVSUBWU, r0)
opset(AVSUBVU, r0)
+ opset(AVBITCLRW, r0)
+ opset(AVBITSETW, r0)
+ opset(AVBITREVW, r0)
case AXVSLLW:
opset(AXVSRLW, r0)
@@ -1871,16 +1889,25 @@ func buildop(ctxt *obj.Link) {
opset(AXVSUBHU, r0)
opset(AXVSUBWU, r0)
opset(AXVSUBVU, r0)
+ opset(AXVBITCLRW, r0)
+ opset(AXVBITSETW, r0)
+ opset(AXVBITREVW, r0)
case AVSLLV:
opset(AVSRLV, r0)
opset(AVSRAV, r0)
opset(AVROTRV, r0)
+ opset(AVBITCLRV, r0)
+ opset(AVBITSETV, r0)
+ opset(AVBITREVV, r0)
case AXVSLLV:
opset(AXVSRLV, r0)
opset(AXVSRAV, r0)
opset(AXVROTRV, r0)
+ opset(AXVBITCLRV, r0)
+ opset(AXVBITSETV, r0)
+ opset(AXVBITREVV, r0)
case AVSETEQV:
opset(AVSETNEV, r0)
@@ -2395,7 +2422,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = uint32(c.regoff(&p.From))
case 39: // vmov Rn, Vd.<T>[index]
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2407,7 +2434,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (index << 10) | (Rj << 5) | Vd
case 40: // vmov Vd.<T>[index], Rn
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2419,7 +2446,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (index << 10) | (Vj << 5) | Rd
case 41: // vmov Rn, Vd.<T>
- v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2429,7 +2456,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (Rj << 5) | Vd
case 42: // vmov xj, xd.<T>
- v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2439,7 +2466,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (Xj << 5) | Xd
case 43: // vmov xj, xd.<T>[index]
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2451,7 +2478,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (index << 10) | (Xj << 5) | Xd
case 44: // vmov xj.<T>[index], xd
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2463,7 +2490,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 = v | (index << 10) | (Xj << 5) | Xd
case 45: // vmov vj.<T>[index], vd.<T>
- v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg)
+ v, m := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, false)
if v == 0 {
c.ctxt.Diag("illegal arng type combination: %v\n", p)
}
@@ -2474,12 +2501,23 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) {
c.checkindex(p, index, m)
o1 = v | (index << 10) | (vj << 5) | vd
- case 46: // preld offset(Rbase), $hint
+ case 46: // vmov offset(vj), vd.<T>
+ v, _ := c.specialLsxMovInst(p.As, p.From.Reg, p.To.Reg, true)
+ if v == 0 {
+ c.ctxt.Diag("illegal arng type combination: %v\n", p)
+ }
+
+ si := c.regoff(&p.From)
+ Rj := uint32(p.From.Reg & EXT_REG_MASK)
+ Vd := uint32(p.To.Reg & EXT_REG_MASK)
+ o1 = v | uint32(si<<10) | (Rj << 5) | Vd
+
+ case 47: // preld offset(Rbase), $hint
offs := c.regoff(&p.From)
hint := p.GetFrom3().Offset
o1 = OP_12IR_5I(c.opiir(p.As), uint32(offs), uint32(p.From.Reg), uint32(hint))
- case 47: // preldx offset(Rbase), $n, $hint
+ case 48: // preldx offset(Rbase), $n, $hint
offs := c.regoff(&p.From)
hint := p.RestArgs[1].Offset
n := uint64(p.GetFrom3().Offset)
@@ -3504,6 +3542,54 @@ func (c *ctxt0) oprrr(a obj.As) uint32 {
return 0xea75 << 15 // xvfdiv.s
case AXVDIVD:
return 0xea76 << 15 // xvfdiv.d
+ case AVBITCLRB:
+ return 0xe218 << 15 // vbitclr.b
+ case AVBITCLRH:
+ return 0xe219 << 15 // vbitclr.h
+ case AVBITCLRW:
+ return 0xe21a << 15 // vbitclr.w
+ case AVBITCLRV:
+ return 0xe21b << 15 // vbitclr.d
+ case AVBITSETB:
+ return 0xe21c << 15 // vbitset.b
+ case AVBITSETH:
+ return 0xe21d << 15 // vbitset.h
+ case AVBITSETW:
+ return 0xe21e << 15 // vbitset.w
+ case AVBITSETV:
+ return 0xe21f << 15 // vbitset.d
+ case AVBITREVB:
+ return 0xe220 << 15 // vbitrev.b
+ case AVBITREVH:
+ return 0xe221 << 15 // vbitrev.h
+ case AVBITREVW:
+ return 0xe222 << 15 // vbitrev.w
+ case AVBITREVV:
+ return 0xe223 << 15 // vbitrev.d
+ case AXVBITCLRB:
+ return 0xea18 << 15 // xvbitclr.b
+ case AXVBITCLRH:
+ return 0xea19 << 15 // xvbitclr.h
+ case AXVBITCLRW:
+ return 0xea1a << 15 // xvbitclr.w
+ case AXVBITCLRV:
+ return 0xea1b << 15 // xvbitclr.d
+ case AXVBITSETB:
+ return 0xea1c << 15 // xvbitset.b
+ case AXVBITSETH:
+ return 0xea1d << 15 // xvbitset.h
+ case AXVBITSETW:
+ return 0xea1e << 15 // xvbitset.w
+ case AXVBITSETV:
+ return 0xea1f << 15 // xvbitset.d
+ case AXVBITREVB:
+ return 0xea20 << 15 // xvbitrev.b
+ case AXVBITREVH:
+ return 0xea21 << 15 // xvbitrev.h
+ case AXVBITREVW:
+ return 0xea22 << 15 // xvbitrev.w
+ case AXVBITREVV:
+ return 0xea23 << 15 // xvbitrev.d
}
if a < 0 {
@@ -4104,6 +4190,54 @@ func (c *ctxt0) opirr(a obj.As) uint32 {
return 0x1de6 << 18 // xvshuf4i.w
case AXVSHUF4IV:
return 0x1de7 << 18 // xvshuf4i.d
+ case AVBITCLRB:
+ return 0x1CC4<<18 | 0x1<<13 // vbitclri.b
+ case AVBITCLRH:
+ return 0x1CC4<<18 | 0x1<<14 // vbitclri.h
+ case AVBITCLRW:
+ return 0x1CC4<<18 | 0x1<<15 // vbitclri.w
+ case AVBITCLRV:
+ return 0x1CC4<<18 | 0x1<<16 // vbitclri.d
+ case AVBITSETB:
+ return 0x1CC5<<18 | 0x1<<13 // vbitseti.b
+ case AVBITSETH:
+ return 0x1CC5<<18 | 0x1<<14 // vbitseti.h
+ case AVBITSETW:
+ return 0x1CC5<<18 | 0x1<<15 // vbitseti.w
+ case AVBITSETV:
+ return 0x1CC5<<18 | 0x1<<16 // vbitseti.d
+ case AVBITREVB:
+ return 0x1CC6<<18 | 0x1<<13 // vbitrevi.b
+ case AVBITREVH:
+ return 0x1CC6<<18 | 0x1<<14 // vbitrevi.h
+ case AVBITREVW:
+ return 0x1CC6<<18 | 0x1<<15 // vbitrevi.w
+ case AVBITREVV:
+ return 0x1CC6<<18 | 0x1<<16 // vbitrevi.d
+ case AXVBITCLRB:
+ return 0x1DC4<<18 | 0x1<<13 // xvbitclri.b
+ case AXVBITCLRH:
+ return 0x1DC4<<18 | 0x1<<14 // xvbitclri.h
+ case AXVBITCLRW:
+ return 0x1DC4<<18 | 0x1<<15 // xvbitclri.w
+ case AXVBITCLRV:
+ return 0x1DC4<<18 | 0x1<<16 // xvbitclri.d
+ case AXVBITSETB:
+ return 0x1DC5<<18 | 0x1<<13 // xvbitseti.b
+ case AXVBITSETH:
+ return 0x1DC5<<18 | 0x1<<14 // xvbitseti.h
+ case AXVBITSETW:
+ return 0x1DC5<<18 | 0x1<<15 // xvbitseti.w
+ case AXVBITSETV:
+ return 0x1DC5<<18 | 0x1<<16 // xvbitseti.d
+ case AXVBITREVB:
+ return 0x1DC6<<18 | 0x1<<13 // xvbitrevi.b
+ case AXVBITREVH:
+ return 0x1DC6<<18 | 0x1<<14 // xvbitrevi.h
+ case AXVBITREVW:
+ return 0x1DC6<<18 | 0x1<<15 // xvbitrevi.w
+ case AXVBITREVV:
+ return 0x1DC6<<18 | 0x1<<16 // xvbitrevi.d
}
if a < 0 {
@@ -4192,7 +4326,7 @@ func (c *ctxt0) specialFpMovInst(a obj.As, fclass int, tclass int) uint32 {
return 0
}
-func (c *ctxt0) specialLsxMovInst(a obj.As, fReg, tReg int16) (op_code, index_mask uint32) {
+func (c *ctxt0) specialLsxMovInst(a obj.As, fReg, tReg int16, offset_flag bool) (op_code, index_mask uint32) {
farng := (fReg >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK
tarng := (tReg >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK
fclass := c.rclass(fReg)
@@ -4258,29 +4392,58 @@ func (c *ctxt0) specialLsxMovInst(a obj.As, fReg, tReg int16) (op_code, index_ma
}
case C_REG | (C_ARNG << 16):
- // vmov Rn, Vd.<T>
- switch a {
- case AVMOVQ:
- switch tarng {
- case ARNG_16B:
- return (0x1CA7C0 << 10), 0x0 // vreplgr2vr.b
- case ARNG_8H:
- return (0x1CA7C1 << 10), 0x0 // vreplgr2vr.h
- case ARNG_4W:
- return (0x1CA7C2 << 10), 0x0 // vreplgr2vr.w
- case ARNG_2V:
- return (0x1CA7C3 << 10), 0x0 // vreplgr2vr.d
+ switch {
+ case offset_flag:
+ // vmov offset(vj), vd.<T>
+ switch a {
+ case AVMOVQ:
+ switch tarng {
+ case ARNG_16B:
+ return (0xC2 << 22), 0x0 // vldrepl.b
+ case ARNG_8H:
+ return (0x182 << 21), 0x0 // vldrepl.h
+ case ARNG_4W:
+ return (0x302 << 20), 0x0 // vldrepl.w
+ case ARNG_2V:
+ return (0x602 << 19), 0x0 // vldrepl.d
+ }
+ case AXVMOVQ:
+ switch tarng {
+ case ARNG_32B:
+ return (0xCA << 22), 0x0 // xvldrepl.b
+ case ARNG_16H:
+ return (0x192 << 21), 0x0 // xvldrepl.h
+ case ARNG_8W:
+ return (0x322 << 20), 0x0 // xvldrepl.w
+ case ARNG_4V:
+ return (0x642 << 19), 0x0 // xvldrepl.d
+ }
}
- case AXVMOVQ:
- switch tarng {
- case ARNG_32B:
- return (0x1DA7C0 << 10), 0x0 // xvreplgr2vr.b
- case ARNG_16H:
- return (0x1DA7C1 << 10), 0x0 // xvreplgr2vr.h
- case ARNG_8W:
- return (0x1DA7C2 << 10), 0x0 // xvreplgr2vr.w
- case ARNG_4V:
- return (0x1DA7C3 << 10), 0x0 // xvreplgr2vr.d
+ default:
+ // vmov Rn, Vd.<T>
+ switch a {
+ case AVMOVQ:
+ switch tarng {
+ case ARNG_16B:
+ return (0x1CA7C0 << 10), 0x0 // vreplgr2vr.b
+ case ARNG_8H:
+ return (0x1CA7C1 << 10), 0x0 // vreplgr2vr.h
+ case ARNG_4W:
+ return (0x1CA7C2 << 10), 0x0 // vreplgr2vr.w
+ case ARNG_2V:
+ return (0x1CA7C3 << 10), 0x0 // vreplgr2vr.d
+ }
+ case AXVMOVQ:
+ switch tarng {
+ case ARNG_32B:
+ return (0x1DA7C0 << 10), 0x0 // xvreplgr2vr.b
+ case ARNG_16H:
+ return (0x1DA7C1 << 10), 0x0 // xvreplgr2vr.h
+ case ARNG_8W:
+ return (0x1DA7C2 << 10), 0x0 // xvreplgr2vr.w
+ case ARNG_4V:
+ return (0x1DA7C3 << 10), 0x0 // xvreplgr2vr.d
+ }
}
}
diff --git a/src/cmd/internal/obj/loong64/doc.go b/src/cmd/internal/obj/loong64/doc.go
index 0818389c8d..a990b23089 100644
--- a/src/cmd/internal/obj/loong64/doc.go
+++ b/src/cmd/internal/obj/loong64/doc.go
@@ -203,6 +203,23 @@ Note: In the following sections 3.1 to 3.6, "ui4" (4-bit unsigned int immediate)
VMOVQ Vj.W[index], Vd.W4 | vreplvei.w vd, vj, ui2 | for i in range(4) : VR[vd].w[i] = VR[vj].w[ui2]
VMOVQ Vj.V[index], Vd.V2 | vreplvei.d vd, vj, ui1 | for i in range(2) : VR[vd].d[i] = VR[vj].d[ui1]
+3.7 Load data from memory and broadcast to each element of a vector register.
+
+ Instruction format:
+ VMOVQ offset(Rj), <Vd>.<T>
+
+ Mapping between Go and platform assembly:
+ Go assembly | platform assembly | semantics
+ -------------------------------------------------------------------------------------------------------------------------------------------------------
+ VMOVQ offset(Rj), Vd.B16 | vldrepl.b Vd, Rj, si12 | for i in range(16): VR[vd].b[i] = load 8 bit memory data from (GR[rj]+SignExtend(si12))
+ VMOVQ offset(Rj), Vd.H8 | vldrepl.h Vd, Rj, si11 | for i in range(8) : VR[vd].h[i] = load 16 bit memory data from (GR[rj]+SignExtend(si11<<1))
+ VMOVQ offset(Rj), Vd.W4 | vldrepl.w Vd, Rj, si10 | for i in range(4) : VR[vd].w[i] = load 32 bit memory data from (GR[rj]+SignExtend(si10<<2))
+ VMOVQ offset(Rj), Vd.V2 | vldrepl.d Vd, Rj, si9 | for i in range(2) : VR[vd].d[i] = load 64 bit memory data from (GR[rj]+SignExtend(si9<<3))
+ XVMOVQ offset(Rj), Xd.B32 | xvldrepl.b Xd, Rj, si12 | for i in range(32): XR[xd].b[i] = load 8 bit memory data from (GR[rj]+SignExtend(si12))
+ XVMOVQ offset(Rj), Xd.H16 | xvldrepl.h Xd, Rj, si11 | for i in range(16): XR[xd].h[i] = load 16 bit memory data from (GR[rj]+SignExtend(si11<<1))
+ XVMOVQ offset(Rj), Xd.W8 | xvldrepl.w Xd, Rj, si10 | for i in range(8) : XR[xd].w[i] = load 32 bit memory data from (GR[rj]+SignExtend(si10<<2))
+ XVMOVQ offset(Rj), Xd.V4 | xvldrepl.d Xd, Rj, si9 | for i in range(4) : XR[xd].d[i] = load 64 bit memory data from (GR[rj]+SignExtend(si9<<3))
+
# Special instruction encoding definition and description on LoongArch
1. DBAR hint encoding for LA664(Loongson 3A6000) and later micro-architectures, paraphrased
diff --git a/src/cmd/internal/obj/loong64/obj.go b/src/cmd/internal/obj/loong64/obj.go
index f75e2d8716..79fbb23fef 100644
--- a/src/cmd/internal/obj/loong64/obj.go
+++ b/src/cmd/internal/obj/loong64/obj.go
@@ -301,8 +301,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
var q *obj.Prog
var q1 *obj.Prog
autosize := int32(0)
- var p1 *obj.Prog
- var p2 *obj.Prog
for p := c.cursym.Func().Text; p != nil; p = p.Link {
o := p.As
switch o {
@@ -401,90 +399,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.To.Reg = REGSP
}
- if c.cursym.Func().Text.From.Sym.Wrapper() && c.cursym.Func().Text.Mark&LEAF == 0 {
- // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
- //
- // MOV g_panic(g), R20
- // BEQ R20, end
- // MOV panic_argp(R20), R24
- // ADD $(autosize+FIXED_FRAME), R3, R30
- // BNE R24, R30, end
- // ADD $FIXED_FRAME, R3, R24
- // MOV R24, panic_argp(R20)
- // end:
- // NOP
- //
- // The NOP is needed to give the jumps somewhere to land.
- // It is a liblink NOP, not a hardware NOP: it encodes to 0 instruction bytes.
- //
- // We don't generate this for leaves because that means the wrapped
- // function was inlined into the wrapper.
-
- q = obj.Appendp(q, newprog)
-
- q.As = mov
- q.From.Type = obj.TYPE_MEM
- q.From.Reg = REGG
- q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R20
-
- q = obj.Appendp(q, newprog)
- q.As = ABEQ
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R20
- q.To.Type = obj.TYPE_BRANCH
- q.Mark |= BRANCH
- p1 = q
-
- q = obj.Appendp(q, newprog)
- q.As = mov
- q.From.Type = obj.TYPE_MEM
- q.From.Reg = REG_R20
- q.From.Offset = 0 // Panic.argp
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R24
-
- q = obj.Appendp(q, newprog)
- q.As = add
- q.From.Type = obj.TYPE_CONST
- q.From.Offset = int64(autosize) + ctxt.Arch.FixedFrameSize
- q.Reg = REGSP
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R30
-
- q = obj.Appendp(q, newprog)
- q.As = ABNE
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R24
- q.Reg = REG_R30
- q.To.Type = obj.TYPE_BRANCH
- q.Mark |= BRANCH
- p2 = q
-
- q = obj.Appendp(q, newprog)
- q.As = add
- q.From.Type = obj.TYPE_CONST
- q.From.Offset = ctxt.Arch.FixedFrameSize
- q.Reg = REGSP
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R24
-
- q = obj.Appendp(q, newprog)
- q.As = mov
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R24
- q.To.Type = obj.TYPE_MEM
- q.To.Reg = REG_R20
- q.To.Offset = 0 // Panic.argp
-
- q = obj.Appendp(q, newprog)
-
- q.As = obj.ANOP
- p1.To.SetTarget(q)
- p2.To.SetTarget(q)
- }
-
case ARET:
if p.From.Type == obj.TYPE_CONST {
ctxt.Diag("using BECOME (%v) is not supported!", p)
diff --git a/src/cmd/internal/obj/mips/obj0.go b/src/cmd/internal/obj/mips/obj0.go
index b9152fe57e..bbed88e9d6 100644
--- a/src/cmd/internal/obj/mips/obj0.go
+++ b/src/cmd/internal/obj/mips/obj0.go
@@ -267,7 +267,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
var q1 *obj.Prog
autosize := int32(0)
var p1 *obj.Prog
- var p2 *obj.Prog
for p := c.cursym.Func().Text; p != nil; p = p.Link {
o := p.As
switch o {
@@ -359,90 +358,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.To.Reg = REGSP
}
- if c.cursym.Func().Text.From.Sym.Wrapper() && c.cursym.Func().Text.Mark&LEAF == 0 {
- // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
- //
- // MOV g_panic(g), R1
- // BEQ R1, end
- // MOV panic_argp(R1), R2
- // ADD $(autosize+FIXED_FRAME), R29, R3
- // BNE R2, R3, end
- // ADD $FIXED_FRAME, R29, R2
- // MOV R2, panic_argp(R1)
- // end:
- // NOP
- //
- // The NOP is needed to give the jumps somewhere to land.
- // It is a liblink NOP, not an mips NOP: it encodes to 0 instruction bytes.
- //
- // We don't generate this for leafs because that means the wrapped
- // function was inlined into the wrapper.
-
- q = obj.Appendp(q, newprog)
-
- q.As = mov
- q.From.Type = obj.TYPE_MEM
- q.From.Reg = REGG
- q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R1
-
- q = obj.Appendp(q, newprog)
- q.As = ABEQ
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R1
- q.To.Type = obj.TYPE_BRANCH
- q.Mark |= BRANCH
- p1 = q
-
- q = obj.Appendp(q, newprog)
- q.As = mov
- q.From.Type = obj.TYPE_MEM
- q.From.Reg = REG_R1
- q.From.Offset = 0 // Panic.argp
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R2
-
- q = obj.Appendp(q, newprog)
- q.As = add
- q.From.Type = obj.TYPE_CONST
- q.From.Offset = int64(autosize) + ctxt.Arch.FixedFrameSize
- q.Reg = REGSP
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R3
-
- q = obj.Appendp(q, newprog)
- q.As = ABNE
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R2
- q.Reg = REG_R3
- q.To.Type = obj.TYPE_BRANCH
- q.Mark |= BRANCH
- p2 = q
-
- q = obj.Appendp(q, newprog)
- q.As = add
- q.From.Type = obj.TYPE_CONST
- q.From.Offset = ctxt.Arch.FixedFrameSize
- q.Reg = REGSP
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R2
-
- q = obj.Appendp(q, newprog)
- q.As = mov
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R2
- q.To.Type = obj.TYPE_MEM
- q.To.Reg = REG_R1
- q.To.Offset = 0 // Panic.argp
-
- q = obj.Appendp(q, newprog)
-
- q.As = obj.ANOP
- p1.To.SetTarget(q)
- p2.To.SetTarget(q)
- }
-
case ARET:
if p.From.Type == obj.TYPE_CONST {
ctxt.Diag("using BECOME (%v) is not supported!", p)
diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go
index 2d2c198ab9..5615b70aad 100644
--- a/src/cmd/internal/obj/ppc64/obj9.go
+++ b/src/cmd/internal/obj/ppc64/obj9.go
@@ -806,8 +806,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
autosize := int32(0)
- var p1 *obj.Prog
- var p2 *obj.Prog
for p := c.cursym.Func().Text; p != nil; p = p.Link {
o := p.As
switch o {
@@ -967,96 +965,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.To.Offset = 24
}
- if c.cursym.Func().Text.From.Sym.Wrapper() {
- // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
- //
- // MOVD g_panic(g), R22
- // CMP R22, $0
- // BEQ end
- // MOVD panic_argp(R22), R23
- // ADD $(autosize+8), R1, R24
- // CMP R23, R24
- // BNE end
- // ADD $8, R1, R25
- // MOVD R25, panic_argp(R22)
- // end:
- // NOP
- //
- // The NOP is needed to give the jumps somewhere to land.
- // It is a liblink NOP, not a ppc64 NOP: it encodes to 0 instruction bytes.
-
- q = obj.Appendp(q, c.newprog)
-
- q.As = AMOVD
- q.From.Type = obj.TYPE_MEM
- q.From.Reg = REGG
- q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R22
-
- q = obj.Appendp(q, c.newprog)
- q.As = ACMP
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R22
- q.To.Type = obj.TYPE_CONST
- q.To.Offset = 0
-
- q = obj.Appendp(q, c.newprog)
- q.As = ABEQ
- q.To.Type = obj.TYPE_BRANCH
- p1 = q
-
- q = obj.Appendp(q, c.newprog)
- q.As = AMOVD
- q.From.Type = obj.TYPE_MEM
- q.From.Reg = REG_R22
- q.From.Offset = 0 // Panic.argp
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R23
-
- q = obj.Appendp(q, c.newprog)
- q.As = AADD
- q.From.Type = obj.TYPE_CONST
- q.From.Offset = int64(autosize) + c.ctxt.Arch.FixedFrameSize
- q.Reg = REGSP
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R24
-
- q = obj.Appendp(q, c.newprog)
- q.As = ACMP
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R23
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R24
-
- q = obj.Appendp(q, c.newprog)
- q.As = ABNE
- q.To.Type = obj.TYPE_BRANCH
- p2 = q
-
- q = obj.Appendp(q, c.newprog)
- q.As = AADD
- q.From.Type = obj.TYPE_CONST
- q.From.Offset = c.ctxt.Arch.FixedFrameSize
- q.Reg = REGSP
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R25
-
- q = obj.Appendp(q, c.newprog)
- q.As = AMOVD
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R25
- q.To.Type = obj.TYPE_MEM
- q.To.Reg = REG_R22
- q.To.Offset = 0 // Panic.argp
-
- q = obj.Appendp(q, c.newprog)
-
- q.As = obj.ANOP
- p1.To.SetTarget(q)
- p2.To.SetTarget(q)
- }
-
case obj.ARET:
if p.From.Type == obj.TYPE_CONST {
c.ctxt.Diag("using BECOME (%v) is not supported!", p)
diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go
index 5b598b5757..078e81a2f7 100644
--- a/src/cmd/internal/obj/riscv/obj.go
+++ b/src/cmd/internal/obj/riscv/obj.go
@@ -564,85 +564,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
prologue.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_SP, Offset: 0}
}
- if cursym.Func().Text.From.Sym.Wrapper() {
- // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
- //
- // MOV g_panic(g), X5
- // BNE X5, ZERO, adjust
- // end:
- // NOP
- // ...rest of function..
- // adjust:
- // MOV panic_argp(X5), X6
- // ADD $(autosize+FIXED_FRAME), SP, X7
- // BNE X6, X7, end
- // ADD $FIXED_FRAME, SP, X6
- // MOV X6, panic_argp(X5)
- // JMP end
- //
- // The NOP is needed to give the jumps somewhere to land.
-
- ldpanic := obj.Appendp(prologue, newprog)
-
- ldpanic.As = AMOV
- ldpanic.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REGG, Offset: 4 * int64(ctxt.Arch.PtrSize)} // G.panic
- ldpanic.Reg = obj.REG_NONE
- ldpanic.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X5}
-
- bneadj := obj.Appendp(ldpanic, newprog)
- bneadj.As = ABNE
- bneadj.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X5}
- bneadj.Reg = REG_ZERO
- bneadj.To.Type = obj.TYPE_BRANCH
-
- endadj := obj.Appendp(bneadj, newprog)
- endadj.As = obj.ANOP
-
- last := endadj
- for last.Link != nil {
- last = last.Link
- }
-
- getargp := obj.Appendp(last, newprog)
- getargp.As = AMOV
- getargp.From = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_X5, Offset: 0} // Panic.argp
- getargp.Reg = obj.REG_NONE
- getargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X6}
-
- bneadj.To.SetTarget(getargp)
-
- calcargp := obj.Appendp(getargp, newprog)
- calcargp.As = AADDI
- calcargp.From = obj.Addr{Type: obj.TYPE_CONST, Offset: stacksize + ctxt.Arch.FixedFrameSize}
- calcargp.Reg = REG_SP
- calcargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X7}
-
- testargp := obj.Appendp(calcargp, newprog)
- testargp.As = ABNE
- testargp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X6}
- testargp.Reg = REG_X7
- testargp.To.Type = obj.TYPE_BRANCH
- testargp.To.SetTarget(endadj)
-
- adjargp := obj.Appendp(testargp, newprog)
- adjargp.As = AADDI
- adjargp.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(ctxt.Arch.PtrSize)}
- adjargp.Reg = REG_SP
- adjargp.To = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X6}
-
- setargp := obj.Appendp(adjargp, newprog)
- setargp.As = AMOV
- setargp.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_X6}
- setargp.Reg = obj.REG_NONE
- setargp.To = obj.Addr{Type: obj.TYPE_MEM, Reg: REG_X5, Offset: 0} // Panic.argp
-
- godone := obj.Appendp(setargp, newprog)
- godone.As = AJAL
- godone.From = obj.Addr{Type: obj.TYPE_REG, Reg: REG_ZERO}
- godone.To.Type = obj.TYPE_BRANCH
- godone.To.SetTarget(endadj)
- }
-
// Update stack-based offsets.
for p := cursym.Func().Text; p != nil; p = p.Link {
stackOffset(&p.From, stacksize)
diff --git a/src/cmd/internal/obj/s390x/a.out.go b/src/cmd/internal/obj/s390x/a.out.go
index 3eed4624b1..dc715182f5 100644
--- a/src/cmd/internal/obj/s390x/a.out.go
+++ b/src/cmd/internal/obj/s390x/a.out.go
@@ -444,6 +444,7 @@ const (
// storage-and-storage
AMVC
AMVCIN
+ AMVCLE
ACLC
AXC
AOC
@@ -714,6 +715,14 @@ const (
AWFLNDB
AVFLPDB
AWFLPDB
+ AVFMAXDB
+ AWFMAXDB
+ AVFMAXSB
+ AWFMAXSB
+ AVFMINDB
+ AWFMINDB
+ AVFMINSB
+ AWFMINSB
AVFSQ
AVFSQDB
AWFSQDB
diff --git a/src/cmd/internal/obj/s390x/anames.go b/src/cmd/internal/obj/s390x/anames.go
index ae86d2092b..a6f2820f85 100644
--- a/src/cmd/internal/obj/s390x/anames.go
+++ b/src/cmd/internal/obj/s390x/anames.go
@@ -181,6 +181,7 @@ var Anames = []string{
"CMPUBNE",
"MVC",
"MVCIN",
+ "MVCLE",
"CLC",
"XC",
"OC",
@@ -437,6 +438,14 @@ var Anames = []string{
"WFLNDB",
"VFLPDB",
"WFLPDB",
+ "VFMAXDB",
+ "WFMAXDB",
+ "VFMAXSB",
+ "WFMAXSB",
+ "VFMINDB",
+ "WFMINDB",
+ "VFMINSB",
+ "WFMINSB",
"VFSQ",
"VFSQDB",
"WFSQDB",
diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go
index 6511549eeb..957222a155 100644
--- a/src/cmd/internal/obj/s390x/asmz.go
+++ b/src/cmd/internal/obj/s390x/asmz.go
@@ -441,6 +441,11 @@ var optab = []Optab{
{i: 119, as: AVERLLVG, a1: C_VREG, a2: C_VREG, a6: C_VREG},
{i: 119, as: AVERLLVG, a1: C_VREG, a6: C_VREG},
+ // VRR-c floating point min/max
+ {i: 128, as: AVFMAXDB, a1: C_SCON, a2: C_VREG, a3: C_VREG, a6: C_VREG},
+ {i: 128, as: AWFMAXDB, a1: C_SCON, a2: C_VREG, a3: C_VREG, a6: C_VREG},
+ {i: 128, as: AWFMAXDB, a1: C_SCON, a2: C_FREG, a3: C_FREG, a6: C_FREG},
+
// VRR-d
{i: 120, as: AVACQ, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG},
@@ -449,6 +454,10 @@ var optab = []Optab{
// VRR-f
{i: 122, as: AVLVGP, a1: C_REG, a2: C_REG, a6: C_VREG},
+
+ // MVC storage and storage
+ {i: 127, as: AMVCLE, a1: C_LOREG, a2: C_REG, a6: C_REG},
+ {i: 127, as: AMVCLE, a1: C_SCON, a2: C_REG, a6: C_REG},
}
var oprange [ALAST & obj.AMask][]Optab
@@ -1476,6 +1485,14 @@ func buildop(ctxt *obj.Link) {
opset(AVFMSDB, r)
opset(AWFMSDB, r)
opset(AVPERM, r)
+ case AVFMAXDB:
+ opset(AVFMAXSB, r)
+ opset(AVFMINDB, r)
+ opset(AVFMINSB, r)
+ case AWFMAXDB:
+ opset(AWFMAXSB, r)
+ opset(AWFMINDB, r)
+ opset(AWFMINSB, r)
case AKM:
opset(AKMC, r)
opset(AKLMD, r)
@@ -2632,6 +2649,8 @@ const (
op_VUPLL uint32 = 0xE7D4 // VRR-a VECTOR UNPACK LOGICAL LOW
op_VUPL uint32 = 0xE7D6 // VRR-a VECTOR UNPACK LOW
op_VMSL uint32 = 0xE7B8 // VRR-d VECTOR MULTIPLY SUM LOGICAL
+ op_VFMAX uint32 = 0xE7EF // VRR-c VECTOR FP MAXIMUM
+ op_VFMIN uint32 = 0xE7EE // VRR-c VECTOR FP MINIMUM
// added in z15
op_KDSA uint32 = 0xB93A // FORMAT_RRE COMPUTE DIGITAL SIGNATURE AUTHENTICATION (KDSA)
@@ -4453,6 +4472,30 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) {
}
}
zRRF(opcode, uint32(p.Reg), 0, uint32(p.From.Reg), uint32(p.To.Reg), asm)
+
+ case 127:
+ // NOTE: Mapping MVCLE operands is as follows:
+ // Instruction Format: MVCLE R1,R3,D2(B2)
+ // R1 - prog.To (for Destination)
+ // R3 - prog.Reg (for Source)
+ // B2 - prog.From (for Padding Byte)
+ d2 := c.regoff(&p.From)
+ if p.To.Reg&1 != 0 {
+ c.ctxt.Diag("output argument must be even register in %v", p)
+ }
+ if p.Reg&1 != 0 {
+ c.ctxt.Diag("input argument must be an even register in %v", p)
+ }
+ if (p.From.Reg == p.To.Reg) || (p.From.Reg == p.Reg) {
+ c.ctxt.Diag("padding byte register cannot be same as input or output register %v", p)
+ }
+ zRS(op_MVCLE, uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), uint32(d2), asm)
+
+ case 128: // VRR-c floating point max/min
+ op, m4, _ := vop(p.As)
+ m5 := singleElementMask(p.As)
+ m6 := uint32(c.vregoff(&p.From))
+ zVRRc(op, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), m6, m5, m4, asm)
}
}
diff --git a/src/cmd/internal/obj/s390x/objz.go b/src/cmd/internal/obj/s390x/objz.go
index 80b233d832..44c1a7d586 100644
--- a/src/cmd/internal/obj/s390x/objz.go
+++ b/src/cmd/internal/obj/s390x/objz.go
@@ -384,96 +384,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
break
}
- if c.cursym.Func().Text.From.Sym.Wrapper() {
- // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
- //
- // MOVD g_panic(g), R3
- // CMP R3, $0
- // BEQ end
- // MOVD panic_argp(R3), R4
- // ADD $(autosize+8), R1, R5
- // CMP R4, R5
- // BNE end
- // ADD $8, R1, R6
- // MOVD R6, panic_argp(R3)
- // end:
- // NOP
- //
- // The NOP is needed to give the jumps somewhere to land.
- // It is a liblink NOP, not a s390x NOP: it encodes to 0 instruction bytes.
-
- q = obj.Appendp(q, c.newprog)
-
- q.As = AMOVD
- q.From.Type = obj.TYPE_MEM
- q.From.Reg = REGG
- q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R3
-
- q = obj.Appendp(q, c.newprog)
- q.As = ACMP
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R3
- q.To.Type = obj.TYPE_CONST
- q.To.Offset = 0
-
- q = obj.Appendp(q, c.newprog)
- q.As = ABEQ
- q.To.Type = obj.TYPE_BRANCH
- p1 := q
-
- q = obj.Appendp(q, c.newprog)
- q.As = AMOVD
- q.From.Type = obj.TYPE_MEM
- q.From.Reg = REG_R3
- q.From.Offset = 0 // Panic.argp
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R4
-
- q = obj.Appendp(q, c.newprog)
- q.As = AADD
- q.From.Type = obj.TYPE_CONST
- q.From.Offset = int64(autosize) + c.ctxt.Arch.FixedFrameSize
- q.Reg = REGSP
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R5
-
- q = obj.Appendp(q, c.newprog)
- q.As = ACMP
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R4
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R5
-
- q = obj.Appendp(q, c.newprog)
- q.As = ABNE
- q.To.Type = obj.TYPE_BRANCH
- p2 := q
-
- q = obj.Appendp(q, c.newprog)
- q.As = AADD
- q.From.Type = obj.TYPE_CONST
- q.From.Offset = c.ctxt.Arch.FixedFrameSize
- q.Reg = REGSP
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R6
-
- q = obj.Appendp(q, c.newprog)
- q.As = AMOVD
- q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R6
- q.To.Type = obj.TYPE_MEM
- q.To.Reg = REG_R3
- q.To.Offset = 0 // Panic.argp
-
- q = obj.Appendp(q, c.newprog)
-
- q.As = obj.ANOP
- p1.To.SetTarget(q)
- p2.To.SetTarget(q)
- }
-
case obj.ARET:
retTarget := p.To.Sym
diff --git a/src/cmd/internal/obj/s390x/vector.go b/src/cmd/internal/obj/s390x/vector.go
index e7e36eaf15..966cd04c27 100644
--- a/src/cmd/internal/obj/s390x/vector.go
+++ b/src/cmd/internal/obj/s390x/vector.go
@@ -1027,6 +1027,22 @@ func vop(as obj.As) (opcode, es, cs uint32) {
return op_VUPL, 1, 0
case AVUPLF:
return op_VUPL, 2, 0
+ case AVFMAXDB:
+ return op_VFMAX, 3, 0
+ case AWFMAXDB:
+ return op_VFMAX, 3, 0
+ case AVFMAXSB:
+ return op_VFMAX, 2, 0
+ case AWFMAXSB:
+ return op_VFMAX, 2, 0
+ case AVFMINDB:
+ return op_VFMIN, 3, 0
+ case AWFMINDB:
+ return op_VFMIN, 3, 0
+ case AVFMINSB:
+ return op_VFMIN, 2, 0
+ case AWFMINSB:
+ return op_VFMIN, 2, 0
}
}
@@ -1062,7 +1078,11 @@ func singleElementMask(as obj.As) uint32 {
AWFSQDB,
AWFSDB,
AWFTCIDB,
- AWFIDB:
+ AWFIDB,
+ AWFMAXDB,
+ AWFMAXSB,
+ AWFMINDB,
+ AWFMINSB:
return 8
}
return 0
diff --git a/src/cmd/internal/obj/wasm/wasmobj.go b/src/cmd/internal/obj/wasm/wasmobj.go
index 9a3520f319..011294597a 100644
--- a/src/cmd/internal/obj/wasm/wasmobj.go
+++ b/src/cmd/internal/obj/wasm/wasmobj.go
@@ -202,59 +202,6 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
framesize = 0
} else if s.Func().WasmExport != nil {
genWasmExportWrapper(s, appendp)
- } else if s.Func().Text.From.Sym.Wrapper() {
- // if g._panic != nil && g._panic.argp == FP {
- // g._panic.argp = bottom-of-frame
- // }
- //
- // MOVD g_panic(g), R0
- // Get R0
- // I64Eqz
- // Not
- // If
- // Get SP
- // I64ExtendI32U
- // I64Const $framesize+8
- // I64Add
- // I64Load panic_argp(R0)
- // I64Eq
- // If
- // MOVD SP, panic_argp(R0)
- // End
- // End
-
- gpanic := obj.Addr{
- Type: obj.TYPE_MEM,
- Reg: REGG,
- Offset: 4 * 8, // g_panic
- }
-
- panicargp := obj.Addr{
- Type: obj.TYPE_MEM,
- Reg: REG_R0,
- Offset: 0, // panic.argp
- }
-
- p := s.Func().Text
- p = appendp(p, AMOVD, gpanic, regAddr(REG_R0))
-
- p = appendp(p, AGet, regAddr(REG_R0))
- p = appendp(p, AI64Eqz)
- p = appendp(p, ANot)
- p = appendp(p, AIf)
-
- p = appendp(p, AGet, regAddr(REG_SP))
- p = appendp(p, AI64ExtendI32U)
- p = appendp(p, AI64Const, constAddr(framesize+8))
- p = appendp(p, AI64Add)
- p = appendp(p, AI64Load, panicargp)
-
- p = appendp(p, AI64Eq)
- p = appendp(p, AIf)
- p = appendp(p, AMOVD, regAddr(REG_SP), panicargp)
- p = appendp(p, AEnd)
-
- p = appendp(p, AEnd)
}
if framesize > 0 && s.Func().WasmExport == nil { // genWasmExportWrapper has its own prologue generation
@@ -372,6 +319,9 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
}
tableIdxs = append(tableIdxs, uint64(numResumePoints))
s.Size = pc + 1
+ if pc >= 1<<16 {
+ ctxt.Diag("function too big: %s exceeds 65536 blocks", s)
+ }
if needMoreStack {
p := pMorestack
@@ -465,9 +415,9 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
case obj.TYPE_NONE:
// (target PC is on stack)
+ p = appendp(p, AI64Const, constAddr(16)) // only needs PC_F bits (16-63), PC_B bits (0-15) are zero
+ p = appendp(p, AI64ShrU)
p = appendp(p, AI32WrapI64)
- p = appendp(p, AI32Const, constAddr(16)) // only needs PC_F bits (16-31), PC_B bits (0-15) are zero
- p = appendp(p, AI32ShrU)
// Set PC_B parameter to function entry.
// We need to push this before pushing the target PC_F,
@@ -521,9 +471,9 @@ func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
case obj.TYPE_NONE:
// (target PC is on stack)
+ p = appendp(p, AI64Const, constAddr(16)) // only needs PC_F bits (16-63), PC_B bits (0-15) are zero
+ p = appendp(p, AI64ShrU)
p = appendp(p, AI32WrapI64)
- p = appendp(p, AI32Const, constAddr(16)) // only needs PC_F bits (16-31), PC_B bits (0-15) are zero
- p = appendp(p, AI32ShrU)
// Set PC_B parameter to function entry.
// We need to push this before pushing the target PC_F,
diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go
index 53c0918254..7f308686c1 100644
--- a/src/cmd/internal/obj/x86/obj6.go
+++ b/src/cmd/internal/obj/x86/obj6.go
@@ -653,6 +653,11 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
case obj.ACALL:
// Treat common runtime calls that take no arguments
// the same as duffcopy and duffzero.
+
+ // Note that of these functions, panicBounds does
+ // use some stack, but its stack together with the
+ // < StackSmall used by this function is still
+ // less than stackNosplit. See issue 31219.
if !isZeroArgRuntimeCall(q.To.Sym) {
leaf = false
break LeafSearch
@@ -671,20 +676,9 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
}
}
- var regEntryTmp0, regEntryTmp1 int16
- if ctxt.Arch.Family == sys.AMD64 {
- regEntryTmp0, regEntryTmp1 = REGENTRYTMP0, REGENTRYTMP1
- } else {
- regEntryTmp0, regEntryTmp1 = REG_BX, REG_DI
- }
-
- var regg int16
if !p.From.Sym.NoSplit() {
- // Emit split check and load G register
- p, regg = stacksplit(ctxt, cursym, p, newprog, autoffset, int32(textarg))
- } else if p.From.Sym.Wrapper() {
- // Load G register for the wrapper code
- p, regg = loadG(ctxt, cursym, p, newprog)
+ // Emit split check.
+ p = stacksplit(ctxt, cursym, p, newprog, autoffset, int32(textarg))
}
if bpsize > 0 {
@@ -726,123 +720,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
p.Pos = p.Pos.WithXlogue(src.PosPrologueEnd)
}
- if cursym.Func().Text.From.Sym.Wrapper() {
- // if g._panic != nil && g._panic.argp == FP {
- // g._panic.argp = bottom-of-frame
- // }
- //
- // MOVQ g_panic(g), regEntryTmp0
- // TESTQ regEntryTmp0, regEntryTmp0
- // JNE checkargp
- // end:
- // NOP
- // ... rest of function ...
- // checkargp:
- // LEAQ (autoffset+8)(SP), regEntryTmp1
- // CMPQ panic_argp(regEntryTmp0), regEntryTmp1
- // JNE end
- // MOVQ SP, panic_argp(regEntryTmp0)
- // JMP end
- //
- // The NOP is needed to give the jumps somewhere to land.
- // It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.
- //
- // The layout is chosen to help static branch prediction:
- // Both conditional jumps are unlikely, so they are arranged to be forward jumps.
-
- // MOVQ g_panic(g), regEntryTmp0
- p = obj.Appendp(p, newprog)
- p.As = AMOVQ
- p.From.Type = obj.TYPE_MEM
- p.From.Reg = regg
- p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // g_panic
- p.To.Type = obj.TYPE_REG
- p.To.Reg = regEntryTmp0
- if ctxt.Arch.Family == sys.I386 {
- p.As = AMOVL
- }
-
- // TESTQ regEntryTmp0, regEntryTmp0
- p = obj.Appendp(p, newprog)
- p.As = ATESTQ
- p.From.Type = obj.TYPE_REG
- p.From.Reg = regEntryTmp0
- p.To.Type = obj.TYPE_REG
- p.To.Reg = regEntryTmp0
- if ctxt.Arch.Family == sys.I386 {
- p.As = ATESTL
- }
-
- // JNE checkargp (checkargp to be resolved later)
- jne := obj.Appendp(p, newprog)
- jne.As = AJNE
- jne.To.Type = obj.TYPE_BRANCH
-
- // end:
- // NOP
- end := obj.Appendp(jne, newprog)
- end.As = obj.ANOP
-
- // Fast forward to end of function.
- var last *obj.Prog
- for last = end; last.Link != nil; last = last.Link {
- }
-
- // LEAQ (autoffset+8)(SP), regEntryTmp1
- p = obj.Appendp(last, newprog)
- p.As = ALEAQ
- p.From.Type = obj.TYPE_MEM
- p.From.Reg = REG_SP
- p.From.Offset = int64(autoffset) + int64(ctxt.Arch.RegSize)
- p.To.Type = obj.TYPE_REG
- p.To.Reg = regEntryTmp1
- if ctxt.Arch.Family == sys.I386 {
- p.As = ALEAL
- }
-
- // Set jne branch target.
- jne.To.SetTarget(p)
-
- // CMPQ panic_argp(regEntryTmp0), regEntryTmp1
- p = obj.Appendp(p, newprog)
- p.As = ACMPQ
- p.From.Type = obj.TYPE_MEM
- p.From.Reg = regEntryTmp0
- p.From.Offset = 0 // Panic.argp
- p.To.Type = obj.TYPE_REG
- p.To.Reg = regEntryTmp1
- if ctxt.Arch.Family == sys.I386 {
- p.As = ACMPL
- }
-
- // JNE end
- p = obj.Appendp(p, newprog)
- p.As = AJNE
- p.To.Type = obj.TYPE_BRANCH
- p.To.SetTarget(end)
-
- // MOVQ SP, panic_argp(regEntryTmp0)
- p = obj.Appendp(p, newprog)
- p.As = AMOVQ
- p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_SP
- p.To.Type = obj.TYPE_MEM
- p.To.Reg = regEntryTmp0
- p.To.Offset = 0 // Panic.argp
- if ctxt.Arch.Family == sys.I386 {
- p.As = AMOVL
- }
-
- // JMP end
- p = obj.Appendp(p, newprog)
- p.As = obj.AJMP
- p.To.Type = obj.TYPE_BRANCH
- p.To.SetTarget(end)
-
- // Reset p for following code.
- p = end
- }
-
var deltasp int32
for p = cursym.Func().Text; p != nil; p = p.Link {
pcsize := ctxt.Arch.RegSize
@@ -969,13 +846,7 @@ func isZeroArgRuntimeCall(s *obj.LSym) bool {
return false
}
switch s.Name {
- case "runtime.panicdivide", "runtime.panicwrap", "runtime.panicshift":
- return true
- }
- if strings.HasPrefix(s.Name, "runtime.panicIndex") || strings.HasPrefix(s.Name, "runtime.panicSlice") {
- // These functions do take arguments (in registers),
- // but use no stack before they do a stack check. We
- // should include them. See issue 31219.
+ case "runtime.panicdivide", "runtime.panicwrap", "runtime.panicshift", "runtime.panicBounds", "runtime.panicExtend":
return true
}
return false
@@ -1029,8 +900,8 @@ func loadG(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgAlloc)
// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in rg.
-// Returns last new instruction and G register.
-func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgAlloc, framesize int32, textarg int32) (*obj.Prog, int16) {
+// Returns last new instruction.
+func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgAlloc, framesize int32, textarg int32) *obj.Prog {
cmp := ACMPQ
lea := ALEAQ
mov := AMOVQ
@@ -1244,7 +1115,7 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA
q1.To.SetTarget(spill)
}
- return end, rg
+ return end
}
func isR15(r int16) bool {
diff --git a/src/cmd/internal/objabi/pkgspecial.go b/src/cmd/internal/objabi/pkgspecial.go
index e09aeadbc2..fe510160b3 100644
--- a/src/cmd/internal/objabi/pkgspecial.go
+++ b/src/cmd/internal/objabi/pkgspecial.go
@@ -56,7 +56,8 @@ var runtimePkgs = []string{
"internal/runtime/math",
"internal/runtime/strconv",
"internal/runtime/sys",
- "internal/runtime/syscall",
+ "internal/runtime/syscall/linux",
+ "internal/runtime/syscall/windows",
"internal/abi",
"internal/bytealg",
@@ -94,7 +95,8 @@ var allowAsmABIPkgs = []string{
"syscall",
"internal/bytealg",
"internal/chacha8rand",
- "internal/runtime/syscall",
+ "internal/runtime/syscall/linux",
+ "internal/runtime/syscall/windows",
"internal/runtime/startlinetest",
}
diff --git a/src/cmd/internal/objabi/reloctype.go b/src/cmd/internal/objabi/reloctype.go
index fbf6844b8d..9b9b4b7ee3 100644
--- a/src/cmd/internal/objabi/reloctype.go
+++ b/src/cmd/internal/objabi/reloctype.go
@@ -466,7 +466,7 @@ func (r RelocType) IsDwTxtAddr() bool {
// FuncCountToDwTxtAddrFlavor returns the correct DWARF .debug_addr
// section relocation to use when compiling a package with a total of
// fncount functions, along with the size of the ULEB128-encoded blob
-// needed to store the the eventual .debug_addr index.
+// needed to store the eventual .debug_addr index.
func FuncCountToDwTxtAddrFlavor(fncount int) (RelocType, int) {
switch {
case fncount <= 127:
diff --git a/src/cmd/internal/testdir/testdir_test.go b/src/cmd/internal/testdir/testdir_test.go
index 483a9ec33c..666645873b 100644
--- a/src/cmd/internal/testdir/testdir_test.go
+++ b/src/cmd/internal/testdir/testdir_test.go
@@ -1462,9 +1462,10 @@ func (t test) wantedErrors(file, short string) (errs []wantedError) {
const (
// Regexp to match a single opcode check: optionally begin with "-" (to indicate
- // a negative check), followed by a string literal enclosed in "" or ``. For "",
+ // a negative check) or a positive number (to specify the expected number of
+ // matches), followed by a string literal enclosed in "" or ``. For "",
// backslashes must be handled.
- reMatchCheck = `-?(?:\x60[^\x60]*\x60|"(?:[^"\\]|\\.)*")`
+ reMatchCheck = `(-|[1-9]\d*)?(?:\x60[^\x60]*\x60|"(?:[^"\\]|\\.)*")`
)
var (
@@ -1516,6 +1517,8 @@ type wantedAsmOpcode struct {
fileline string // original source file/line (eg: "/path/foo.go:45")
line int // original source line
opcode *regexp.Regexp // opcode check to be performed on assembly output
+ expected int // expected number of matches
+ actual int // actual number that matched
negative bool // true if the check is supposed to fail rather than pass
found bool // true if the opcode check matched at least one in the output
}
@@ -1622,9 +1625,16 @@ func (t test) wantedAsmOpcodes(fn string) asmChecks {
for _, m := range rxAsmCheck.FindAllString(allchecks, -1) {
negative := false
+ expected := 0
if m[0] == '-' {
negative = true
m = m[1:]
+ } else if '1' <= m[0] && m[0] <= '9' {
+ for '0' <= m[0] && m[0] <= '9' {
+ expected *= 10
+ expected += int(m[0] - '0')
+ m = m[1:]
+ }
}
rxsrc, err := strconv.Unquote(m)
@@ -1650,6 +1660,7 @@ func (t test) wantedAsmOpcodes(fn string) asmChecks {
ops[env] = make(map[string][]wantedAsmOpcode)
}
ops[env][lnum] = append(ops[env][lnum], wantedAsmOpcode{
+ expected: expected,
negative: negative,
fileline: lnum,
line: i + 1,
@@ -1698,7 +1709,8 @@ func (t test) asmCheck(outStr string, fn string, env buildEnv, fullops map[strin
// run the checks.
if ops, found := fullops[srcFileLine]; found {
for i := range ops {
- if !ops[i].found && ops[i].opcode.FindString(asm) != "" {
+ if (!ops[i].found || ops[i].expected > 0) && ops[i].opcode.FindString(asm) != "" {
+ ops[i].actual++
ops[i].found = true
}
}
@@ -1714,6 +1726,9 @@ func (t test) asmCheck(outStr string, fn string, env buildEnv, fullops map[strin
if o.negative == o.found {
failed = append(failed, o)
}
+ if o.expected > 0 && o.expected != o.actual {
+ failed = append(failed, o)
+ }
}
}
if len(failed) == 0 {
@@ -1737,6 +1752,8 @@ func (t test) asmCheck(outStr string, fn string, env buildEnv, fullops map[strin
if o.negative {
fmt.Fprintf(&errbuf, "%s:%d: %s: wrong opcode found: %q\n", t.goFileName(), o.line, env, o.opcode.String())
+ } else if o.expected > 0 {
+ fmt.Fprintf(&errbuf, "%s:%d: %s: wrong number of opcodes: %q\n", t.goFileName(), o.line, env, o.opcode.String())
} else {
fmt.Fprintf(&errbuf, "%s:%d: %s: opcode not found: %q\n", t.goFileName(), o.line, env, o.opcode.String())
}
diff --git a/src/cmd/link/dwarf_test.go b/src/cmd/link/dwarf_test.go
index 68849d7db9..d269aa70c6 100644
--- a/src/cmd/link/dwarf_test.go
+++ b/src/cmd/link/dwarf_test.go
@@ -256,3 +256,105 @@ func TestDWARFiOS(t *testing.T) {
testDWARF(t, "c-archive", true, cc, "CGO_ENABLED=1", "GOOS=ios", "GOARCH=arm64")
})
}
+
+// This test ensures that variables promoted to the heap, specifically
+// function return parameters, have correct location lists generated.
+//
+// TODO(deparker): This test is intentionally limited to GOOS=="linux"
+// and scoped to net.sendFile, which was the function reported originally in
+// issue #65405. There is relevant discussion in https://go-review.googlesource.com/c/go/+/684377
+// pertaining to these limitations. There are other missing location lists which must be fixed
+// particularly in functions where `linkname` is involved.
+func TestDWARFLocationList(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ t.Skip("skipping test on non-linux OS")
+ }
+ testenv.MustHaveCGO(t)
+ testenv.MustHaveGoBuild(t)
+
+ if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) {
+ t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH)
+ }
+
+ t.Parallel()
+
+ tmpDir := t.TempDir()
+ exe := filepath.Join(tmpDir, "issue65405.exe")
+ dir := "./testdata/dwarf/issue65405"
+
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-toolexec", os.Args[0], "-gcflags=all=-N -l", "-o", exe, dir)
+ cmd.Env = append(os.Environ(), "CGO_CFLAGS=")
+ cmd.Env = append(cmd.Env, "LINK_TEST_TOOLEXEC=1")
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("go build -o %v %v: %v\n%s", exe, dir, err, out)
+ }
+
+ f, err := objfile.Open(exe)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ d, err := f.DWARF()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Find the net.sendFile function and check its return parameter location list
+ reader := d.Reader()
+
+ for {
+ entry, err := reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ break
+ }
+
+ // Look for the net.sendFile subprogram
+ if entry.Tag == dwarf.TagSubprogram {
+ fnName, ok := entry.Val(dwarf.AttrName).(string)
+ if !ok || fnName != "net.sendFile" {
+ reader.SkipChildren()
+ continue
+ }
+
+ for {
+ paramEntry, err := reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if paramEntry == nil || paramEntry.Tag == 0 {
+ break
+ }
+
+ if paramEntry.Tag == dwarf.TagFormalParameter {
+ paramName, _ := paramEntry.Val(dwarf.AttrName).(string)
+
+ // Check if this parameter has a location attribute
+ if loc := paramEntry.Val(dwarf.AttrLocation); loc != nil {
+ switch locData := loc.(type) {
+ case []byte:
+ if len(locData) == 0 {
+ t.Errorf("%s return parameter %q has empty location list", fnName, paramName)
+ return
+ }
+ case int64:
+ // Location list offset - this means it has a location list
+ if locData == 0 {
+ t.Errorf("%s return parameter %q has zero location list offset", fnName, paramName)
+ return
+ }
+ default:
+ t.Errorf("%s return parameter %q has unexpected location type %T: %v", fnName, paramName, locData, locData)
+ }
+ } else {
+ t.Errorf("%s return parameter %q has no location attribute", fnName, paramName)
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go
index 42756e86bb..138547a3d3 100644
--- a/src/cmd/link/internal/ld/data.go
+++ b/src/cmd/link/internal/ld/data.go
@@ -496,6 +496,15 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) {
// to the start of the first text section, even if there are multiple.
if sect.Name == ".text" {
o = ldr.SymValue(rs) - int64(Segtext.Sections[0].Vaddr) + r.Add()
+ if target.IsWasm() {
+ // On Wasm, textoff (e.g. in the method table) is just the function index,
+ // whereas the "PC" (rs's Value) is function index << 16 + block index (see
+ // ../wasm/asm.go:assignAddress).
+ if o&(1<<16-1) != 0 {
+ st.err.Errorf(s, "textoff relocation %s does not target function entry: %s %#x", rt, ldr.SymName(rs), o)
+ }
+ o >>= 16
+ }
} else {
o = ldr.SymValue(rs) - int64(ldr.SymSect(rs).Vaddr) + r.Add()
}
@@ -606,16 +615,16 @@ func (st *relocSymState) relocsym(s loader.Sym, P []byte) {
P[off] = byte(int8(o))
case 2:
if (rt == objabi.R_PCREL || rt == objabi.R_CALL) && o != int64(int16(o)) {
- st.err.Errorf(s, "pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), o)
+ st.err.Errorf(s, "pc-relative relocation %s address for %s is too big: %#x", rt, ldr.SymName(rs), o)
} else if o != int64(int16(o)) && o != int64(uint16(o)) {
- st.err.Errorf(s, "non-pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), uint64(o))
+ st.err.Errorf(s, "non-pc-relative relocation %s address for %s is too big: %#x", rt, ldr.SymName(rs), uint64(o))
}
target.Arch.ByteOrder.PutUint16(P[off:], uint16(o))
case 4:
if (rt == objabi.R_PCREL || rt == objabi.R_CALL) && o != int64(int32(o)) {
- st.err.Errorf(s, "pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), o)
+ st.err.Errorf(s, "pc-relative relocation %s address for %s is too big: %#x", rt, ldr.SymName(rs), o)
} else if o != int64(int32(o)) && o != int64(uint32(o)) {
- st.err.Errorf(s, "non-pc-relative relocation address for %s is too big: %#x", ldr.SymName(rs), uint64(o))
+ st.err.Errorf(s, "non-pc-relative relocation %s address for %s is too big: %#x", rt, ldr.SymName(rs), uint64(o))
}
target.Arch.ByteOrder.PutUint32(P[off:], uint32(o))
case 8:
diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go
index cdf7deb31b..055b4efe5d 100644
--- a/src/cmd/link/internal/ld/deadcode.go
+++ b/src/cmd/link/internal/ld/deadcode.go
@@ -560,13 +560,9 @@ func (d *deadcodePass) decodetypeMethods(ldr *loader.Loader, arch *sys.Arch, sym
case abi.Chan: // reflect.chanType
off += 2 * arch.PtrSize
case abi.Map:
- if buildcfg.Experiment.SwissMap {
- off += 7*arch.PtrSize + 4 // internal/abi.SwissMapType
- if arch.PtrSize == 8 {
- off += 4 // padding for final uint32 field (Flags).
- }
- } else {
- off += 4*arch.PtrSize + 8 // internal/abi.OldMapType
+ off += 7*arch.PtrSize + 4 // internal/abi.MapType
+ if arch.PtrSize == 8 {
+ off += 4 // padding for final uint32 field (Flags).
}
case abi.Interface: // reflect.interfaceType
off += 3 * arch.PtrSize
diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go
index 4d38ec1a77..8c9fa8efab 100644
--- a/src/cmd/link/internal/ld/decodesym.go
+++ b/src/cmd/link/internal/ld/decodesym.go
@@ -38,7 +38,7 @@ func uncommonSize(arch *sys.Arch) int { return int(abi.UncommonSize()) }
// Type.commonType.kind
func decodetypeKind(arch *sys.Arch, p []byte) abi.Kind {
- return abi.Kind(p[2*arch.PtrSize+7]) & abi.KindMask // 0x13 / 0x1f
+ return abi.Kind(p[2*arch.PtrSize+7]) // 0x13
}
// Type.commonType.size
@@ -158,7 +158,7 @@ func decodetypeMapValue(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) l
return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))+int32(arch.PtrSize)) // 0x20 / 0x38
}
-func decodetypeMapSwissGroup(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym {
+func decodetypeMapGroup(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym {
relocs := ldr.Relocs(symIdx)
return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))+2*int32(arch.PtrSize)) // 0x24 / 0x40
}
diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go
index 602d70ddb9..0003938ef2 100644
--- a/src/cmd/link/internal/ld/dwarf.go
+++ b/src/cmd/link/internal/ld/dwarf.go
@@ -872,14 +872,6 @@ func (d *dwctxt) mkinternaltype(ctxt *Link, abbrev int, typename, keyname, valna
}
func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) {
- if buildcfg.Experiment.SwissMap {
- d.synthesizemaptypesSwiss(ctxt, die)
- } else {
- d.synthesizemaptypesOld(ctxt, die)
- }
-}
-
-func (d *dwctxt) synthesizemaptypesSwiss(ctxt *Link, die *dwarf.DWDie) {
mapType := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.Map"))
tableType := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.table"))
groupsReferenceType := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.groupsReference"))
@@ -892,7 +884,7 @@ func (d *dwctxt) synthesizemaptypesSwiss(ctxt *Link, die *dwarf.DWDie) {
keyType := decodetypeMapKey(d.ldr, d.arch, gotype)
valType := decodetypeMapValue(d.ldr, d.arch, gotype)
- groupType := decodetypeMapSwissGroup(d.ldr, d.arch, gotype)
+ groupType := decodetypeMapGroup(d.ldr, d.arch, gotype)
keyType = d.walksymtypedef(d.defgotype(keyType))
valType = d.walksymtypedef(d.defgotype(valType))
@@ -941,102 +933,6 @@ func (d *dwctxt) synthesizemaptypesSwiss(ctxt *Link, die *dwarf.DWDie) {
}
}
-func (d *dwctxt) synthesizemaptypesOld(ctxt *Link, die *dwarf.DWDie) {
- hash := walktypedef(d.findprotodie(ctxt, "type:runtime.hmap"))
- bucket := walktypedef(d.findprotodie(ctxt, "type:runtime.bmap"))
-
- if hash == nil {
- return
- }
-
- for ; die != nil; die = die.Link {
- if die.Abbrev != dwarf.DW_ABRV_MAPTYPE {
- continue
- }
- gotype := loader.Sym(getattr(die, dwarf.DW_AT_type).Data.(dwSym))
- keytype := decodetypeMapKey(d.ldr, d.arch, gotype)
- valtype := decodetypeMapValue(d.ldr, d.arch, gotype)
- keydata := d.ldr.Data(keytype)
- valdata := d.ldr.Data(valtype)
- keysize, valsize := decodetypeSize(d.arch, keydata), decodetypeSize(d.arch, valdata)
- keytype, valtype = d.walksymtypedef(d.defgotype(keytype)), d.walksymtypedef(d.defgotype(valtype))
-
- // compute size info like hashmap.c does.
- indirectKey, indirectVal := false, false
- if keysize > abi.OldMapMaxKeyBytes {
- keysize = int64(d.arch.PtrSize)
- indirectKey = true
- }
- if valsize > abi.OldMapMaxElemBytes {
- valsize = int64(d.arch.PtrSize)
- indirectVal = true
- }
-
- // Construct type to represent an array of BucketSize keys
- keyname := d.nameFromDIESym(keytype)
- dwhks := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]key", keyname, "", func(dwhk *dwarf.DWDie) {
- newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount*keysize, 0)
- t := keytype
- if indirectKey {
- t = d.defptrto(keytype)
- }
- d.newrefattr(dwhk, dwarf.DW_AT_type, t)
- fld := d.newdie(dwhk, dwarf.DW_ABRV_ARRAYRANGE, "size")
- newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount, 0)
- d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
- })
-
- // Construct type to represent an array of BucketSize values
- valname := d.nameFromDIESym(valtype)
- dwhvs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]val", valname, "", func(dwhv *dwarf.DWDie) {
- newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount*valsize, 0)
- t := valtype
- if indirectVal {
- t = d.defptrto(valtype)
- }
- d.newrefattr(dwhv, dwarf.DW_AT_type, t)
- fld := d.newdie(dwhv, dwarf.DW_ABRV_ARRAYRANGE, "size")
- newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount, 0)
- d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
- })
-
- // Construct bucket<K,V>
- dwhbs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "bucket", keyname, valname, func(dwhb *dwarf.DWDie) {
- // Copy over all fields except the field "data" from the generic
- // bucket. "data" will be replaced with keys/values below.
- d.copychildrenexcept(ctxt, dwhb, bucket, findchild(bucket, "data"))
-
- fld := d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "keys")
- d.newrefattr(fld, dwarf.DW_AT_type, dwhks)
- newmemberoffsetattr(fld, abi.OldMapBucketCount)
- fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "values")
- d.newrefattr(fld, dwarf.DW_AT_type, dwhvs)
- newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*int32(keysize))
- fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "overflow")
- d.newrefattr(fld, dwarf.DW_AT_type, d.defptrto(d.dtolsym(dwhb.Sym)))
- newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*(int32(keysize)+int32(valsize)))
- if d.arch.RegSize > d.arch.PtrSize {
- fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "pad")
- d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym)
- newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*(int32(keysize)+int32(valsize))+int32(d.arch.PtrSize))
- }
-
- newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount+abi.OldMapBucketCount*keysize+abi.OldMapBucketCount*valsize+int64(d.arch.RegSize), 0)
- })
-
- // Construct hash<K,V>
- dwhs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "hash", keyname, valname, func(dwh *dwarf.DWDie) {
- d.copychildren(ctxt, dwh, hash)
- d.substitutetype(dwh, "buckets", d.defptrto(dwhbs))
- d.substitutetype(dwh, "oldbuckets", d.defptrto(dwhbs))
- newattr(dwh, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(hash, dwarf.DW_AT_byte_size).Value, nil)
- })
-
- // make map type a pointer to hash<K,V>
- d.newrefattr(die, dwarf.DW_AT_type, d.defptrto(dwhs))
- }
-}
-
func (d *dwctxt) synthesizechantypes(ctxt *Link, die *dwarf.DWDie) {
sudog := walktypedef(d.findprotodie(ctxt, "type:runtime.sudog"))
waitq := walktypedef(d.findprotodie(ctxt, "type:runtime.waitq"))
@@ -2010,19 +1906,14 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
// Prototypes needed for type synthesis.
prototypedies = map[string]*dwarf.DWDie{
- "type:runtime.stringStructDWARF": nil,
- "type:runtime.slice": nil,
- "type:runtime.sudog": nil,
- "type:runtime.waitq": nil,
- "type:runtime.hchan": nil,
- }
- if buildcfg.Experiment.SwissMap {
- prototypedies["type:internal/runtime/maps.Map"] = nil
- prototypedies["type:internal/runtime/maps.table"] = nil
- prototypedies["type:internal/runtime/maps.groupsReference"] = nil
- } else {
- prototypedies["type:runtime.hmap"] = nil
- prototypedies["type:runtime.bmap"] = nil
+ "type:runtime.stringStructDWARF": nil,
+ "type:runtime.slice": nil,
+ "type:runtime.sudog": nil,
+ "type:runtime.waitq": nil,
+ "type:runtime.hchan": nil,
+ "type:internal/runtime/maps.Map": nil,
+ "type:internal/runtime/maps.table": nil,
+ "type:internal/runtime/maps.groupsReference": nil,
}
// Needed by the prettyprinter code for interface inspection.
@@ -2031,6 +1922,7 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
"type:internal/abi.ArrayType",
"type:internal/abi.ChanType",
"type:internal/abi.FuncType",
+ "type:internal/abi.MapType",
"type:internal/abi.PtrType",
"type:internal/abi.SliceType",
"type:internal/abi.StructType",
@@ -2039,11 +1931,6 @@ func dwarfGenerateDebugInfo(ctxt *Link) {
"type:internal/abi.Imethod"} {
d.defgotype(d.lookupOrDiag(typ))
}
- if buildcfg.Experiment.SwissMap {
- d.defgotype(d.lookupOrDiag("type:internal/abi.SwissMapType"))
- } else {
- d.defgotype(d.lookupOrDiag("type:internal/abi.OldMapType"))
- }
// fake root DIE for compile unit DIEs
var dwroot dwarf.DWDie
diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go
index ab086c57f4..cc493f2c69 100644
--- a/src/cmd/link/internal/ld/dwarf_test.go
+++ b/src/cmd/link/internal/ld/dwarf_test.go
@@ -60,6 +60,7 @@ func TestRuntimeTypesPresent(t *testing.T) {
"internal/abi.ArrayType": true,
"internal/abi.ChanType": true,
"internal/abi.FuncType": true,
+ "internal/abi.MapType": true,
"internal/abi.PtrType": true,
"internal/abi.SliceType": true,
"internal/abi.StructType": true,
@@ -71,16 +72,6 @@ func TestRuntimeTypesPresent(t *testing.T) {
if len(found) != len(want) {
t.Errorf("found %v, want %v", found, want)
}
-
- // Must have one of OldMapType or SwissMapType.
- want = map[string]bool{
- "internal/abi.OldMapType": true,
- "internal/abi.SwissMapType": true,
- }
- found = findTypes(t, dwarf, want)
- if len(found) != 1 {
- t.Errorf("map type want one of %v found %v", want, found)
- }
}
func findTypes(t *testing.T, dw *dwarf.Data, want map[string]bool) (found map[string]bool) {
diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go
index a09d3acd5e..9532b33a9b 100644
--- a/src/cmd/link/internal/ld/pcln.go
+++ b/src/cmd/link/internal/ld/pcln.go
@@ -614,16 +614,36 @@ func (state pclntab) calculateFunctabSize(ctxt *Link, funcs []loader.Sym) (int64
return size, startLocations
}
+// textOff computes the offset of a text symbol, relative to textStart,
+// similar to an R_ADDROFF relocation, for various runtime metadata and
+// tables (see runtime/symtab.go:(*moduledata).textAddr).
+func textOff(ctxt *Link, s loader.Sym, textStart int64) uint32 {
+ ldr := ctxt.loader
+ off := ldr.SymValue(s) - textStart
+ if off < 0 {
+ panic(fmt.Sprintf("expected func %s(%x) to be placed at or after textStart (%x)", ldr.SymName(s), ldr.SymValue(s), textStart))
+ }
+ if ctxt.IsWasm() {
+ // On Wasm, the function table contains just the function index, whereas
+ // the "PC" (s's Value) is function index << 16 + block index (see
+ // ../wasm/asm.go:assignAddress).
+ if off&(1<<16-1) != 0 {
+ ctxt.Errorf(s, "nonzero PC_B at function entry: %#x", off)
+ }
+ off >>= 16
+ }
+ if int64(uint32(off)) != off {
+ ctxt.Errorf(s, "textOff overflow: %#x", off)
+ }
+ return uint32(off)
+}
+
// writePCToFunc writes the PC->func lookup table.
func writePCToFunc(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, startLocations []uint32) {
ldr := ctxt.loader
textStart := ldr.SymValue(ldr.Lookup("runtime.text", 0))
pcOff := func(s loader.Sym) uint32 {
- off := ldr.SymValue(s) - textStart
- if off < 0 {
- panic(fmt.Sprintf("expected func %s(%x) to be placed at or after textStart (%x)", ldr.SymName(s), ldr.SymValue(s), textStart))
- }
- return uint32(off)
+ return textOff(ctxt, s, textStart)
}
for i, s := range funcs {
sb.SetUint32(ctxt.Arch, int64(i*2*4), pcOff(s))
@@ -632,7 +652,11 @@ func writePCToFunc(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, sta
// Final entry of table is just end pc offset.
lastFunc := funcs[len(funcs)-1]
- sb.SetUint32(ctxt.Arch, int64(len(funcs))*2*4, pcOff(lastFunc)+uint32(ldr.SymSize(lastFunc)))
+ lastPC := pcOff(lastFunc) + uint32(ldr.SymSize(lastFunc))
+ if ctxt.IsWasm() {
+ lastPC = pcOff(lastFunc) + 1 // On Wasm it is function index (see above)
+ }
+ sb.SetUint32(ctxt.Arch, int64(len(funcs))*2*4, lastPC)
}
// writeFuncs writes the func structures and pcdata to runtime.functab.
@@ -646,7 +670,7 @@ func writeFuncs(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, inlSym
var pcsp, pcfile, pcline, pcinline loader.Sym
var pcdata []loader.Sym
- // Write the individual func objects.
+ // Write the individual func objects (runtime._func struct).
for i, s := range funcs {
startLine := int32(0)
fi := ldr.FuncInfo(s)
@@ -658,10 +682,7 @@ func writeFuncs(ctxt *Link, sb *loader.SymbolBuilder, funcs []loader.Sym, inlSym
off := int64(startLocations[i])
// entryOff uint32 (offset of func entry PC from textStart)
- entryOff := ldr.SymValue(s) - textStart
- if entryOff < 0 {
- panic(fmt.Sprintf("expected func %s(%x) to be placed before or at textStart (%x)", ldr.SymName(s), ldr.SymValue(s), textStart))
- }
+ entryOff := textOff(ctxt, s, textStart)
off = sb.SetUint32(ctxt.Arch, off, uint32(entryOff))
// nameOff int32
diff --git a/src/cmd/link/testdata/dwarf/issue65405/main.go b/src/cmd/link/testdata/dwarf/issue65405/main.go
new file mode 100644
index 0000000000..f76e464b23
--- /dev/null
+++ b/src/cmd/link/testdata/dwarf/issue65405/main.go
@@ -0,0 +1,8 @@
+package main
+
+import "net/http"
+
+func main() {
+ http.Handle("/", http.StripPrefix("/static/", http.FileServer(http.Dir("./output"))))
+ http.ListenAndServe(":8000", nil)
+}
diff --git a/src/crypto/elliptic/params.go b/src/crypto/elliptic/params.go
index 0507d22b27..8cf9a6dc40 100644
--- a/src/crypto/elliptic/params.go
+++ b/src/crypto/elliptic/params.go
@@ -295,13 +295,13 @@ func (curve *CurveParams) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.
Bz := new(big.Int).SetInt64(1)
x, y, z := new(big.Int), new(big.Int), new(big.Int)
- for _, byte := range k {
- for bitNum := 0; bitNum < 8; bitNum++ {
+ for _, b := range k {
+ for range 8 {
x, y, z = curve.doubleJacobian(x, y, z)
- if byte&0x80 == 0x80 {
+ if b&0x80 == 0x80 {
x, y, z = curve.addJacobian(Bx, By, Bz, x, y, z)
}
- byte <<= 1
+ b <<= 1
}
}
diff --git a/src/crypto/rsa/rsa.go b/src/crypto/rsa/rsa.go
index 95bb4becd2..c557c3710a 100644
--- a/src/crypto/rsa/rsa.go
+++ b/src/crypto/rsa/rsa.go
@@ -30,14 +30,15 @@
// with a key smaller than 1024 bits. Such keys are insecure and should not be
// used.
//
-// The `rsa1024min=0` GODEBUG setting suppresses this error, but we recommend
-// doing so only in tests, if necessary. Tests can use [testing.T.Setenv] or
-// include `//go:debug rsa1024min=0` in a `_test.go` source file to set it.
+// The rsa1024min=0 GODEBUG setting suppresses this error, but we recommend
+// doing so only in tests, if necessary. Tests can set this option using
+// [testing.T.Setenv] or by including "//go:debug rsa1024min=0" in a *_test.go
+// source file.
//
// Alternatively, see the [GenerateKey (TestKey)] example for a pregenerated
// test-only 2048-bit key.
//
-// [GenerateKey (TestKey)]: #example-GenerateKey-TestKey
+// [GenerateKey (TestKey)]: https://pkg.go.dev/crypto/rsa#example-GenerateKey-TestKey
package rsa
import (
@@ -62,9 +63,8 @@ var bigOne = big.NewInt(1)
// A PublicKey represents the public part of an RSA key.
//
-// The value of the modulus N is considered secret by this library and protected
-// from leaking through timing side-channels. However, neither the value of the
-// exponent E nor the precise bit size of N are similarly protected.
+// The values of N and E are not considered confidential, and may leak through
+// side channels, or could be mathematically derived from other public values.
type PublicKey struct {
N *big.Int // modulus
E int // public exponent
@@ -226,7 +226,7 @@ type CRTValue struct {
// Validate performs basic sanity checks on the key.
// It returns nil if the key is valid, or else an error describing a problem.
//
-// It runs faster on valid keys if run after [Precompute].
+// It runs faster on valid keys if run after [PrivateKey.Precompute].
func (priv *PrivateKey) Validate() error {
// We can operate on keys based on d alone, but it isn't possible to encode
// with [crypto/x509.MarshalPKCS1PrivateKey], which unfortunately doesn't
@@ -274,7 +274,7 @@ func checkPublicKeySize(k *PublicKey) error {
// returned key does not depend deterministically on the bytes read from rand,
// and may change between calls and/or between versions.
//
-// [Minimum key size]: #hdr-Minimum_key_size
+// [Minimum key size]: https://pkg.go.dev/crypto/rsa#hdr-Minimum_key_size
func GenerateKey(random io.Reader, bits int) (*PrivateKey, error) {
if err := checkKeySize(bits); err != nil {
return nil, err
diff --git a/src/crypto/tls/cipher_suites.go b/src/crypto/tls/cipher_suites.go
index 2a96fa6903..6ed63ccc2d 100644
--- a/src/crypto/tls/cipher_suites.go
+++ b/src/crypto/tls/cipher_suites.go
@@ -149,8 +149,8 @@ type cipherSuite struct {
}
var cipherSuites = []*cipherSuite{ // TODO: replace with a map, since the order doesn't matter.
- {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
- {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
+ {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, 32, 0, 12, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
+ {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, 32, 0, 12, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
{TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadAESGCM},
{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadAESGCM},
{TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
@@ -284,7 +284,7 @@ var cipherSuitesPreferenceOrder = []uint16{
// AEADs w/ ECDHE
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
// CBC w/ ECDHE
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
@@ -313,7 +313,7 @@ var cipherSuitesPreferenceOrder = []uint16{
var cipherSuitesPreferenceOrderNoAES = []uint16{
// ChaCha20Poly1305
- TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
// AES-GCM w/ ECDHE
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
diff --git a/src/crypto/tls/handshake_client_test.go b/src/crypto/tls/handshake_client_test.go
index 6118711a0e..9c94016f13 100644
--- a/src/crypto/tls/handshake_client_test.go
+++ b/src/crypto/tls/handshake_client_test.go
@@ -638,7 +638,7 @@ func TestHandshakeClientHelloRetryRequest(t *testing.T) {
func TestHandshakeClientECDHERSAChaCha20(t *testing.T) {
config := testConfig.Clone()
- config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305}
+ config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256}
test := &clientTest{
name: "ECDHE-RSA-CHACHA20-POLY1305",
@@ -651,7 +651,7 @@ func TestHandshakeClientECDHERSAChaCha20(t *testing.T) {
func TestHandshakeClientECDHEECDSAChaCha20(t *testing.T) {
config := testConfig.Clone()
- config.CipherSuites = []uint16{TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305}
+ config.CipherSuites = []uint16{TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256}
test := &clientTest{
name: "ECDHE-ECDSA-CHACHA20-POLY1305",
diff --git a/src/crypto/tls/handshake_server_test.go b/src/crypto/tls/handshake_server_test.go
index a6d64a506a..4df3f5a737 100644
--- a/src/crypto/tls/handshake_server_test.go
+++ b/src/crypto/tls/handshake_server_test.go
@@ -1379,31 +1379,31 @@ func BenchmarkHandshakeServer(b *testing.B) {
})
b.Run("ECDHE-P256-RSA", func(b *testing.B) {
b.Run("TLSv13", func(b *testing.B) {
- benchmarkHandshakeServer(b, VersionTLS13, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ benchmarkHandshakeServer(b, VersionTLS13, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
CurveP256, testRSACertificate, testRSAPrivateKey)
})
b.Run("TLSv12", func(b *testing.B) {
- benchmarkHandshakeServer(b, VersionTLS12, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ benchmarkHandshakeServer(b, VersionTLS12, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
CurveP256, testRSACertificate, testRSAPrivateKey)
})
})
b.Run("ECDHE-P256-ECDSA-P256", func(b *testing.B) {
b.Run("TLSv13", func(b *testing.B) {
- benchmarkHandshakeServer(b, VersionTLS13, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+ benchmarkHandshakeServer(b, VersionTLS13, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
CurveP256, testP256Certificate, testP256PrivateKey)
})
b.Run("TLSv12", func(b *testing.B) {
- benchmarkHandshakeServer(b, VersionTLS12, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+ benchmarkHandshakeServer(b, VersionTLS12, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
CurveP256, testP256Certificate, testP256PrivateKey)
})
})
b.Run("ECDHE-X25519-ECDSA-P256", func(b *testing.B) {
b.Run("TLSv13", func(b *testing.B) {
- benchmarkHandshakeServer(b, VersionTLS13, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+ benchmarkHandshakeServer(b, VersionTLS13, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
X25519, testP256Certificate, testP256PrivateKey)
})
b.Run("TLSv12", func(b *testing.B) {
- benchmarkHandshakeServer(b, VersionTLS12, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+ benchmarkHandshakeServer(b, VersionTLS12, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
X25519, testP256Certificate, testP256PrivateKey)
})
})
@@ -1412,11 +1412,11 @@ func BenchmarkHandshakeServer(b *testing.B) {
b.Fatal("test ECDSA key doesn't use curve P-521")
}
b.Run("TLSv13", func(b *testing.B) {
- benchmarkHandshakeServer(b, VersionTLS13, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+ benchmarkHandshakeServer(b, VersionTLS13, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
CurveP521, testECDSACertificate, testECDSAPrivateKey)
})
b.Run("TLSv12", func(b *testing.B) {
- benchmarkHandshakeServer(b, VersionTLS12, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+ benchmarkHandshakeServer(b, VersionTLS12, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
CurveP521, testECDSACertificate, testECDSAPrivateKey)
})
})
@@ -1792,28 +1792,28 @@ func TestAESCipherReordering(t *testing.T) {
{
name: "server has hardware AES, client doesn't (pick ChaCha)",
clientCiphers: []uint16{
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
TLS_RSA_WITH_AES_128_CBC_SHA,
},
serverHasAESGCM: true,
- expectedCipher: TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ expectedCipher: TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
},
{
name: "client prefers AES-GCM, server doesn't have hardware AES (pick ChaCha)",
clientCiphers: []uint16{
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
TLS_RSA_WITH_AES_128_CBC_SHA,
},
serverHasAESGCM: false,
- expectedCipher: TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ expectedCipher: TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
},
{
name: "client prefers AES-GCM, server has hardware AES (pick AES-GCM)",
clientCiphers: []uint16{
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
TLS_RSA_WITH_AES_128_CBC_SHA,
},
serverHasAESGCM: true,
@@ -1824,7 +1824,7 @@ func TestAESCipherReordering(t *testing.T) {
clientCiphers: []uint16{
0x0A0A, // GREASE value
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
TLS_RSA_WITH_AES_128_CBC_SHA,
},
serverHasAESGCM: true,
@@ -1845,27 +1845,27 @@ func TestAESCipherReordering(t *testing.T) {
clientCiphers: []uint16{
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
TLS_RSA_WITH_AES_128_CBC_SHA,
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
},
serverHasAESGCM: false,
- expectedCipher: TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ expectedCipher: TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
},
{
name: "client prefers AES-GCM over ChaCha and sends GREASE, server doesn't have hardware AES (pick ChaCha)",
clientCiphers: []uint16{
0x0A0A, // GREASE value
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
TLS_RSA_WITH_AES_128_CBC_SHA,
},
serverHasAESGCM: false,
- expectedCipher: TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ expectedCipher: TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
},
{
name: "client supports multiple AES-GCM, server doesn't have hardware AES and doesn't support ChaCha (AES-GCM)",
clientCiphers: []uint16{
TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
},
serverHasAESGCM: false,
@@ -1879,14 +1879,14 @@ func TestAESCipherReordering(t *testing.T) {
name: "client prefers AES-GCM, server has hardware but doesn't support AES (pick ChaCha)",
clientCiphers: []uint16{
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
TLS_RSA_WITH_AES_128_CBC_SHA,
},
serverHasAESGCM: true,
serverCiphers: []uint16{
- TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
},
- expectedCipher: TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+ expectedCipher: TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
},
}
diff --git a/src/debug/dwarf/entry.go b/src/debug/dwarf/entry.go
index 07b9259be5..8741479483 100644
--- a/src/debug/dwarf/entry.go
+++ b/src/debug/dwarf/entry.go
@@ -554,7 +554,7 @@ func (b *buf) entry(cu *Entry, u *unit) *Entry {
case formData16:
val = b.bytes(16)
case formSdata:
- val = int64(b.int())
+ val = b.int()
case formUdata:
val = int64(b.uint())
case formImplicitConst:
diff --git a/src/encoding/json/decode_test.go b/src/encoding/json/decode_test.go
index 473fd02833..d12495f90b 100644
--- a/src/encoding/json/decode_test.go
+++ b/src/encoding/json/decode_test.go
@@ -12,6 +12,7 @@ import (
"errors"
"fmt"
"image"
+ "io"
"maps"
"math"
"math/big"
@@ -415,6 +416,8 @@ type DoublePtr struct {
J **int
}
+type NestedUnamed struct{ F struct{ V int } }
+
var unmarshalTests = []struct {
CaseName
in string
@@ -469,11 +472,13 @@ var unmarshalTests = []struct {
{CaseName: Name(""), in: `{"alphabet": "xyz"}`, ptr: new(U), err: fmt.Errorf("json: unknown field \"alphabet\""), disallowUnknownFields: true},
// syntax errors
+ {CaseName: Name(""), in: ``, ptr: new(any), err: &SyntaxError{"unexpected end of JSON input", 0}},
+ {CaseName: Name(""), in: " \n\r\t", ptr: new(any), err: &SyntaxError{"unexpected end of JSON input", 4}},
+ {CaseName: Name(""), in: `[2, 3`, ptr: new(any), err: &SyntaxError{"unexpected end of JSON input", 5}},
{CaseName: Name(""), in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}},
{CaseName: Name(""), in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}},
{CaseName: Name(""), in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true},
- {CaseName: Name(""), in: `[2, 3`, err: &SyntaxError{msg: "unexpected end of JSON input", Offset: 5}},
- {CaseName: Name(""), in: `{"F3": -}`, ptr: new(V), err: &SyntaxError{msg: "invalid character '}' in numeric literal", Offset: 9}},
+ {CaseName: Name(""), in: `{"F3": -}`, ptr: new(V), err: &SyntaxError{"invalid character '}' in numeric literal", 9}},
// raw value errors
{CaseName: Name(""), in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
@@ -1210,6 +1215,28 @@ var unmarshalTests = []struct {
F string `json:"-,omitempty"`
}{"hello"},
},
+
+ {
+ CaseName: Name("ErrorForNestedUnamed"),
+ in: `{"F":{"V":"s"}}`,
+ ptr: new(NestedUnamed),
+ out: NestedUnamed{},
+ err: &UnmarshalTypeError{Value: "string", Type: reflect.TypeFor[int](), Offset: 13, Field: "F.V"},
+ },
+ {
+ CaseName: Name("ErrorInterface"),
+ in: `1`,
+ ptr: new(error),
+ out: error(nil),
+ err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[error](), Offset: 1},
+ },
+ {
+ CaseName: Name("ErrorChan"),
+ in: `1`,
+ ptr: new(chan int),
+ out: (chan int)(nil),
+ err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[chan int](), Offset: 1},
+ },
}
func TestMarshal(t *testing.T) {
@@ -1377,6 +1404,14 @@ func TestUnmarshal(t *testing.T) {
if tt.disallowUnknownFields {
dec.DisallowUnknownFields()
}
+ if tt.err != nil && strings.Contains(tt.err.Error(), "unexpected end of JSON input") {
+ // In streaming mode, we expect EOF or ErrUnexpectedEOF instead.
+ if strings.TrimSpace(tt.in) == "" {
+ tt.err = io.EOF
+ } else {
+ tt.err = io.ErrUnexpectedEOF
+ }
+ }
if err := dec.Decode(v.Interface()); !equalError(err, tt.err) {
t.Fatalf("%s: Decode error:\n\tgot: %v\n\twant: %v\n\n\tgot: %#v\n\twant: %#v", tt.Where, err, tt.err, err, tt.err)
} else if err != nil && tt.out == nil {
diff --git a/src/encoding/json/internal/internal.go b/src/encoding/json/internal/internal.go
index f587c7b32c..c95f83fe44 100644
--- a/src/encoding/json/internal/internal.go
+++ b/src/encoding/json/internal/internal.go
@@ -21,6 +21,7 @@ var AllowInternalUse NotForPublicUse
var (
ErrCycle = errors.New("encountered a cycle")
ErrNonNilReference = errors.New("value must be passed as a non-nil pointer reference")
+ ErrNilInterface = errors.New("cannot derive concrete type for nil interface with finite type set")
)
var (
diff --git a/src/encoding/json/internal/jsonflags/flags.go b/src/encoding/json/internal/jsonflags/flags.go
index 4496359c89..da13adff4d 100644
--- a/src/encoding/json/internal/jsonflags/flags.go
+++ b/src/encoding/json/internal/jsonflags/flags.go
@@ -52,18 +52,20 @@ const (
AllowInvalidUTF8 |
EscapeForHTML |
EscapeForJS |
- EscapeInvalidUTF8 |
PreserveRawStrings |
Deterministic |
FormatNilMapAsNull |
FormatNilSliceAsNull |
MatchCaseInsensitiveNames |
CallMethodsWithLegacySemantics |
+ FormatByteArrayAsArray |
FormatBytesWithLegacySemantics |
- FormatTimeWithLegacySemantics |
+ FormatDurationAsNano |
MatchCaseSensitiveDelimiter |
MergeWithLegacySemantics |
- OmitEmptyWithLegacyDefinition |
+ OmitEmptyWithLegacySemantics |
+ ParseBytesWithLooseRFC4648 |
+ ParseTimeWithLooseRFC3339 |
ReportErrorsWithLegacySemantics |
StringifyWithLegacySemantics |
UnmarshalArrayFromAnyLength
@@ -77,7 +79,7 @@ const (
WhitespaceFlags = AnyWhitespace | Indent | IndentPrefix
// AnyEscape is the set of flags related to escaping in a JSON string.
- AnyEscape = EscapeForHTML | EscapeForJS | EscapeInvalidUTF8
+ AnyEscape = EscapeForHTML | EscapeForJS
// CanonicalizeNumbers is the set of flags related to raw number canonicalization.
CanonicalizeNumbers = CanonicalizeRawInts | CanonicalizeRawFloats
@@ -97,7 +99,6 @@ const (
ReorderRawObjects // encode only
EscapeForHTML // encode only
EscapeForJS // encode only
- EscapeInvalidUTF8 // encode only; only exposed in v1
Multiline // encode only
SpaceAfterColon // encode only
SpaceAfterComma // encode only
@@ -132,11 +133,14 @@ const (
_ Bools = (maxArshalV2Flag >> 1) << iota
CallMethodsWithLegacySemantics // marshal or unmarshal
+ FormatByteArrayAsArray // marshal or unmarshal
FormatBytesWithLegacySemantics // marshal or unmarshal
- FormatTimeWithLegacySemantics // marshal or unmarshal
+ FormatDurationAsNano // marshal or unmarshal
MatchCaseSensitiveDelimiter // marshal or unmarshal
MergeWithLegacySemantics // unmarshal
- OmitEmptyWithLegacyDefinition // marshal
+ OmitEmptyWithLegacySemantics // marshal
+ ParseBytesWithLooseRFC4648 // unmarshal
+ ParseTimeWithLooseRFC3339 // unmarshal
ReportErrorsWithLegacySemantics // marshal or unmarshal
StringifyWithLegacySemantics // marshal or unmarshal
StringifyBoolsAndStrings // marshal or unmarshal; for internal use by jsonv2.makeStructArshaler
@@ -146,6 +150,12 @@ const (
maxArshalV1Flag
)
+// bitsUsed is the number of bits used in the 64-bit boolean flags.
+const bitsUsed = 42
+
+// Static compile check that bitsUsed and maxArshalV1Flag are in sync.
+const _ = uint64((1<<bitsUsed)-maxArshalV1Flag) + uint64(maxArshalV1Flag-(1<<bitsUsed))
+
// Flags is a set of boolean flags.
// If the presence bit is zero, then the value bit must also be zero.
// The least-significant bit of both fields is always zero.
diff --git a/src/encoding/json/internal/jsonwire/encode.go b/src/encoding/json/internal/jsonwire/encode.go
index 3901ff8bed..8f9b8ab09e 100644
--- a/src/encoding/json/internal/jsonwire/encode.go
+++ b/src/encoding/json/internal/jsonwire/encode.go
@@ -92,11 +92,7 @@ func AppendQuote[Bytes ~[]byte | ~string](dst []byte, src Bytes, flags *jsonflag
case isInvalidUTF8(r, rn):
hasInvalidUTF8 = true
dst = append(dst, src[i:n-rn]...)
- if flags.Get(jsonflags.EscapeInvalidUTF8) {
- dst = append(dst, `\ufffd`...)
- } else {
- dst = append(dst, "\ufffd"...)
- }
+ dst = append(dst, "\ufffd"...)
i = n
case (r == '\u2028' || r == '\u2029') && flags.Get(jsonflags.EscapeForJS):
dst = append(dst, src[i:n-rn]...)
diff --git a/src/encoding/json/jsontext/decode.go b/src/encoding/json/jsontext/decode.go
index 784ae4709a..f505de4468 100644
--- a/src/encoding/json/jsontext/decode.go
+++ b/src/encoding/json/jsontext/decode.go
@@ -138,7 +138,12 @@ func (d *Decoder) Reset(r io.Reader, opts ...Options) {
case d.s.Flags.Get(jsonflags.WithinArshalCall):
panic("jsontext: cannot reset Decoder passed to json.UnmarshalerFrom")
}
- d.s.reset(nil, r, opts...)
+ // Reuse the buffer if it does not alias a previous [bytes.Buffer].
+ b := d.s.buf[:0]
+ if _, ok := d.s.rd.(*bytes.Buffer); ok {
+ b = nil
+ }
+ d.s.reset(b, r, opts...)
}
func (d *decoderState) reset(b []byte, r io.Reader, opts ...Options) {
@@ -769,7 +774,8 @@ func (d *decoderState) ReadValue(flags *jsonwire.ValueFlags) (Value, error) {
// CheckNextValue checks whether the next value is syntactically valid,
// but does not advance the read offset.
-func (d *decoderState) CheckNextValue() error {
+// If last, it verifies that the stream cleanly terminates with [io.EOF].
+func (d *decoderState) CheckNextValue(last bool) error {
d.PeekKind() // populates d.peekPos and d.peekErr
pos, err := d.peekPos, d.peekErr
d.peekPos, d.peekErr = 0, nil
@@ -780,13 +786,18 @@ func (d *decoderState) CheckNextValue() error {
var flags jsonwire.ValueFlags
if pos, err := d.consumeValue(&flags, pos, d.Tokens.Depth()); err != nil {
return wrapSyntacticError(d, err, pos, +1)
+ } else if last {
+ return d.checkEOF(pos)
}
return nil
}
// CheckEOF verifies that the input has no more data.
func (d *decoderState) CheckEOF() error {
- switch pos, err := d.consumeWhitespace(d.prevEnd); err {
+ return d.checkEOF(d.prevEnd)
+}
+func (d *decoderState) checkEOF(pos int) error {
+ switch pos, err := d.consumeWhitespace(pos); err {
case nil:
err := jsonwire.NewInvalidCharacterError(d.buf[pos:], "after top-level value")
return wrapSyntacticError(d, err, pos, 0)
diff --git a/src/encoding/json/jsontext/decode_test.go b/src/encoding/json/jsontext/decode_test.go
index 67580e6f4f..209ff65ec8 100644
--- a/src/encoding/json/jsontext/decode_test.go
+++ b/src/encoding/json/jsontext/decode_test.go
@@ -1265,3 +1265,86 @@ func TestPeekableDecoder(t *testing.T) {
}
}
}
+
+// TestDecoderReset tests that the decoder preserves its internal
+// buffer between Reset calls to avoid frequent allocations when reusing the decoder.
+// It ensures that the buffer capacity is maintained while avoiding aliasing
+// issues with [bytes.Buffer].
+func TestDecoderReset(t *testing.T) {
+ // Create a decoder with a reasonably large JSON input to ensure buffer growth.
+ largeJSON := `{"key1":"value1","key2":"value2","key3":"value3","key4":"value4","key5":"value5"}`
+ dec := NewDecoder(strings.NewReader(largeJSON))
+
+ t.Run("Test capacity preservation", func(t *testing.T) {
+ // Read the first JSON value to grow the internal buffer.
+ val1, err := dec.ReadValue()
+ if err != nil {
+ t.Fatalf("first ReadValue failed: %v", err)
+ }
+ if string(val1) != largeJSON {
+ t.Fatalf("first ReadValue = %q, want %q", val1, largeJSON)
+ }
+
+ // Get the buffer capacity after first use.
+ initialCapacity := cap(dec.s.buf)
+ if initialCapacity == 0 {
+ t.Fatalf("expected non-zero buffer capacity after first use")
+ }
+
+ // Reset with a new reader - this should preserve the buffer capacity.
+ dec.Reset(strings.NewReader(largeJSON))
+
+ // Verify the buffer capacity is preserved (or at least not smaller).
+ preservedCapacity := cap(dec.s.buf)
+ if preservedCapacity < initialCapacity {
+ t.Fatalf("buffer capacity reduced after Reset: got %d, want at least %d", preservedCapacity, initialCapacity)
+ }
+
+ // Read the second JSON value to ensure the decoder still works correctly.
+ val2, err := dec.ReadValue()
+ if err != nil {
+ t.Fatalf("second ReadValue failed: %v", err)
+ }
+ if string(val2) != largeJSON {
+ t.Fatalf("second ReadValue = %q, want %q", val2, largeJSON)
+ }
+ })
+
+ var bbBuf []byte
+ t.Run("Test aliasing with bytes.Buffer", func(t *testing.T) {
+ // Test with bytes.Buffer to verify proper aliasing behavior.
+ bb := bytes.NewBufferString(largeJSON)
+ dec.Reset(bb)
+ bbBuf = bb.Bytes()
+
+ // Read the third JSON value to ensure functionality with bytes.Buffer.
+ val3, err := dec.ReadValue()
+ if err != nil {
+ t.Fatalf("fourth ReadValue failed: %v", err)
+ }
+ if string(val3) != largeJSON {
+ t.Fatalf("fourth ReadValue = %q, want %q", val3, largeJSON)
+ }
+ // The decoder buffer should alias bytes.Buffer's internal buffer.
+ if len(dec.s.buf) == 0 || len(bbBuf) == 0 || &dec.s.buf[0] != &bbBuf[0] {
+ t.Fatalf("decoder buffer does not alias bytes.Buffer")
+ }
+ })
+
+ t.Run("Test aliasing removed after Reset", func(t *testing.T) {
+ // Reset with a new reader and verify the buffer is not aliased.
+ dec.Reset(strings.NewReader(largeJSON))
+ val4, err := dec.ReadValue()
+ if err != nil {
+ t.Fatalf("fifth ReadValue failed: %v", err)
+ }
+ if string(val4) != largeJSON {
+ t.Fatalf("fourth ReadValue = %q, want %q", val4, largeJSON)
+ }
+
+ // The decoder buffer should not alias the bytes.Buffer's internal buffer.
+ if len(dec.s.buf) == 0 || len(bbBuf) == 0 || &dec.s.buf[0] == &bbBuf[0] {
+ t.Fatalf("decoder buffer aliases bytes.Buffer")
+ }
+ })
+}
diff --git a/src/encoding/json/jsontext/doc.go b/src/encoding/json/jsontext/doc.go
index 8e4bced015..d890692686 100644
--- a/src/encoding/json/jsontext/doc.go
+++ b/src/encoding/json/jsontext/doc.go
@@ -103,6 +103,10 @@
// RFC 7493 is a stricter subset of RFC 8259 and fully compliant with it.
// In particular, it makes specific choices about behavior that RFC 8259
// leaves as undefined in order to ensure greater interoperability.
+//
+// # Security Considerations
+//
+// See the "Security Considerations" section in [encoding/json/v2].
package jsontext
// requireKeyedLiterals can be embedded in a struct to require keyed literals.
diff --git a/src/encoding/json/jsontext/encode.go b/src/encoding/json/jsontext/encode.go
index 562d217fef..e3b9c04ca6 100644
--- a/src/encoding/json/jsontext/encode.go
+++ b/src/encoding/json/jsontext/encode.go
@@ -107,12 +107,17 @@ func (e *Encoder) Reset(w io.Writer, opts ...Options) {
case e.s.Flags.Get(jsonflags.WithinArshalCall):
panic("jsontext: cannot reset Encoder passed to json.MarshalerTo")
}
- e.s.reset(nil, w, opts...)
+ // Reuse the buffer if it does not alias a previous [bytes.Buffer].
+ b := e.s.Buf[:0]
+ if _, ok := e.s.wr.(*bytes.Buffer); ok {
+ b = nil
+ }
+ e.s.reset(b, w, opts...)
}
func (e *encoderState) reset(b []byte, w io.Writer, opts ...Options) {
e.state.reset()
- e.encodeBuffer = encodeBuffer{Buf: b, wr: w, bufStats: e.bufStats}
+ e.encodeBuffer = encodeBuffer{Buf: b, wr: w, availBuffer: e.availBuffer, bufStats: e.bufStats}
if bb, ok := w.(*bytes.Buffer); ok && bb != nil {
e.Buf = bb.AvailableBuffer() // alias the unused buffer of bb
}
diff --git a/src/encoding/json/jsontext/encode_test.go b/src/encoding/json/jsontext/encode_test.go
index 206482263f..a9505f5258 100644
--- a/src/encoding/json/jsontext/encode_test.go
+++ b/src/encoding/json/jsontext/encode_test.go
@@ -735,3 +735,95 @@ func testEncoderErrors(t *testing.T, where jsontest.CasePos, opts []Options, cal
t.Fatalf("%s: Encoder.OutputOffset = %v, want %v", where, gotOffset, wantOffset)
}
}
+
+// TestEncoderReset tests that the encoder preserves its internal
+// buffer between Reset calls to avoid frequent allocations when reusing the encoder.
+// It ensures that the buffer capacity is maintained while avoiding aliasing
+// issues with [bytes.Buffer].
+func TestEncoderReset(t *testing.T) {
+ // Create an encoder with a reasonably large JSON input to ensure buffer growth.
+ largeJSON := `{"key1":"value1","key2":"value2","key3":"value3","key4":"value4","key5":"value5"}` + "\n"
+ bb := new(bytes.Buffer)
+ enc := NewEncoder(struct{ io.Writer }{bb}) // mask out underlying [bytes.Buffer]
+
+ t.Run("Test capacity preservation", func(t *testing.T) {
+ // Write the first JSON value to grow the internal buffer.
+ err := enc.WriteValue(append(enc.AvailableBuffer(), largeJSON...))
+ if err != nil {
+ t.Fatalf("first WriteValue failed: %v", err)
+ }
+ if bb.String() != largeJSON {
+ t.Fatalf("first WriteValue = %q, want %q", bb.String(), largeJSON)
+ }
+
+ // Get the buffer capacity after first use.
+ initialCapacity := cap(enc.s.Buf)
+ initialCacheCapacity := cap(enc.s.availBuffer)
+ if initialCapacity == 0 {
+ t.Fatalf("expected non-zero buffer capacity after first use")
+ }
+ if initialCacheCapacity == 0 {
+ t.Fatalf("expected non-zero cache capacity after first use")
+ }
+
+ // Reset with a new writer - this should preserve the buffer capacity.
+ bb.Reset()
+ enc.Reset(struct{ io.Writer }{bb})
+
+ // Verify the buffer capacity is preserved (or at least not smaller).
+ preservedCapacity := cap(enc.s.Buf)
+ if preservedCapacity < initialCapacity {
+ t.Fatalf("buffer capacity reduced after Reset: got %d, want at least %d", preservedCapacity, initialCapacity)
+ }
+ preservedCacheCapacity := cap(enc.s.availBuffer)
+ if preservedCacheCapacity < initialCacheCapacity {
+ t.Fatalf("cache capacity reduced after Reset: got %d, want at least %d", preservedCapacity, initialCapacity)
+ }
+
+ // Write the second JSON value to ensure the encoder still works correctly.
+ err = enc.WriteValue(append(enc.AvailableBuffer(), largeJSON...))
+ if err != nil {
+ t.Fatalf("second WriteValue failed: %v", err)
+ }
+ if bb.String() != largeJSON {
+ t.Fatalf("second WriteValue = %q, want %q", bb.String(), largeJSON)
+ }
+ })
+
+ t.Run("Test aliasing with bytes.Buffer", func(t *testing.T) {
+ // Test with bytes.Buffer to verify proper aliasing behavior.
+ bb.Reset()
+ enc.Reset(bb)
+
+ // Write the third JSON value to ensure functionality with bytes.Buffer.
+ err := enc.WriteValue([]byte(largeJSON))
+ if err != nil {
+ t.Fatalf("fourth WriteValue failed: %v", err)
+ }
+ if bb.String() != largeJSON {
+ t.Fatalf("fourth WriteValue = %q, want %q", bb.String(), largeJSON)
+ }
+ // The encoder buffer should alias bytes.Buffer's internal buffer.
+ if cap(enc.s.Buf) == 0 || cap(bb.AvailableBuffer()) == 0 || &enc.s.Buf[:1][0] != &bb.AvailableBuffer()[:1][0] {
+ t.Fatalf("encoder buffer does not alias bytes.Buffer")
+ }
+ })
+
+ t.Run("Test aliasing removed after Reset", func(t *testing.T) {
+ // Reset with a new writer and verify the buffer is not aliased.
+ bb.Reset()
+ enc.Reset(struct{ io.Writer }{bb})
+ err := enc.WriteValue([]byte(largeJSON))
+ if err != nil {
+ t.Fatalf("fifth WriteValue failed: %v", err)
+ }
+ if bb.String() != largeJSON {
+ t.Fatalf("fourth WriteValue = %q, want %q", bb.String(), largeJSON)
+ }
+
+ // The encoder buffer should not alias the bytes.Buffer's internal buffer.
+ if cap(enc.s.Buf) == 0 || cap(bb.AvailableBuffer()) == 0 || &enc.s.Buf[:1][0] == &bb.AvailableBuffer()[:1][0] {
+ t.Fatalf("encoder buffer aliases bytes.Buffer")
+ }
+ })
+}
diff --git a/src/encoding/json/jsontext/options.go b/src/encoding/json/jsontext/options.go
index e07de21fcf..7eb4f9b9e0 100644
--- a/src/encoding/json/jsontext/options.go
+++ b/src/encoding/json/jsontext/options.go
@@ -271,6 +271,7 @@ func WithIndentPrefix(prefix string) Options {
/*
// TODO(https://go.dev/issue/56733): Implement WithByteLimit and WithDepthLimit.
+// Remember to also update the "Security Considerations" section.
// WithByteLimit sets a limit on the number of bytes of input or output bytes
// that may be consumed or produced for each top-level JSON value.
diff --git a/src/encoding/json/stream_test.go b/src/encoding/json/stream_test.go
index 478ee18291..9e5d48d39d 100644
--- a/src/encoding/json/stream_test.go
+++ b/src/encoding/json/stream_test.go
@@ -522,3 +522,38 @@ func TestHTTPDecoding(t *testing.T) {
t.Errorf("Decode error:\n\tgot: %v\n\twant: io.EOF", err)
}
}
+
+func TestTokenTruncation(t *testing.T) {
+ tests := []struct {
+ in string
+ err error
+ }{
+ {in: ``, err: io.EOF},
+ {in: `{`, err: io.EOF},
+ {in: `{"`, err: io.ErrUnexpectedEOF},
+ {in: `{"k"`, err: io.EOF},
+ {in: `{"k":`, err: io.EOF},
+ {in: `{"k",`, err: &SyntaxError{"invalid character ',' after object key", int64(len(`{"k"`))}},
+ {in: `{"k"}`, err: &SyntaxError{"invalid character '}' after object key", int64(len(`{"k"`))}},
+ {in: ` [0`, err: io.EOF},
+ {in: `[0.`, err: io.ErrUnexpectedEOF},
+ {in: `[0. `, err: &SyntaxError{"invalid character ' ' after decimal point in numeric literal", int64(len(`[0.`))}},
+ {in: `[0,`, err: io.EOF},
+ {in: `[0:`, err: &SyntaxError{"invalid character ':' after array element", int64(len(`[0`))}},
+ {in: `n`, err: io.ErrUnexpectedEOF},
+ {in: `nul`, err: io.ErrUnexpectedEOF},
+ {in: `fal `, err: &SyntaxError{"invalid character ' ' in literal false (expecting 's')", int64(len(`fal `))}},
+ {in: `false`, err: io.EOF},
+ }
+ for _, tt := range tests {
+ d := NewDecoder(strings.NewReader(tt.in))
+ for i := 0; true; i++ {
+ if _, err := d.Token(); err != nil {
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("`%s`: %d.Token error = %#v, want %v", tt.in, i, err, tt.err)
+ }
+ break
+ }
+ }
+ }
+}
diff --git a/src/encoding/json/v2/arshal.go b/src/encoding/json/v2/arshal.go
index 10b16efe4a..e2ce778d5a 100644
--- a/src/encoding/json/v2/arshal.go
+++ b/src/encoding/json/v2/arshal.go
@@ -409,7 +409,7 @@ func Unmarshal(in []byte, out any, opts ...Options) (err error) {
dec := export.GetBufferedDecoder(in, opts...)
defer export.PutBufferedDecoder(dec)
xd := export.Decoder(dec)
- err = unmarshalFull(dec, out, &xd.Struct)
+ err = unmarshalDecode(dec, out, &xd.Struct, true)
if err != nil && xd.Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
return internal.TransformUnmarshalError(out, err)
}
@@ -426,24 +426,13 @@ func UnmarshalRead(in io.Reader, out any, opts ...Options) (err error) {
dec := export.GetStreamingDecoder(in, opts...)
defer export.PutStreamingDecoder(dec)
xd := export.Decoder(dec)
- err = unmarshalFull(dec, out, &xd.Struct)
+ err = unmarshalDecode(dec, out, &xd.Struct, true)
if err != nil && xd.Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
return internal.TransformUnmarshalError(out, err)
}
return err
}
-func unmarshalFull(in *jsontext.Decoder, out any, uo *jsonopts.Struct) error {
- switch err := unmarshalDecode(in, out, uo); err {
- case nil:
- return export.Decoder(in).CheckEOF()
- case io.EOF:
- return io.ErrUnexpectedEOF
- default:
- return err
- }
-}
-
// UnmarshalDecode deserializes a Go value from a [jsontext.Decoder] according to
// the provided unmarshal options (while ignoring marshal, encode, or decode options).
// Any unmarshal options already specified on the [jsontext.Decoder]
@@ -462,14 +451,14 @@ func UnmarshalDecode(in *jsontext.Decoder, out any, opts ...Options) (err error)
defer func() { xd.Struct = optsOriginal }()
xd.Struct.JoinWithoutCoderOptions(opts...)
}
- err = unmarshalDecode(in, out, &xd.Struct)
+ err = unmarshalDecode(in, out, &xd.Struct, false)
if err != nil && xd.Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
return internal.TransformUnmarshalError(out, err)
}
return err
}
-func unmarshalDecode(in *jsontext.Decoder, out any, uo *jsonopts.Struct) (err error) {
+func unmarshalDecode(in *jsontext.Decoder, out any, uo *jsonopts.Struct, last bool) (err error) {
v := reflect.ValueOf(out)
if v.Kind() != reflect.Pointer || v.IsNil() {
return &SemanticError{action: "unmarshal", GoType: reflect.TypeOf(out), Err: internal.ErrNonNilReference}
@@ -480,7 +469,11 @@ func unmarshalDecode(in *jsontext.Decoder, out any, uo *jsonopts.Struct) (err er
// In legacy semantics, the entirety of the next JSON value
// was validated before attempting to unmarshal it.
if uo.Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
- if err := export.Decoder(in).CheckNextValue(); err != nil {
+ if err := export.Decoder(in).CheckNextValue(last); err != nil {
+ if err == io.EOF {
+ offset := in.InputOffset() + int64(len(in.UnreadBuffer()))
+ return &jsontext.SyntacticError{ByteOffset: offset, Err: io.ErrUnexpectedEOF}
+ }
return err
}
}
@@ -494,8 +487,15 @@ func unmarshalDecode(in *jsontext.Decoder, out any, uo *jsonopts.Struct) (err er
if !uo.Flags.Get(jsonflags.AllowDuplicateNames) {
export.Decoder(in).Tokens.InvalidateDisabledNamespaces()
}
+ if err == io.EOF {
+ offset := in.InputOffset() + int64(len(in.UnreadBuffer()))
+ return &jsontext.SyntacticError{ByteOffset: offset, Err: io.ErrUnexpectedEOF}
+ }
return err
}
+ if last {
+ return export.Decoder(in).CheckEOF()
+ }
return nil
}
diff --git a/src/encoding/json/v2/arshal_default.go b/src/encoding/json/v2/arshal_default.go
index 5ca51c6635..f3fc79beac 100644
--- a/src/encoding/json/v2/arshal_default.go
+++ b/src/encoding/json/v2/arshal_default.go
@@ -329,8 +329,9 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler {
default:
return newInvalidFormatError(enc, t, mo)
}
- } else if mo.Flags.Get(jsonflags.FormatBytesWithLegacySemantics) &&
- (va.Kind() == reflect.Array || hasMarshaler) {
+ } else if mo.Flags.Get(jsonflags.FormatByteArrayAsArray) && va.Kind() == reflect.Array {
+ return marshalArray(enc, va, mo)
+ } else if mo.Flags.Get(jsonflags.FormatBytesWithLegacySemantics) && hasMarshaler {
return marshalArray(enc, va, mo)
}
if mo.Flags.Get(jsonflags.FormatNilSliceAsNull) && va.Kind() == reflect.Slice && va.IsNil() {
@@ -366,8 +367,9 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler {
default:
return newInvalidFormatError(dec, t, uo)
}
- } else if uo.Flags.Get(jsonflags.FormatBytesWithLegacySemantics) &&
- (va.Kind() == reflect.Array || dec.PeekKind() == '[') {
+ } else if uo.Flags.Get(jsonflags.FormatByteArrayAsArray) && va.Kind() == reflect.Array {
+ return unmarshalArray(dec, va, uo)
+ } else if uo.Flags.Get(jsonflags.FormatBytesWithLegacySemantics) && dec.PeekKind() == '[' {
return unmarshalArray(dec, va, uo)
}
var flags jsonwire.ValueFlags
@@ -395,7 +397,7 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler {
if err != nil {
return newUnmarshalErrorAfter(dec, t, err)
}
- if len(val) != encodedLen(len(b)) && !uo.Flags.Get(jsonflags.FormatBytesWithLegacySemantics) {
+ if len(val) != encodedLen(len(b)) && !uo.Flags.Get(jsonflags.ParseBytesWithLooseRFC4648) {
// TODO(https://go.dev/issue/53845): RFC 4648, section 3.3,
// specifies that non-alphabet characters must be rejected.
// Unfortunately, the "base32" and "base64" packages allow
@@ -1065,7 +1067,7 @@ func makeStructArshaler(t reflect.Type) *arshaler {
}
// Check for the legacy definition of omitempty.
- if f.omitempty && mo.Flags.Get(jsonflags.OmitEmptyWithLegacyDefinition) && isLegacyEmpty(v) {
+ if f.omitempty && mo.Flags.Get(jsonflags.OmitEmptyWithLegacySemantics) && isLegacyEmpty(v) {
continue
}
@@ -1080,7 +1082,7 @@ func makeStructArshaler(t reflect.Type) *arshaler {
// OmitEmpty skips the field if the marshaled JSON value is empty,
// which we can know up front if there are no custom marshalers,
// otherwise we must marshal the value and unwrite it if empty.
- if f.omitempty && !mo.Flags.Get(jsonflags.OmitEmptyWithLegacyDefinition) &&
+ if f.omitempty && !mo.Flags.Get(jsonflags.OmitEmptyWithLegacySemantics) &&
!nonDefault && f.isEmpty != nil && f.isEmpty(v) {
continue // fast path for omitempty
}
@@ -1145,7 +1147,7 @@ func makeStructArshaler(t reflect.Type) *arshaler {
}
// Try unwriting the member if empty (slow path for omitempty).
- if f.omitempty && !mo.Flags.Get(jsonflags.OmitEmptyWithLegacyDefinition) {
+ if f.omitempty && !mo.Flags.Get(jsonflags.OmitEmptyWithLegacySemantics) {
var prevName *string
if prevIdx >= 0 {
prevName = &fields.flattened[prevIdx].name
@@ -1688,8 +1690,6 @@ func makePointerArshaler(t reflect.Type) *arshaler {
return &fncs
}
-var errNilInterface = errors.New("cannot derive concrete type for nil interface with finite type set")
-
func makeInterfaceArshaler(t reflect.Type) *arshaler {
// NOTE: Values retrieved from an interface are not addressable,
// so we shallow copy the values to make them addressable and
@@ -1795,7 +1795,7 @@ func makeInterfaceArshaler(t reflect.Type) *arshaler {
k := dec.PeekKind()
if !isAnyType(t) {
- return newUnmarshalErrorBeforeWithSkipping(dec, uo, t, errNilInterface)
+ return newUnmarshalErrorBeforeWithSkipping(dec, uo, t, internal.ErrNilInterface)
}
switch k {
case 'f', 't':
diff --git a/src/encoding/json/v2/arshal_test.go b/src/encoding/json/v2/arshal_test.go
index 8494deed03..f1ee2e2e3a 100644
--- a/src/encoding/json/v2/arshal_test.go
+++ b/src/encoding/json/v2/arshal_test.go
@@ -1924,12 +1924,12 @@ func TestMarshal(t *testing.T) {
}`,
}, {
name: jsontest.Name("Structs/OmitEmpty/Legacy/Zero"),
- opts: []Options{jsonflags.OmitEmptyWithLegacyDefinition | 1},
+ opts: []Options{jsonflags.OmitEmptyWithLegacySemantics | 1},
in: structOmitEmptyAll{},
want: `{}`,
}, {
name: jsontest.Name("Structs/OmitEmpty/Legacy/NonEmpty"),
- opts: []Options{jsontext.Multiline(true), jsonflags.OmitEmptyWithLegacyDefinition | 1},
+ opts: []Options{jsontext.Multiline(true), jsonflags.OmitEmptyWithLegacySemantics | 1},
in: structOmitEmptyAll{
Bool: true,
PointerBool: addr(true),
@@ -2144,7 +2144,7 @@ func TestMarshal(t *testing.T) {
"Default": "AQIDBA=="
}`}, {
name: jsontest.Name("Structs/Format/ArrayBytes/Legacy"),
- opts: []Options{jsontext.Multiline(true), jsonflags.FormatBytesWithLegacySemantics | 1},
+ opts: []Options{jsontext.Multiline(true), jsonflags.FormatByteArrayAsArray | jsonflags.FormatBytesWithLegacySemantics | 1},
in: structFormatArrayBytes{
Base16: [4]byte{1, 2, 3, 4},
Base32: [4]byte{1, 2, 3, 4},
@@ -4394,7 +4394,7 @@ func TestMarshal(t *testing.T) {
}, {
/* TODO(https://go.dev/issue/71631): Re-enable this test case.
name: jsontest.Name("Duration/Format/Legacy"),
- opts: []Options{jsonflags.FormatTimeWithLegacySemantics | 1},
+ opts: []Options{jsonflags.FormatDurationAsNano | 1},
in: structDurationFormat{
D1: 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond,
D2: 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond,
@@ -4407,7 +4407,7 @@ func TestMarshal(t *testing.T) {
want: `{"1s":""}`,
}, { */
name: jsontest.Name("Duration/MapKey/Legacy"),
- opts: []Options{jsonflags.FormatTimeWithLegacySemantics | 1},
+ opts: []Options{jsonflags.FormatDurationAsNano | 1},
in: map[time.Duration]string{time.Second: ""},
want: `{"1000000000":""}`,
}, {
@@ -6399,7 +6399,7 @@ func TestUnmarshal(t *testing.T) {
wantErr: EU(errors.New("illegal character '\\r' at offset 3")).withPos(`{"Base64": `, "/Base64").withType('"', T[[]byte]()),
}, {
name: jsontest.Name("Structs/Format/Bytes/Base64/NonAlphabet/Ignored"),
- opts: []Options{jsonflags.FormatBytesWithLegacySemantics | 1},
+ opts: []Options{jsonflags.ParseBytesWithLooseRFC4648 | 1},
inBuf: `{"Base64": "aa=\r\n="}`,
inVal: new(structFormatBytes),
want: &structFormatBytes{Base64: []byte{105}},
@@ -7138,7 +7138,13 @@ func TestUnmarshal(t *testing.T) {
inBuf: ``,
inVal: addr(structAll{}),
want: addr(structAll{}),
- wantErr: io.ErrUnexpectedEOF,
+ wantErr: &jsontext.SyntacticError{Err: io.ErrUnexpectedEOF},
+ }, {
+ name: jsontest.Name("Structs/Invalid/ErrUnexpectedEOF"),
+ inBuf: " \n\r\t",
+ inVal: addr(structAll{}),
+ want: addr(structAll{}),
+ wantErr: &jsontext.SyntacticError{Err: io.ErrUnexpectedEOF, ByteOffset: len64(" \n\r\t")},
}, {
name: jsontest.Name("Structs/Invalid/NestedErrUnexpectedEOF"),
inBuf: `{"Pointer":`,
@@ -7490,7 +7496,7 @@ func TestUnmarshal(t *testing.T) {
inBuf: `"hello"`,
inVal: new(io.Reader),
want: new(io.Reader),
- wantErr: EU(errNilInterface).withType(0, T[io.Reader]()),
+ wantErr: EU(internal.ErrNilInterface).withType(0, T[io.Reader]()),
}, {
name: jsontest.Name("Interfaces/Empty/False"),
inBuf: `false`,
@@ -8338,7 +8344,7 @@ func TestUnmarshal(t *testing.T) {
inBuf: `{"X":"hello"}`,
inVal: addr(struct{ X fmt.Stringer }{nil}),
want: addr(struct{ X fmt.Stringer }{nil}),
- wantErr: EU(errNilInterface).withPos(`{"X":`, "/X").withType(0, T[fmt.Stringer]()),
+ wantErr: EU(internal.ErrNilInterface).withPos(`{"X":`, "/X").withType(0, T[fmt.Stringer]()),
}, {
name: jsontest.Name("Functions/Interface/NetIP"),
opts: []Options{
@@ -8879,7 +8885,7 @@ func TestUnmarshal(t *testing.T) {
/* TODO(https://go.dev/issue/71631): Re-enable this test case.
name: jsontest.Name("Duration/Format/Legacy"),
inBuf: `{"D1":45296078090012,"D2":"12h34m56.078090012s"}`,
- opts: []Options{jsonflags.FormatTimeWithLegacySemantics | 1},
+ opts: []Options{jsonflags.FormatDurationAsNano | 1},
inVal: new(structDurationFormat),
want: addr(structDurationFormat{
D1: 12*time.Hour + 34*time.Minute + 56*time.Second + 78*time.Millisecond + 90*time.Microsecond + 12*time.Nanosecond,
@@ -8893,7 +8899,7 @@ func TestUnmarshal(t *testing.T) {
want: addr(map[time.Duration]string{time.Second: ""}),
}, { */
name: jsontest.Name("Duration/MapKey/Legacy"),
- opts: []Options{jsonflags.FormatTimeWithLegacySemantics | 1},
+ opts: []Options{jsonflags.FormatDurationAsNano | 1},
inBuf: `{"1000000000":""}`,
inVal: new(map[time.Duration]string),
want: addr(map[time.Duration]string{time.Second: ""}),
diff --git a/src/encoding/json/v2/arshal_time.go b/src/encoding/json/v2/arshal_time.go
index fefa50ff5f..06fed03e05 100644
--- a/src/encoding/json/v2/arshal_time.go
+++ b/src/encoding/json/v2/arshal_time.go
@@ -50,7 +50,7 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler {
if !m.initFormat(mo.Format) {
return newInvalidFormatError(enc, t, mo)
}
- } else if mo.Flags.Get(jsonflags.FormatTimeWithLegacySemantics) {
+ } else if mo.Flags.Get(jsonflags.FormatDurationAsNano) {
return marshalNano(enc, va, mo)
} else {
// TODO(https://go.dev/issue/71631): Decide on default duration representation.
@@ -76,7 +76,7 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler {
if !u.initFormat(uo.Format) {
return newInvalidFormatError(dec, t, uo)
}
- } else if uo.Flags.Get(jsonflags.FormatTimeWithLegacySemantics) {
+ } else if uo.Flags.Get(jsonflags.FormatDurationAsNano) {
return unmarshalNano(dec, va, uo)
} else {
// TODO(https://go.dev/issue/71631): Decide on default duration representation.
@@ -150,7 +150,7 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler {
if !u.initFormat(uo.Format) {
return newInvalidFormatError(dec, t, uo)
}
- } else if uo.Flags.Get(jsonflags.FormatTimeWithLegacySemantics) {
+ } else if uo.Flags.Get(jsonflags.ParseTimeWithLooseRFC3339) {
u.looseRFC3339 = true
}
diff --git a/src/encoding/json/v2/doc.go b/src/encoding/json/v2/doc.go
index 203139754c..8179f8ab17 100644
--- a/src/encoding/json/v2/doc.go
+++ b/src/encoding/json/v2/doc.go
@@ -166,6 +166,100 @@
// Marshaling or unmarshaling a non-empty struct
// without any JSON representable fields results in a [SemanticError].
// Unexported fields must not have any `json` tags except for `json:"-"`.
+//
+// # Security Considerations
+//
+// JSON is frequently used as a data interchange format to communicate
+// between different systems, possibly implemented in different languages.
+// For interoperability and security reasons, it is important that
+// all implementations agree upon the semantic meaning of the data.
+//
+// [For example, suppose we have two micro-services.]
+// The first service is responsible for authenticating a JSON request,
+// while the second service is responsible for executing the request
+// (having assumed that the prior service authenticated the request).
+// If an attacker were able to maliciously craft a JSON request such that
+// both services believe that the same request is from different users,
+// it could bypass the authenticator with valid credentials for one user,
+// but maliciously perform an action on behalf of a different user.
+//
+// According to RFC 8259, there unfortunately exist many JSON texts
+// that are syntactically valid but semantically ambiguous.
+// For example, the standard does not define how to interpret duplicate
+// names within an object.
+//
+// The v1 [encoding/json] and [encoding/json/v2] packages
+// interpret some inputs in different ways. In particular:
+//
+// - The standard specifies that JSON must be encoded using UTF-8.
+// By default, v1 replaces invalid bytes of UTF-8 in JSON strings
+// with the Unicode replacement character,
+// while v2 rejects inputs with invalid UTF-8.
+// To change the default, specify the [jsontext.AllowInvalidUTF8] option.
+// The replacement of invalid UTF-8 is a form of data corruption
+// that alters the precise meaning of strings.
+//
+// - The standard does not specify a particular behavior when
+// duplicate names are encountered within a JSON object,
+// which means that different implementations may behave differently.
+// By default, v1 allows for the presence of duplicate names,
+// while v2 rejects duplicate names.
+// To change the default, specify the [jsontext.AllowDuplicateNames] option.
+// If allowed, object members are processed in the order they are observed,
+// meaning that later values will replace or be merged into prior values,
+// depending on the Go value type.
+//
+// - The standard defines a JSON object as an unordered collection of name/value pairs.
+// While ordering can be observed through the underlying [jsontext] API,
+// both v1 and v2 generally avoid exposing the ordering.
+// No application should semantically depend on the order of object members.
+// Allowing duplicate names is a vector through which ordering of members
+// can accidentally be observed and depended upon.
+//
+// - The standard suggests that JSON object names are typically compared
+// based on equality of the sequence of Unicode code points,
+// which implies that comparing names is often case-sensitive.
+// When unmarshaling a JSON object into a Go struct,
+// by default, v1 uses a (loose) case-insensitive match on the name,
+// while v2 uses a (strict) case-sensitive match on the name.
+// To change the default, specify the [MatchCaseInsensitiveNames] option.
+// The use of case-insensitive matching provides another vector through
+// which duplicate names can occur. Allowing case-insensitive matching
+// means that v1 or v2 might interpret JSON objects differently from most
+// other JSON implementations (which typically use a case-sensitive match).
+//
+// - The standard does not specify a particular behavior when
+// an unknown name in a JSON object is encountered.
+// When unmarshaling a JSON object into a Go struct, by default
+// both v1 and v2 ignore unknown names and their corresponding values.
+// To change the default, specify the [RejectUnknownMembers] option.
+//
+// - The standard suggests that implementations may use a float64
+// to represent a JSON number. Consequently, large JSON integers
+// may lose precision when stored as a floating-point type.
+// Both v1 and v2 correctly preserve precision when marshaling and
+// unmarshaling a concrete integer type. However, even if v1 and v2
+// preserve precision for concrete types, other JSON implementations
+// may not be able to preserve precision for outputs produced by v1 or v2.
+// The `string` tag option can be used to specify that an integer type
+// is to be quoted within a JSON string to avoid loss of precision.
+// Furthermore, v1 and v2 may still lose precision when unmarshaling
+// into an any interface value, where unmarshal uses a float64
+// by default to represent a JSON number.
+// To change the default, specify the [WithUnmarshalers] option
+// with a custom unmarshaler that pre-populates the interface value
+// with a concrete Go type that can preserve precision.
+//
+// RFC 8785 specifies a canonical form for any JSON text,
+// which explicitly defines specific behaviors that RFC 8259 leaves undefined.
+// In theory, if a text can successfully [jsontext.Value.Canonicalize]
+// without changing the semantic meaning of the data, then it provides a
+// greater degree of confidence that the data is more secure and interoperable.
+//
+// The v2 API generally chooses more secure defaults than v1,
+// but care should still be taken with large integers or unknown members.
+//
+// [For example, suppose we have two micro-services.]: https://www.youtube.com/watch?v=avilmOcHKHE&t=1057s
package json
// requireKeyedLiterals can be embedded in a struct to require keyed literals.
diff --git a/src/encoding/json/v2/errors.go b/src/encoding/json/v2/errors.go
index 48cdcc953b..1f31505869 100644
--- a/src/encoding/json/v2/errors.go
+++ b/src/encoding/json/v2/errors.go
@@ -120,10 +120,17 @@ func newMarshalErrorBefore(e *jsontext.Encoder, t reflect.Type, err error) error
// is positioned right before the next token or value, which causes an error.
// It does not record the next JSON kind as this error is used to indicate
// the receiving Go value is invalid to unmarshal into (and not a JSON error).
+// However, if [jsonflags.ReportErrorsWithLegacySemantics] is specified,
+// then it does record the next JSON kind for historical reporting reasons.
func newUnmarshalErrorBefore(d *jsontext.Decoder, t reflect.Type, err error) error {
+ var k jsontext.Kind
+ if export.Decoder(d).Flags.Get(jsonflags.ReportErrorsWithLegacySemantics) {
+ k = d.PeekKind()
+ }
return &SemanticError{action: "unmarshal", GoType: t, Err: err,
ByteOffset: d.InputOffset() + int64(export.Decoder(d).CountNextDelimWhitespace()),
- JSONPointer: jsontext.Pointer(export.Decoder(d).AppendStackPointer(nil, +1))}
+ JSONPointer: jsontext.Pointer(export.Decoder(d).AppendStackPointer(nil, +1)),
+ JSONKind: k}
}
// newUnmarshalErrorBeforeWithSkipping is like [newUnmarshalErrorBefore],
diff --git a/src/encoding/json/v2_decode.go b/src/encoding/json/v2_decode.go
index c82ee903c3..1041ec7ee4 100644
--- a/src/encoding/json/v2_decode.go
+++ b/src/encoding/json/v2_decode.go
@@ -117,19 +117,11 @@ type UnmarshalTypeError struct {
}
func (e *UnmarshalTypeError) Error() string {
- s := "json: cannot unmarshal"
- if e.Value != "" {
- s += " JSON " + e.Value
- }
- s += " into"
- var preposition string
- if e.Field != "" {
- s += " " + e.Struct + "." + e.Field
- preposition = " of"
- }
- if e.Type != nil {
- s += preposition
- s += " Go type " + e.Type.String()
+ var s string
+ if e.Struct != "" || e.Field != "" {
+ s = "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String()
+ } else {
+ s = "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
}
if e.Err != nil {
s += ": " + e.Err.Error()
diff --git a/src/encoding/json/v2_decode_test.go b/src/encoding/json/v2_decode_test.go
index 3ab20e2b5d..f9b0a60f47 100644
--- a/src/encoding/json/v2_decode_test.go
+++ b/src/encoding/json/v2_decode_test.go
@@ -12,6 +12,7 @@ import (
"errors"
"fmt"
"image"
+ "io"
"maps"
"math"
"math/big"
@@ -419,6 +420,8 @@ type DoublePtr struct {
J **int
}
+type NestedUnamed struct{ F struct{ V int } }
+
var unmarshalTests = []struct {
CaseName
in string
@@ -473,11 +476,13 @@ var unmarshalTests = []struct {
{CaseName: Name(""), in: `{"alphabet": "xyz"}`, ptr: new(U), err: fmt.Errorf("json: unknown field \"alphabet\""), disallowUnknownFields: true},
// syntax errors
+ {CaseName: Name(""), in: ``, ptr: new(any), err: &SyntaxError{errUnexpectedEnd.Error(), 0}},
+ {CaseName: Name(""), in: " \n\r\t", ptr: new(any), err: &SyntaxError{errUnexpectedEnd.Error(), len64(" \n\r\t")}},
+ {CaseName: Name(""), in: `[2, 3`, ptr: new(any), err: &SyntaxError{errUnexpectedEnd.Error(), len64(`[2, 3`)}},
{CaseName: Name(""), in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", len64(`{"X": "foo", "Y"`)}},
{CaseName: Name(""), in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", len64(`[1, 2, 3`)}},
{CaseName: Name(""), in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", len64(`{"X":12`)}, useNumber: true},
- {CaseName: Name(""), in: `[2, 3`, err: &SyntaxError{msg: "unexpected end of JSON input", Offset: len64(`[2, 3`)}},
- {CaseName: Name(""), in: `{"F3": -}`, ptr: new(V), err: &SyntaxError{msg: "invalid character '}' in numeric literal", Offset: len64(`{"F3": -`)}},
+ {CaseName: Name(""), in: `{"F3": -}`, ptr: new(V), err: &SyntaxError{"invalid character '}' in numeric literal", len64(`{"F3": -`)}},
// raw value errors
{CaseName: Name(""), in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", len64(``)}},
@@ -1216,6 +1221,28 @@ var unmarshalTests = []struct {
F string `json:"-,omitempty"`
}{"hello"},
},
+
+ {
+ CaseName: Name("ErrorForNestedUnamed"),
+ in: `{"F":{"V":"s"}}`,
+ ptr: new(NestedUnamed),
+ out: NestedUnamed{},
+ err: &UnmarshalTypeError{Value: "string", Type: reflect.TypeFor[int](), Offset: 10, Struct: "NestedUnamed", Field: "F.V"},
+ },
+ {
+ CaseName: Name("ErrorInterface"),
+ in: `1`,
+ ptr: new(error),
+ out: error(nil),
+ err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[error]()},
+ },
+ {
+ CaseName: Name("ErrorChan"),
+ in: `1`,
+ ptr: new(chan int),
+ out: (chan int)(nil),
+ err: &UnmarshalTypeError{Value: "number", Type: reflect.TypeFor[chan int]()},
+ },
}
func TestMarshal(t *testing.T) {
@@ -1246,12 +1273,12 @@ func TestMarshalInvalidUTF8(t *testing.T) {
in string
want string
}{
- {Name(""), "hello\xffworld", `"hello\ufffdworld"`},
+ {Name(""), "hello\xffworld", "\"hello\ufffdworld\""},
{Name(""), "", `""`},
- {Name(""), "\xff", `"\ufffd"`},
- {Name(""), "\xff\xff", `"\ufffd\ufffd"`},
- {Name(""), "a\xffb", `"a\ufffdb"`},
- {Name(""), "\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`},
+ {Name(""), "\xff", "\"\ufffd\""},
+ {Name(""), "\xff\xff", "\"\ufffd\ufffd\""},
+ {Name(""), "a\xffb", "\"a\ufffdb\""},
+ {Name(""), "\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", "\"日本\ufffd\ufffd\ufffd\""},
}
for _, tt := range tests {
t.Run(tt.Name, func(t *testing.T) {
@@ -1382,6 +1409,14 @@ func TestUnmarshal(t *testing.T) {
if tt.disallowUnknownFields {
dec.DisallowUnknownFields()
}
+ if tt.err != nil && strings.Contains(tt.err.Error(), errUnexpectedEnd.Error()) {
+ // In streaming mode, we expect EOF or ErrUnexpectedEOF instead.
+ if strings.TrimSpace(tt.in) == "" {
+ tt.err = io.EOF
+ } else {
+ tt.err = io.ErrUnexpectedEOF
+ }
+ }
if err := dec.Decode(v.Interface()); !equalError(err, tt.err) {
t.Fatalf("%s: Decode error:\n\tgot: %v\n\twant: %v\n\n\tgot: %#v\n\twant: %#v", tt.Where, err, tt.err, err, tt.err)
} else if err != nil && tt.out == nil {
@@ -1541,12 +1576,12 @@ func TestErrorMessageFromMisusedString(t *testing.T) {
CaseName
in, err string
}{
- {Name(""), `{"result":"x"}`, `json: cannot unmarshal JSON string into WrongString.result of Go type string: invalid character 'x' looking for beginning of object key string`},
- {Name(""), `{"result":"foo"}`, `json: cannot unmarshal JSON string into WrongString.result of Go type string: invalid character 'f' looking for beginning of object key string`},
- {Name(""), `{"result":"123"}`, `json: cannot unmarshal JSON string into WrongString.result of Go type string: invalid character '1' looking for beginning of object key string`},
- {Name(""), `{"result":123}`, `json: cannot unmarshal JSON number into WrongString.result of Go type string`},
- {Name(""), `{"result":"\""}`, `json: cannot unmarshal JSON string into WrongString.result of Go type string: unexpected end of JSON input`},
- {Name(""), `{"result":"\"foo"}`, `json: cannot unmarshal JSON string into WrongString.result of Go type string: unexpected end of JSON input`},
+ {Name(""), `{"result":"x"}`, `json: cannot unmarshal string into Go struct field WrongString.result of type string: invalid character 'x' looking for beginning of object key string`},
+ {Name(""), `{"result":"foo"}`, `json: cannot unmarshal string into Go struct field WrongString.result of type string: invalid character 'f' looking for beginning of object key string`},
+ {Name(""), `{"result":"123"}`, `json: cannot unmarshal string into Go struct field WrongString.result of type string: invalid character '1' looking for beginning of object key string`},
+ {Name(""), `{"result":123}`, `json: cannot unmarshal number into Go struct field WrongString.result of type string`},
+ {Name(""), `{"result":"\""}`, `json: cannot unmarshal string into Go struct field WrongString.result of type string: unexpected end of JSON input`},
+ {Name(""), `{"result":"\"foo"}`, `json: cannot unmarshal string into Go struct field WrongString.result of type string: unexpected end of JSON input`},
}
for _, tt := range tests {
t.Run(tt.Name, func(t *testing.T) {
@@ -2534,6 +2569,7 @@ func TestUnmarshalEmbeddedUnexported(t *testing.T) {
ptr: new(S1),
out: &S1{R: 2},
err: &UnmarshalTypeError{
+ Value: "number",
Type: reflect.TypeFor[S1](),
Offset: len64(`{"R":2,"Q":`),
Struct: "S1",
@@ -2566,6 +2602,7 @@ func TestUnmarshalEmbeddedUnexported(t *testing.T) {
ptr: new(S5),
out: &S5{R: 2},
err: &UnmarshalTypeError{
+ Value: "number",
Type: reflect.TypeFor[S5](),
Offset: len64(`{"R":2,"Q":`),
Struct: "S5",
diff --git a/src/encoding/json/v2_diff_test.go b/src/encoding/json/v2_diff_test.go
index 7a561732f4..9d0798ed1d 100644
--- a/src/encoding/json/v2_diff_test.go
+++ b/src/encoding/json/v2_diff_test.go
@@ -786,8 +786,8 @@ func TestInvalidUTF8(t *testing.T) {
switch {
case json.Version == "v1" && err != nil:
t.Fatalf("json.Marshal error: %v", err)
- case json.Version == "v1" && string(got) != `"\ufffd"`:
- t.Fatalf(`json.Marshal = %s, want "\ufffd"`, got)
+ case json.Version == "v1" && string(got) != "\"\ufffd\"":
+ t.Fatalf(`json.Marshal = %s, want %q`, got, "\ufffd")
case json.Version == "v2" && err == nil:
t.Fatal("json.Marshal error is nil, want non-nil")
}
diff --git a/src/encoding/json/v2_encode.go b/src/encoding/json/v2_encode.go
index cbb167dbd0..c2d620bcbb 100644
--- a/src/encoding/json/v2_encode.go
+++ b/src/encoding/json/v2_encode.go
@@ -10,6 +10,14 @@
//
// See "JSON and Go" for an introduction to this package:
// https://golang.org/doc/articles/json_and_go.html
+//
+// # Security Considerations
+//
+// See the "Security Considerations" section in [encoding/json/v2].
+//
+// For historical reasons, the default behavior of v1 [encoding/json]
+// unfortunately operates with less secure defaults.
+// New usages of JSON in Go are encouraged to use [encoding/json/v2] instead.
package json
import (
diff --git a/src/encoding/json/v2_inject.go b/src/encoding/json/v2_inject.go
index f903588431..31cdb4d61a 100644
--- a/src/encoding/json/v2_inject.go
+++ b/src/encoding/json/v2_inject.go
@@ -73,6 +73,9 @@ func transformUnmarshalError(root any, err error) error {
if err.Err == jsonv2.ErrUnknownName {
return fmt.Errorf("json: unknown field %q", err.JSONPointer.LastToken())
}
+ if err.Err == internal.ErrNilInterface {
+ err.Err = nil // non-descriptive for historical reasons
+ }
// Historically, UnmarshalTypeError has always been inconsistent
// about how it reported position information.
diff --git a/src/encoding/json/v2_options.go b/src/encoding/json/v2_options.go
index 4006d764cc..4dea88ad7e 100644
--- a/src/encoding/json/v2_options.go
+++ b/src/encoding/json/v2_options.go
@@ -36,7 +36,7 @@
// any empty array, slice, map, or string. In contrast, v2 redefines
// `omitempty` to omit a field if it encodes as an "empty" JSON value,
// which is defined as a JSON null, or an empty JSON string, object, or array.
-// The [OmitEmptyWithLegacyDefinition] option controls this behavior difference.
+// The [OmitEmptyWithLegacySemantics] option controls this behavior difference.
// Note that `omitempty` behaves identically in both v1 and v2 for a
// Go array, slice, map, or string (assuming no user-defined MarshalJSON method
// overrides the default representation). Existing usages of `omitempty` on a
@@ -66,7 +66,7 @@
//
// - In v1, a Go byte array is represented as a JSON array of JSON numbers.
// In contrast, in v2 a Go byte array is represented as a Base64-encoded JSON string.
-// The [FormatBytesWithLegacySemantics] option controls this behavior difference.
+// The [FormatByteArrayAsArray] option controls this behavior difference.
// To explicitly specify a Go struct field to use a particular representation,
// either the `format:array` or `format:base64` field option can be specified.
// Field-specified options take precedence over caller-specified options.
@@ -118,9 +118,8 @@
//
// - In v1, a [time.Duration] is represented as a JSON number containing
// the decimal number of nanoseconds. In contrast, in v2 a [time.Duration]
-// is represented as a JSON string containing the formatted duration
-// (e.g., "1h2m3.456s") according to [time.Duration.String].
-// The [FormatTimeWithLegacySemantics] option controls this behavior difference.
+// has no default representation and results in a runtime error.
+// The [FormatDurationAsNano] option controls this behavior difference.
// To explicitly specify a Go struct field to use a particular representation,
// either the `format:nano` or `format:units` field option can be specified.
// Field-specified options take precedence over caller-specified options.
@@ -172,6 +171,9 @@
// but the v1 package will forever remain supported.
package json
+// TODO(https://go.dev/issue/71631): Update the "Migrating to v2" documentation
+// with default v2 behavior for [time.Duration].
+
import (
"encoding"
@@ -204,12 +206,14 @@ type Options = jsonopts.Options
// It is equivalent to the following boolean options being set to true:
//
// - [CallMethodsWithLegacySemantics]
-// - [EscapeInvalidUTF8]
+// - [FormatByteArrayAsArray]
// - [FormatBytesWithLegacySemantics]
-// - [FormatTimeWithLegacySemantics]
+// - [FormatDurationAsNano]
// - [MatchCaseSensitiveDelimiter]
// - [MergeWithLegacySemantics]
-// - [OmitEmptyWithLegacyDefinition]
+// - [OmitEmptyWithLegacySemantics]
+// - [ParseBytesWithLooseRFC4648]
+// - [ParseTimeWithLooseRFC3339]
// - [ReportErrorsWithLegacySemantics]
// - [StringifyWithLegacySemantics]
// - [UnmarshalArrayFromAnyLength]
@@ -279,30 +283,25 @@ func CallMethodsWithLegacySemantics(v bool) Options {
}
}
-// EscapeInvalidUTF8 specifies that when encoding a [jsontext.String]
-// with bytes of invalid UTF-8, such bytes are escaped as
-// a hexadecimal Unicode codepoint (i.e., \ufffd).
-// In contrast, the v2 default is to use the minimal representation,
-// which is to encode invalid UTF-8 as the Unicode replacement rune itself
-// (without any form of escaping).
+// FormatByteArrayAsArray specifies that a Go [N]byte is
+// formatted as a normal Go array in contrast to the v2 default of
+// formatting [N]byte as using binary data encoding (RFC 4648).
+// If a struct field has a `format` tag option,
+// then the specified formatting takes precedence.
//
-// This only affects encoding and is ignored when decoding.
+// This affects either marshaling or unmarshaling.
// The v1 default is true.
-func EscapeInvalidUTF8(v bool) Options {
+func FormatByteArrayAsArray(v bool) Options {
if v {
- return jsonflags.EscapeInvalidUTF8 | 1
+ return jsonflags.FormatByteArrayAsArray | 1
} else {
- return jsonflags.EscapeInvalidUTF8 | 0
+ return jsonflags.FormatByteArrayAsArray | 0
}
}
// FormatBytesWithLegacySemantics specifies that handling of
// []~byte and [N]~byte types follow legacy semantics:
//
-// - A Go [N]~byte is always treated as as a normal Go array
-// in contrast to the v2 default of treating [N]byte as
-// using some form of binary data encoding (RFC 4648).
-//
// - A Go []~byte is to be treated as using some form of
// binary data encoding (RFC 4648) in contrast to the v2 default
// of only treating []byte as such. In particular, v2 does not
@@ -317,12 +316,6 @@ func EscapeInvalidUTF8(v bool) Options {
// In contrast, the v2 default is to report an error unmarshaling
// a JSON array when expecting some form of binary data encoding.
//
-// - When unmarshaling, '\r' and '\n' characters are ignored
-// within the encoded "base32" and "base64" data.
-// In contrast, the v2 default is to report an error in order to be
-// strictly compliant with RFC 4648, section 3.3,
-// which specifies that non-alphabet characters must be rejected.
-//
// This affects either marshaling or unmarshaling.
// The v1 default is true.
func FormatBytesWithLegacySemantics(v bool) Options {
@@ -333,29 +326,20 @@ func FormatBytesWithLegacySemantics(v bool) Options {
}
}
-// FormatTimeWithLegacySemantics specifies that [time] types are formatted
-// with legacy semantics:
-//
-// - When marshaling or unmarshaling, a [time.Duration] is formatted as
-// a JSON number representing the number of nanoseconds.
-// In contrast, the default v2 behavior uses a JSON string
-// with the duration formatted with [time.Duration.String].
-// If a duration field has a `format` tag option,
-// then the specified formatting takes precedence.
-//
-// - When unmarshaling, a [time.Time] follows loose adherence to RFC 3339.
-// In particular, it permits historically incorrect representations,
-// allowing for deviations in hour format, sub-second separator,
-// and timezone representation. In contrast, the default v2 behavior
-// is to strictly comply with the grammar specified in RFC 3339.
+// FormatDurationAsNano specifies that a [time.Duration] is
+// formatted as a JSON number representing the number of nanoseconds
+// in contrast to the v2 default of reporting an error.
+// If a duration field has a `format` tag option,
+// then the specified formatting takes precedence.
//
// This affects either marshaling or unmarshaling.
// The v1 default is true.
-func FormatTimeWithLegacySemantics(v bool) Options {
+func FormatDurationAsNano(v bool) Options {
+ // TODO(https://go.dev/issue/71631): Update documentation with v2 behavior.
if v {
- return jsonflags.FormatTimeWithLegacySemantics | 1
+ return jsonflags.FormatDurationAsNano | 1
} else {
- return jsonflags.FormatTimeWithLegacySemantics | 0
+ return jsonflags.FormatDurationAsNano | 0
}
}
@@ -404,7 +388,7 @@ func MergeWithLegacySemantics(v bool) Options {
}
}
-// OmitEmptyWithLegacyDefinition specifies that the `omitempty` tag option
+// OmitEmptyWithLegacySemantics specifies that the `omitempty` tag option
// follows a definition of empty where a field is omitted if the Go value is
// false, 0, a nil pointer, a nil interface value,
// or any empty array, slice, map, or string.
@@ -418,11 +402,45 @@ func MergeWithLegacySemantics(v bool) Options {
//
// This only affects marshaling and is ignored when unmarshaling.
// The v1 default is true.
-func OmitEmptyWithLegacyDefinition(v bool) Options {
+func OmitEmptyWithLegacySemantics(v bool) Options {
+ if v {
+ return jsonflags.OmitEmptyWithLegacySemantics | 1
+ } else {
+ return jsonflags.OmitEmptyWithLegacySemantics | 0
+ }
+}
+
+// ParseBytesWithLooseRFC4648 specifies that when parsing
+// binary data encoded as "base32" or "base64",
+// to ignore the presence of '\r' and '\n' characters.
+// In contrast, the v2 default is to report an error in order to be
+// strictly compliant with RFC 4648, section 3.3,
+// which specifies that non-alphabet characters must be rejected.
+//
+// This only affects unmarshaling and is ignored when marshaling.
+// The v1 default is true.
+func ParseBytesWithLooseRFC4648(v bool) Options {
+ if v {
+ return jsonflags.ParseBytesWithLooseRFC4648 | 1
+ } else {
+ return jsonflags.ParseBytesWithLooseRFC4648 | 0
+ }
+}
+
+// ParseTimeWithLooseRFC3339 specifies that a [time.Time]
+// parses according to loose adherence to RFC 3339.
+// In particular, it permits historically incorrect representations,
+// allowing for deviations in hour format, sub-second separator,
+// and timezone representation. In contrast, the default v2 behavior
+// is to strictly comply with the grammar specified in RFC 3339.
+//
+// This only affects unmarshaling and is ignored when marshaling.
+// The v1 default is true.
+func ParseTimeWithLooseRFC3339(v bool) Options {
if v {
- return jsonflags.OmitEmptyWithLegacyDefinition | 1
+ return jsonflags.ParseTimeWithLooseRFC3339 | 1
} else {
- return jsonflags.OmitEmptyWithLegacyDefinition | 0
+ return jsonflags.ParseTimeWithLooseRFC3339 | 0
}
}
diff --git a/src/encoding/json/v2_scanner.go b/src/encoding/json/v2_scanner.go
index 475bf58b20..aef045f466 100644
--- a/src/encoding/json/v2_scanner.go
+++ b/src/encoding/json/v2_scanner.go
@@ -30,6 +30,10 @@ func checkValid(data []byte) error {
xd := export.Decoder(d)
xd.Struct.Flags.Set(jsonflags.AllowDuplicateNames | jsonflags.AllowInvalidUTF8 | 1)
if _, err := d.ReadValue(); err != nil {
+ if err == io.EOF {
+ offset := d.InputOffset() + int64(len(d.UnreadBuffer()))
+ err = &jsontext.SyntacticError{ByteOffset: offset, Err: io.ErrUnexpectedEOF}
+ }
return transformSyntacticError(err)
}
if err := xd.CheckEOF(); err != nil {
diff --git a/src/encoding/json/v2_stream.go b/src/encoding/json/v2_stream.go
index d58bafbfd0..28e72c0a52 100644
--- a/src/encoding/json/v2_stream.go
+++ b/src/encoding/json/v2_stream.go
@@ -8,6 +8,7 @@ package json
import (
"bytes"
+ "errors"
"io"
"encoding/json/jsontext"
@@ -68,7 +69,7 @@ func (dec *Decoder) Decode(v any) error {
b, err := dec.dec.ReadValue()
if err != nil {
dec.err = transformSyntacticError(err)
- if dec.err == errUnexpectedEnd {
+ if dec.err.Error() == errUnexpectedEnd.Error() {
// NOTE: Decode has always been inconsistent with Unmarshal
// with regard to the exact error value for truncated input.
dec.err = io.ErrUnexpectedEOF
@@ -193,6 +194,16 @@ func (d Delim) String() string {
func (dec *Decoder) Token() (Token, error) {
tok, err := dec.dec.ReadToken()
if err != nil {
+ // Historically, v1 would report just [io.EOF] if
+ // the stream is a prefix of a valid JSON value.
+ // It reports an unwrapped [io.ErrUnexpectedEOF] if
+ // truncated within a JSON token such as a literal, number, or string.
+ if errors.Is(err, io.ErrUnexpectedEOF) {
+ if len(bytes.Trim(dec.dec.UnreadBuffer(), " \r\n\t,:")) == 0 {
+ return nil, io.EOF
+ }
+ return nil, io.ErrUnexpectedEOF
+ }
return nil, transformSyntacticError(err)
}
switch k := tok.Kind(); k {
diff --git a/src/encoding/json/v2_stream_test.go b/src/encoding/json/v2_stream_test.go
index 38eb6660a2..b8951f8205 100644
--- a/src/encoding/json/v2_stream_test.go
+++ b/src/encoding/json/v2_stream_test.go
@@ -502,3 +502,38 @@ func TestHTTPDecoding(t *testing.T) {
t.Errorf("Decode error:\n\tgot: %v\n\twant: io.EOF", err)
}
}
+
+func TestTokenTruncation(t *testing.T) {
+ tests := []struct {
+ in string
+ err error
+ }{
+ {in: ``, err: io.EOF},
+ {in: `{`, err: io.EOF},
+ {in: `{"`, err: io.ErrUnexpectedEOF},
+ {in: `{"k"`, err: io.EOF},
+ {in: `{"k":`, err: io.EOF},
+ {in: `{"k",`, err: &SyntaxError{"invalid character ',' after object key", int64(len(`{"k"`))}},
+ {in: `{"k"}`, err: &SyntaxError{"invalid character '}' after object key", int64(len(`{"k"`))}},
+ {in: ` [0`, err: io.EOF},
+ {in: `[0.`, err: io.ErrUnexpectedEOF},
+ {in: `[0. `, err: &SyntaxError{"invalid character ' ' in numeric literal", int64(len(`[0.`))}},
+ {in: `[0,`, err: io.EOF},
+ {in: `[0:`, err: &SyntaxError{"invalid character ':' after array element", int64(len(`[0`))}},
+ {in: `n`, err: io.ErrUnexpectedEOF},
+ {in: `nul`, err: io.ErrUnexpectedEOF},
+ {in: `fal `, err: &SyntaxError{"invalid character ' ' in literal false (expecting 's')", int64(len(`fal`))}},
+ {in: `false`, err: io.EOF},
+ }
+ for _, tt := range tests {
+ d := NewDecoder(strings.NewReader(tt.in))
+ for i := 0; true; i++ {
+ if _, err := d.Token(); err != nil {
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("`%s`: %d.Token error = %#v, want %v", tt.in, i, err, tt.err)
+ }
+ break
+ }
+ }
+ }
+}
diff --git a/src/errors/join.go b/src/errors/join.go
index dd50089c29..d0a1bc298e 100644
--- a/src/errors/join.go
+++ b/src/errors/join.go
@@ -28,12 +28,10 @@ func Join(errs ...error) error {
}
if n == 1 {
for _, err := range errs {
- if err != nil {
- if _, ok := err.(interface {
- Unwrap() []error
- }); ok {
- return err
- }
+ if _, ok := err.(interface {
+ Unwrap() []error
+ }); ok {
+ return err
}
}
}
diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go
index d62f177b1a..faa6a27ea6 100644
--- a/src/go/build/deps_test.go
+++ b/src/go/build/deps_test.go
@@ -93,7 +93,8 @@ var depsRules = `
< internal/msan
< internal/asan
< internal/runtime/sys
- < internal/runtime/syscall
+ < internal/runtime/syscall/linux
+ < internal/runtime/syscall/windows
< internal/runtime/atomic
< internal/runtime/exithook
< internal/runtime/gc
@@ -797,6 +798,7 @@ var depsRules = `
FMT < math/big/internal/asmgen;
FMT, testing < internal/cgrouptest;
+ C, CGO < internal/runtime/cgobench;
`
// listStdPkgs returns the same list of packages as "go list std".
diff --git a/src/go/parser/parser.go b/src/go/parser/parser.go
index 8a2f95976f..9ee1576a99 100644
--- a/src/go/parser/parser.go
+++ b/src/go/parser/parser.go
@@ -455,25 +455,6 @@ var exprEnd = map[token.Token]bool{
token.RBRACE: true,
}
-// safePos returns a valid file position for a given position: If pos
-// is valid to begin with, safePos returns pos. If pos is out-of-range,
-// safePos returns the EOF position.
-//
-// This is hack to work around "artificial" end positions in the AST which
-// are computed by adding 1 to (presumably valid) token positions. If the
-// token positions are invalid due to parse errors, the resulting end position
-// may be past the file's EOF position, which would lead to panics if used
-// later on.
-func (p *parser) safePos(pos token.Pos) (res token.Pos) {
- defer func() {
- if recover() != nil {
- res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
- }
- }()
- _ = p.file.Offset(pos) // trigger a panic if position is out-of-range
- return pos
-}
-
// ----------------------------------------------------------------------------
// Identifiers
@@ -2022,7 +2003,7 @@ func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
}
if _, isBad := x.(*ast.BadExpr); !isBad {
// only report error if it's a new one
- p.error(p.safePos(x.End()), fmt.Sprintf("expression in %s must be function call", callType))
+ p.error(x.End(), fmt.Sprintf("expression in %s must be function call", callType))
}
return nil
}
@@ -2100,7 +2081,7 @@ func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
found = "assignment"
}
p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
- return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
+ return &ast.BadExpr{From: s.Pos(), To: s.End()}
}
// parseIfHeader is an adjusted version of parser.header
@@ -2423,7 +2404,7 @@ func (p *parser) parseForStmt() ast.Stmt {
key, value = as.Lhs[0], as.Lhs[1]
default:
p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
- return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
+ return &ast.BadStmt{From: pos, To: body.End()}
}
// parseSimpleStmt returned a right-hand side that
// is a single unary expression of the form "range x"
diff --git a/src/go/types/alias.go b/src/go/types/alias.go
index 3836ce9bb9..f15ff57030 100644
--- a/src/go/types/alias.go
+++ b/src/go/types/alias.go
@@ -8,7 +8,6 @@
package types
import (
- "fmt"
"go/token"
)
@@ -53,7 +52,7 @@ type Alias struct {
}
// NewAlias creates a new Alias type with the given type name and rhs.
-// rhs must not be nil.
+// If rhs is nil, the alias is incomplete.
func NewAlias(obj *TypeName, rhs Type) *Alias {
alias := (*Checker)(nil).newAlias(obj, rhs)
// Ensure that alias.actual is set (#65455).
@@ -101,6 +100,7 @@ func (a *Alias) Rhs() Type { return a.fromRHS }
// otherwise it follows t's alias chain until it
// reaches a non-alias type which is then returned.
// Consequently, the result is never an alias type.
+// Returns nil if the alias is incomplete.
func Unalias(t Type) Type {
if a0, _ := t.(*Alias); a0 != nil {
return unalias(a0)
@@ -116,19 +116,10 @@ func unalias(a0 *Alias) Type {
for a := a0; a != nil; a, _ = t.(*Alias) {
t = a.fromRHS
}
- if t == nil {
- panic(fmt.Sprintf("non-terminated alias %s", a0.obj.name))
- }
-
- // Memoize the type only if valid.
- // In the presence of unfinished cyclic declarations, Unalias
- // would otherwise latch the invalid value (#66704).
- // TODO(adonovan): rethink, along with checker.typeDecl's use
- // of Invalid to mark unfinished aliases.
- if t != Typ[Invalid] {
- a0.actual = t
- }
+ // It's fine to memoize nil types since it's the zero value for actual.
+ // It accomplishes nothing.
+ a0.actual = t
return t
}
@@ -140,9 +131,8 @@ func asNamed(t Type) *Named {
}
// newAlias creates a new Alias type with the given type name and rhs.
-// rhs must not be nil.
+// If rhs is nil, the alias is incomplete.
func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias {
- assert(rhs != nil)
a := new(Alias)
a.obj = obj
a.orig = a
@@ -175,12 +165,6 @@ func (check *Checker) newAliasInstance(pos token.Pos, orig *Alias, targs []Type,
func (a *Alias) cleanup() {
// Ensure a.actual is set before types are published,
- // so Unalias is a pure "getter", not a "setter".
- actual := Unalias(a)
-
- if actual == Typ[Invalid] {
- // We don't set a.actual to Typ[Invalid] during type checking,
- // as it may indicate that the RHS is not fully set up.
- a.actual = actual
- }
+ // so unalias is a pure "getter", not a "setter".
+ unalias(a)
}
diff --git a/src/go/types/alias_test.go b/src/go/types/alias_test.go
new file mode 100644
index 0000000000..aa12336e21
--- /dev/null
+++ b/src/go/types/alias_test.go
@@ -0,0 +1,85 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "testing"
+)
+
+func TestIssue74181(t *testing.T) {
+ t.Setenv("GODEBUG", "gotypesalias=1")
+
+ src := `package p
+
+type AB = A[B]
+
+type _ struct {
+ _ AB
+}
+
+type B struct {
+ f *AB
+}
+
+type A[T any] struct{}`
+
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
+ if err != nil {
+ t.Fatalf("could not parse: %v", err)
+ }
+
+ conf := types.Config{}
+ pkg, err := conf.Check(file.Name.Name, fset, []*ast.File{file}, &types.Info{})
+ if err != nil {
+ t.Fatalf("could not type check: %v", err)
+ }
+
+ b := pkg.Scope().Lookup("B").Type()
+ if n, ok := b.(*types.Named); ok {
+ if s, ok := n.Underlying().(*types.Struct); ok {
+ got := s.Field(0).Type()
+ want := types.NewPointer(pkg.Scope().Lookup("AB").Type())
+ if !types.Identical(got, want) {
+ t.Errorf("wrong type for f: got %v, want %v", got, want)
+ }
+ return
+ }
+ }
+ t.Errorf("unexpected type for B: %v", b)
+}
+
+func TestPartialTypeCheckUndeclaredAliasPanic(t *testing.T) {
+ t.Setenv("GODEBUG", "gotypesalias=1")
+
+ src := `package p
+
+type A = B // undeclared`
+
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
+ if err != nil {
+ t.Fatalf("could not parse: %v", err)
+ }
+
+ conf := types.Config{} // no error handler, panic
+ pkg, _ := conf.Check(file.Name.Name, fset, []*ast.File{file}, &types.Info{})
+ a := pkg.Scope().Lookup("A").Type()
+
+ if alias, ok := a.(*types.Alias); ok {
+ got := alias.Rhs()
+ want := types.Typ[types.Invalid]
+
+ if !types.Identical(got, want) {
+ t.Errorf("wrong type for B: got %v, want %v", got, want)
+ }
+ return
+ }
+ t.Errorf("unexpected type for A: %v", a)
+}
diff --git a/src/go/types/api_test.go b/src/go/types/api_test.go
index 4396b8ae89..f7a98ae280 100644
--- a/src/go/types/api_test.go
+++ b/src/go/types/api_test.go
@@ -3176,3 +3176,39 @@ func (recv T) f(param int) (result int) {
t.Errorf("got:\n%s\nwant:\n%s", got, want)
}
}
+
+func TestIssue73871(t *testing.T) {
+ const src = `package p
+
+func f[T ~[]byte](y T) []byte { return append([]byte(nil), y...) }
+
+// for illustration only:
+type B []byte
+var _ = f[B]
+`
+ fset := token.NewFileSet()
+ f, _ := parser.ParseFile(fset, "p.go", src, 0)
+
+ pkg := NewPackage("p", "p")
+ info := &Info{Types: make(map[ast.Expr]TypeAndValue)}
+ check := NewChecker(&Config{}, fset, pkg, info)
+ if err := check.Files([]*ast.File{f}); err != nil {
+ t.Fatal(err)
+ }
+
+ // Check type inferred for 'append'.
+ //
+ // Before the fix, the inferred type of append's y parameter
+ // was T. When a client such as x/tools/go/ssa instantiated T=B,
+ // it would result in the Signature "func([]byte, B)" with the
+ // variadic flag set, an invalid combination that caused
+ // NewSignatureType to panic.
+ append := f.Decls[0].(*ast.FuncDecl).Body.List[0].(*ast.ReturnStmt).Results[0].(*ast.CallExpr).Fun
+ tAppend := info.TypeOf(append).(*Signature)
+ want := "func([]byte, ...byte) []byte"
+ if got := fmt.Sprint(tAppend); got != want {
+ // Before the fix, tAppend was func([]byte, T) []byte,
+ // where T prints as "<expected string type>".
+ t.Errorf("for append, inferred type %s, want %s", tAppend, want)
+ }
+}
diff --git a/src/go/types/builtins.go b/src/go/types/builtins.go
index d190212e05..e9f2b3e21d 100644
--- a/src/go/types/builtins.go
+++ b/src/go/types/builtins.go
@@ -94,22 +94,25 @@ func (check *Checker) builtin(x *operand, call *ast.CallExpr, id builtinId) (_ b
// to type []byte with a second argument of string type followed by ... .
// This form appends the bytes of the string."
- // get special case out of the way
+ // Handle append(bytes, y...) special case, where
+ // the type set of y is {string} or {string, []byte}.
var sig *Signature
if nargs == 2 && hasDots(call) {
if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok {
y := args[1]
+ hasString := false
typeset(y.typ, func(_, u Type) bool {
if s, _ := u.(*Slice); s != nil && Identical(s.elem, universeByte) {
return true
}
if isString(u) {
+ hasString = true
return true
}
y = nil
return false
})
- if y != nil {
+ if y != nil && hasString {
// setting the signature also signals that we're done
sig = makeSig(x.typ, x.typ, y.typ)
sig.variadic = true
diff --git a/src/go/types/check_test.go b/src/go/types/check_test.go
index 823525828a..44e4fd0104 100644
--- a/src/go/types/check_test.go
+++ b/src/go/types/check_test.go
@@ -426,12 +426,6 @@ func TestIssue47243_TypedRHS(t *testing.T) {
}
func TestCheck(t *testing.T) {
- old := buildcfg.Experiment.RangeFunc
- defer func() {
- buildcfg.Experiment.RangeFunc = old
- }()
- buildcfg.Experiment.RangeFunc = true
-
DefPredeclaredTestFuncs()
testDirFiles(t, "../../internal/types/testdata/check", false)
}
diff --git a/src/go/types/decl.go b/src/go/types/decl.go
index 742191cc1c..f40a8e54b9 100644
--- a/src/go/types/decl.go
+++ b/src/go/types/decl.go
@@ -321,11 +321,15 @@ func (check *Checker) cycleError(cycle []Object, start int) {
// If obj is a type alias, mark it as valid (not broken) in order to avoid follow-on errors.
obj := cycle[start]
tname, _ := obj.(*TypeName)
- if tname != nil && tname.IsAlias() {
- // If we use Alias nodes, it is initialized with Typ[Invalid].
- // TODO(gri) Adjust this code if we initialize with nil.
- if !check.conf._EnableAlias {
- check.validAlias(tname, Typ[Invalid])
+ if tname != nil {
+ if check.conf._EnableAlias {
+ if a, ok := tname.Type().(*Alias); ok {
+ a.fromRHS = Typ[Invalid]
+ }
+ } else {
+ if tname.IsAlias() {
+ check.validAlias(tname, Typ[Invalid])
+ }
}
}
@@ -582,17 +586,18 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *TypeName
}
if check.conf._EnableAlias {
- // TODO(gri) Should be able to use nil instead of Typ[Invalid] to mark
- // the alias as incomplete. Currently this causes problems
- // with certain cycles. Investigate.
- //
- // NOTE(adonovan): to avoid the Invalid being prematurely observed
- // by (e.g.) a var whose type is an unfinished cycle,
- // Unalias does not memoize if Invalid. Perhaps we should use a
- // special sentinel distinct from Invalid.
- alias := check.newAlias(obj, Typ[Invalid])
+ alias := check.newAlias(obj, nil)
setDefType(def, alias)
+ // If we could not type the RHS, set it to invalid. This should
+ // only ever happen if we panic before setting.
+ defer func() {
+ if alias.fromRHS == nil {
+ alias.fromRHS = Typ[Invalid]
+ unalias(alias)
+ }
+ }()
+
// handle type parameters even if not allowed (Alias type is supported)
if tparam0 != nil {
if !versionErr && !buildcfg.Experiment.AliasTypeParams {
@@ -606,8 +611,9 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *ast.TypeSpec, def *TypeName
rhs = check.definedType(tdecl.Type, obj)
assert(rhs != nil)
+
alias.fromRHS = rhs
- Unalias(alias) // resolve alias.actual
+ unalias(alias) // resolve alias.actual
} else {
// With Go1.23, the default behavior is to use Alias nodes,
// reflected by check.enableAlias. Signal non-default behavior.
diff --git a/src/go/types/expr.go b/src/go/types/expr.go
index aa8543f081..97d8c42997 100644
--- a/src/go/types/expr.go
+++ b/src/go/types/expr.go
@@ -887,6 +887,10 @@ func (check *Checker) matchTypes(x, y *operand) {
if isTyped(x.typ) && isTyped(y.typ) {
return false
}
+ // A numeric type can only convert to another numeric type.
+ if allNumeric(x.typ) != allNumeric(y.typ) {
+ return false
+ }
// An untyped operand may convert to its default type when paired with an empty interface
// TODO(gri) This should only matter for comparisons (the only binary operation that is
// valid with interfaces), but in that case the assignability check should take
diff --git a/src/go/types/named.go b/src/go/types/named.go
index 1282abfa3f..72fd4970bb 100644
--- a/src/go/types/named.go
+++ b/src/go/types/named.go
@@ -130,8 +130,8 @@ type Named struct {
// accessed.
methods []*Func
- // loader may be provided to lazily load type parameters, underlying type, and methods.
- loader func(*Named) (tparams []*TypeParam, underlying Type, methods []*Func)
+ // loader may be provided to lazily load type parameters, underlying type, methods, and delayed functions
+ loader func(*Named) ([]*TypeParam, Type, []*Func, []func())
}
// instance holds information that is only necessary for instantiated named
@@ -146,9 +146,11 @@ type instance struct {
// namedState represents the possible states that a named type may assume.
type namedState uint32
+// Note: the order of states is relevant
const (
unresolved namedState = iota // tparams, underlying type and methods might be unavailable
- resolved // resolve has run; methods might be incomplete (for instances)
+ resolved // resolve has run; methods might be unexpanded (for instances)
+ loaded // loader has run; constraints might be unexpanded (for generic types)
complete // all data is known
)
@@ -170,7 +172,7 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
// accessible; but if n is an instantiated type, its methods may still be
// unexpanded.
func (n *Named) resolve() *Named {
- if n.state() >= resolved { // avoid locking below
+ if n.state() > unresolved { // avoid locking below
return n
}
@@ -179,7 +181,7 @@ func (n *Named) resolve() *Named {
n.mu.Lock()
defer n.mu.Unlock()
- if n.state() >= resolved {
+ if n.state() > unresolved {
return n
}
@@ -215,13 +217,20 @@ func (n *Named) resolve() *Named {
assert(n.underlying == nil)
assert(n.TypeArgs().Len() == 0) // instances are created by instantiation, in which case n.loader is nil
- tparams, underlying, methods := n.loader(n)
+ tparams, underlying, methods, delayed := n.loader(n)
+ n.loader = nil
n.tparams = bindTParams(tparams)
n.underlying = underlying
n.fromRHS = underlying // for cycle detection
n.methods = methods
- n.loader = nil
+
+ // advance state to avoid deadlock calling delayed functions
+ n.setState(loaded)
+
+ for _, f := range delayed {
+ f()
+ }
}
n.setState(complete)
diff --git a/src/go/types/object.go b/src/go/types/object.go
index aa7dcb835c..823c03c7fd 100644
--- a/src/go/types/object.go
+++ b/src/go/types/object.go
@@ -296,7 +296,7 @@ func NewTypeName(pos token.Pos, pkg *Package, name string, typ Type) *TypeName {
// NewTypeNameLazy returns a new defined type like NewTypeName, but it
// lazily calls resolve to finish constructing the Named object.
-func _NewTypeNameLazy(pos token.Pos, pkg *Package, name string, load func(named *Named) (tparams []*TypeParam, underlying Type, methods []*Func)) *TypeName {
+func _NewTypeNameLazy(pos token.Pos, pkg *Package, name string, load func(*Named) ([]*TypeParam, Type, []*Func, []func())) *TypeName {
obj := NewTypeName(pos, pkg, name, nil)
NewNamed(obj, nil, nil).loader = load
return obj
diff --git a/src/go/types/range.go b/src/go/types/range.go
index ed7d83283c..303d001c72 100644
--- a/src/go/types/range.go
+++ b/src/go/types/range.go
@@ -12,7 +12,6 @@ package types
import (
"go/ast"
"go/constant"
- "internal/buildcfg"
. "internal/types/errors"
)
@@ -240,7 +239,7 @@ func rangeKeyVal(check *Checker, orig Type, allowVersion func(goVersion) bool) (
assert(typ.dir != SendOnly)
return typ.elem, nil, "", true
case *Signature:
- if !buildcfg.Experiment.RangeFunc && allowVersion != nil && !allowVersion(go1_23) {
+ if allowVersion != nil && !allowVersion(go1_23) {
return bad("requires go1.23 or later")
}
// check iterator arity
diff --git a/src/hash/crc32/crc32_amd64.go b/src/hash/crc32/crc32_amd64.go
index 6be129f5dd..105ce01a1e 100644
--- a/src/hash/crc32/crc32_amd64.go
+++ b/src/hash/crc32/crc32_amd64.go
@@ -13,6 +13,11 @@ import (
"unsafe"
)
+// Offset into internal/cpu records for use in assembly.
+const (
+ offsetX86HasAVX512VPCLMULQDQL = unsafe.Offsetof(cpu.X86.HasAVX512VPCLMULQDQ)
+)
+
// This file contains the code to call the SSE 4.2 version of the Castagnoli
// and IEEE CRC.
diff --git a/src/hash/crc32/crc32_amd64.s b/src/hash/crc32/crc32_amd64.s
index 6af6c253a7..85d8b67a71 100644
--- a/src/hash/crc32/crc32_amd64.s
+++ b/src/hash/crc32/crc32_amd64.s
@@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
#include "textflag.h"
+#include "go_asm.h"
// castagnoliSSE42 updates the (non-inverted) crc with the given buffer.
//
@@ -136,15 +137,23 @@ loop:
// Linux kernel, since they avoid the costly
// PSHUFB 16 byte reversal proposed in the
// original Intel paper.
+// Splatted so it can be loaded with a single VMOVDQU64
DATA r2r1<>+0(SB)/8, $0x154442bd4
DATA r2r1<>+8(SB)/8, $0x1c6e41596
+DATA r2r1<>+16(SB)/8, $0x154442bd4
+DATA r2r1<>+24(SB)/8, $0x1c6e41596
+DATA r2r1<>+32(SB)/8, $0x154442bd4
+DATA r2r1<>+40(SB)/8, $0x1c6e41596
+DATA r2r1<>+48(SB)/8, $0x154442bd4
+DATA r2r1<>+56(SB)/8, $0x1c6e41596
+
DATA r4r3<>+0(SB)/8, $0x1751997d0
DATA r4r3<>+8(SB)/8, $0x0ccaa009e
DATA rupoly<>+0(SB)/8, $0x1db710641
DATA rupoly<>+8(SB)/8, $0x1f7011641
DATA r5<>+0(SB)/8, $0x163cd6124
-GLOBL r2r1<>(SB),RODATA,$16
+GLOBL r2r1<>(SB), RODATA, $64
GLOBL r4r3<>(SB),RODATA,$16
GLOBL rupoly<>(SB),RODATA,$16
GLOBL r5<>(SB),RODATA,$8
@@ -158,6 +167,43 @@ TEXT ·ieeeCLMUL(SB),NOSPLIT,$0
MOVQ p+8(FP), SI // data pointer
MOVQ p_len+16(FP), CX // len(p)
+ // Check feature support and length to be >= 1024 bytes.
+ CMPB internal∕cpu·X86+const_offsetX86HasAVX512VPCLMULQDQL(SB), $1
+ JNE useSSE42
+ CMPQ CX, $1024
+ JL useSSE42
+
+ // Use AVX512. Zero upper and Z10 and load initial CRC into lower part of Z10.
+ VPXORQ Z10, Z10, Z10
+ VMOVAPS X0, X10
+ VMOVDQU64 (SI), Z1
+ VPXORQ Z10, Z1, Z1 // Merge initial CRC value into Z1
+ ADDQ $64, SI // buf+=64
+ SUBQ $64, CX // len-=64
+
+ VMOVDQU64 r2r1<>+0(SB), Z0
+
+loopback64Avx512:
+ VMOVDQU64 (SI), Z11 // Load next
+ VPCLMULQDQ $0x11, Z0, Z1, Z5
+ VPCLMULQDQ $0, Z0, Z1, Z1
+ VPTERNLOGD $0x96, Z11, Z5, Z1 // Combine results with xor into Z1
+
+ ADDQ $0x40, DI
+ ADDQ $64, SI // buf+=64
+ SUBQ $64, CX // len-=64
+ CMPQ CX, $64 // Less than 64 bytes left?
+ JGE loopback64Avx512
+
+ // Unfold result into XMM1-XMM4 to match SSE4 code.
+ VEXTRACTF32X4 $1, Z1, X2 // X2: Second 128-bit lane
+ VEXTRACTF32X4 $2, Z1, X3 // X3: Third 128-bit lane
+ VEXTRACTF32X4 $3, Z1, X4 // X4: Fourth 128-bit lane
+ VZEROUPPER
+ JMP remain64
+
+ PCALIGN $16
+useSSE42:
MOVOU (SI), X1
MOVOU 16(SI), X2
MOVOU 32(SI), X3
@@ -207,6 +253,7 @@ loopback64:
CMPQ CX, $64 // Less than 64 bytes left?
JGE loopback64
+ PCALIGN $16
/* Fold result into a single register (X1) */
remain64:
MOVOA r4r3<>+0(SB), X0
diff --git a/src/hash/crc32/crc32_test.go b/src/hash/crc32/crc32_test.go
index 40acd7da4f..eb5e73c394 100644
--- a/src/hash/crc32/crc32_test.go
+++ b/src/hash/crc32/crc32_test.go
@@ -11,6 +11,7 @@ import (
"internal/testhash"
"io"
"math/rand"
+ "strings"
"testing"
)
@@ -67,6 +68,8 @@ var golden = []test{
{0x8c79fd79, 0x297a88ed, "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley", "crc\x01ʇ\x91Ml+\xb8\xa7", "crc\x01wB\x84\x81\xbf\xd6S\xdd"},
{0xa20b7167, 0x66ed1d8b, "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule", "crc\x01ʇ\x91M<lR[", "crc\x01wB\x84\x81{\xaco\xb1"},
{0x8e0bb443, 0xdcded527, "How can you write a big system without C++? -Paul Glick", "crc\x01ʇ\x91M\x0e\x88\x89\xed", "crc\x01wB\x84\x813\xd7C\u007f"},
+ {0x1010dab0, 0x8a11661f, strings.Repeat("01234567", 1024), "crc\x01ʇ\x91M\x92\xe5\xba\xf3", "crc\x01wB\x84\x81\x1a\x02\x88Y"},
+ {0x772d04d7, 0x5a6f5c45, strings.Repeat("a", 1024+65), "crc\x01ʇ\x91M\xe7Љ\xd1", "crc\x01wB\x84\x81\x95B\xa9("},
}
// testGoldenIEEE verifies that the given function returns
diff --git a/src/hash/maphash/maphash_runtime.go b/src/hash/maphash/maphash_runtime.go
index 91e7d49e2c..36ac638071 100644
--- a/src/hash/maphash/maphash_runtime.go
+++ b/src/hash/maphash/maphash_runtime.go
@@ -9,7 +9,6 @@ package maphash
import (
"internal/abi"
"internal/goarch"
- "internal/goexperiment"
"unsafe"
)
@@ -51,12 +50,7 @@ func comparableHash[T comparable](v T, seed Seed) uint64 {
s := seed.s
var m map[T]struct{}
mTyp := abi.TypeOf(m)
- var hasher func(unsafe.Pointer, uintptr) uintptr
- if goexperiment.SwissMap {
- hasher = (*abi.SwissMapType)(unsafe.Pointer(mTyp)).Hasher
- } else {
- hasher = (*abi.OldMapType)(unsafe.Pointer(mTyp)).Hasher
- }
+ hasher := (*abi.MapType)(unsafe.Pointer(mTyp)).Hasher
if goarch.PtrSize == 8 {
return uint64(hasher(abi.NoEscape(unsafe.Pointer(&v)), uintptr(s)))
}
diff --git a/src/image/image.go b/src/image/image.go
index f08182ba06..2cc94075b2 100644
--- a/src/image/image.go
+++ b/src/image/image.go
@@ -34,7 +34,9 @@
// regardless of whether the image is itself malformed or not. A call to
// [DecodeConfig] which returns a header which does not match the image returned
// by [Decode] may be considered a security issue, and should be reported per the
-// [Go Security Policy](https://go.dev/security/policy).
+// [Go Security Policy].
+//
+// [Go Security Policy]: https://go.dev/security/policy
package image
import (
diff --git a/src/internal/abi/abi.go b/src/internal/abi/abi.go
index e1c8adccc7..6026701659 100644
--- a/src/internal/abi/abi.go
+++ b/src/internal/abi/abi.go
@@ -98,5 +98,7 @@ func (b *IntArgRegBitmap) Set(i int) {
//
//go:nosplit
func (b *IntArgRegBitmap) Get(i int) bool {
- return b[i/8]&(uint8(1)<<(i%8)) != 0
+ // Compute p=&b[i/8], but without a bounds check. We don't have the stack for it.
+ p := (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(b)) + uintptr(i/8)))
+ return *p&(uint8(1)<<(i%8)) != 0
}
diff --git a/src/internal/abi/bounds.go b/src/internal/abi/bounds.go
new file mode 100644
index 0000000000..d6859802d2
--- /dev/null
+++ b/src/internal/abi/bounds.go
@@ -0,0 +1,113 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+// This type and constants are for encoding different
+// kinds of bounds check failures.
+type BoundsErrorCode uint8
+
+const (
+ BoundsIndex BoundsErrorCode = iota // s[x], 0 <= x < len(s) failed
+ BoundsSliceAlen // s[?:x], 0 <= x <= len(s) failed
+ BoundsSliceAcap // s[?:x], 0 <= x <= cap(s) failed
+ BoundsSliceB // s[x:y], 0 <= x <= y failed (but boundsSliceA didn't happen)
+ BoundsSlice3Alen // s[?:?:x], 0 <= x <= len(s) failed
+ BoundsSlice3Acap // s[?:?:x], 0 <= x <= cap(s) failed
+ BoundsSlice3B // s[?:x:y], 0 <= x <= y failed (but boundsSlice3A didn't happen)
+ BoundsSlice3C // s[x:y:?], 0 <= x <= y failed (but boundsSlice3A/B didn't happen)
+ BoundsConvert // (*[x]T)(s), 0 <= x <= len(s) failed
+ numBoundsCodes
+)
+
+const (
+ BoundsMaxReg = 15
+ BoundsMaxConst = 31
+)
+
+// Here's how we encode PCDATA_PanicBounds entries:
+
+// We allow 16 registers (0-15) and 32 constants (0-31).
+// Encode the following constant c:
+// bits use
+// -----------------------------
+// 0 x is in a register
+// 1 y is in a register
+//
+// if x is in a register
+// 2 x is signed
+// [3:6] x's register number
+// else
+// [2:6] x's constant value
+//
+// if y is in a register
+// [7:10] y's register number
+// else
+// [7:11] y's constant value
+//
+// The final integer is c * numBoundsCode + code
+
+// TODO: 32-bit
+
+// Encode bounds failure information into an integer for PCDATA_PanicBounds.
+// Register numbers must be in 0-15. Constants must be in 0-31.
+func BoundsEncode(code BoundsErrorCode, signed, xIsReg, yIsReg bool, xVal, yVal int) int {
+ c := int(0)
+ if xIsReg {
+ c |= 1 << 0
+ if signed {
+ c |= 1 << 2
+ }
+ if xVal < 0 || xVal > BoundsMaxReg {
+ panic("bad xReg")
+ }
+ c |= xVal << 3
+ } else {
+ if xVal < 0 || xVal > BoundsMaxConst {
+ panic("bad xConst")
+ }
+ c |= xVal << 2
+ }
+ if yIsReg {
+ c |= 1 << 1
+ if yVal < 0 || yVal > BoundsMaxReg {
+ panic("bad yReg")
+ }
+ c |= yVal << 7
+ } else {
+ if yVal < 0 || yVal > BoundsMaxConst {
+ panic("bad yConst")
+ }
+ c |= yVal << 7
+ }
+ return c*int(numBoundsCodes) + int(code)
+}
+func BoundsDecode(v int) (code BoundsErrorCode, signed, xIsReg, yIsReg bool, xVal, yVal int) {
+ code = BoundsErrorCode(v % int(numBoundsCodes))
+ c := v / int(numBoundsCodes)
+ xIsReg = c&1 != 0
+ c >>= 1
+ yIsReg = c&1 != 0
+ c >>= 1
+ if xIsReg {
+ signed = c&1 != 0
+ c >>= 1
+ xVal = c & 0xf
+ c >>= 4
+ } else {
+ xVal = c & 0x1f
+ c >>= 5
+ }
+ if yIsReg {
+ yVal = c & 0xf
+ c >>= 4
+ } else {
+ yVal = c & 0x1f
+ c >>= 5
+ }
+ if c != 0 {
+ panic("BoundsDecode decoding error")
+ }
+ return
+}
diff --git a/src/internal/abi/map_swiss.go b/src/internal/abi/map.go
index 6c855667e3..4476dda5ca 100644
--- a/src/internal/abi/map_swiss.go
+++ b/src/internal/abi/map.go
@@ -12,24 +12,24 @@ import (
// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
const (
// Number of bits in the group.slot count.
- SwissMapGroupSlotsBits = 3
+ MapGroupSlotsBits = 3
// Number of slots in a group.
- SwissMapGroupSlots = 1 << SwissMapGroupSlotsBits // 8
+ MapGroupSlots = 1 << MapGroupSlotsBits // 8
// Maximum key or elem size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
- SwissMapMaxKeyBytes = 128
- SwissMapMaxElemBytes = 128
+ MapMaxKeyBytes = 128
+ MapMaxElemBytes = 128
ctrlEmpty = 0b10000000
bitsetLSB = 0x0101010101010101
// Value of control word with all empty slots.
- SwissMapCtrlEmpty = bitsetLSB * uint64(ctrlEmpty)
+ MapCtrlEmpty = bitsetLSB * uint64(ctrlEmpty)
)
-type SwissMapType struct {
+type MapType struct {
Type
Key *Type
Elem *Type
@@ -44,21 +44,21 @@ type SwissMapType struct {
// Flag values
const (
- SwissMapNeedKeyUpdate = 1 << iota
- SwissMapHashMightPanic
- SwissMapIndirectKey
- SwissMapIndirectElem
+ MapNeedKeyUpdate = 1 << iota
+ MapHashMightPanic
+ MapIndirectKey
+ MapIndirectElem
)
-func (mt *SwissMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
- return mt.Flags&SwissMapNeedKeyUpdate != 0
+func (mt *MapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
+ return mt.Flags&MapNeedKeyUpdate != 0
}
-func (mt *SwissMapType) HashMightPanic() bool { // true if hash function might panic
- return mt.Flags&SwissMapHashMightPanic != 0
+func (mt *MapType) HashMightPanic() bool { // true if hash function might panic
+ return mt.Flags&MapHashMightPanic != 0
}
-func (mt *SwissMapType) IndirectKey() bool { // store ptr to key instead of key itself
- return mt.Flags&SwissMapIndirectKey != 0
+func (mt *MapType) IndirectKey() bool { // store ptr to key instead of key itself
+ return mt.Flags&MapIndirectKey != 0
}
-func (mt *SwissMapType) IndirectElem() bool { // store ptr to elem instead of elem itself
- return mt.Flags&SwissMapIndirectElem != 0
+func (mt *MapType) IndirectElem() bool { // store ptr to elem instead of elem itself
+ return mt.Flags&MapIndirectElem != 0
}
diff --git a/src/internal/abi/map_noswiss.go b/src/internal/abi/map_noswiss.go
deleted file mode 100644
index ff8609efcf..0000000000
--- a/src/internal/abi/map_noswiss.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package abi
-
-import (
- "unsafe"
-)
-
-// Map constants common to several packages
-// runtime/runtime-gdb.py:MapTypePrinter contains its own copy
-const (
- // Maximum number of key/elem pairs a bucket can hold.
- OldMapBucketCountBits = 3 // log2 of number of elements in a bucket.
- OldMapBucketCount = 1 << OldMapBucketCountBits
-
- // Maximum key or elem size to keep inline (instead of mallocing per element).
- // Must fit in a uint8.
- // Note: fast map functions cannot handle big elems (bigger than MapMaxElemBytes).
- OldMapMaxKeyBytes = 128
- OldMapMaxElemBytes = 128 // Must fit in a uint8.
-)
-
-type OldMapType struct {
- Type
- Key *Type
- Elem *Type
- Bucket *Type // internal type representing a hash bucket
- // function for hashing keys (ptr to key, seed) -> hash
- Hasher func(unsafe.Pointer, uintptr) uintptr
- KeySize uint8 // size of key slot
- ValueSize uint8 // size of elem slot
- BucketSize uint16 // size of bucket
- Flags uint32
-}
-
-// Note: flag values must match those used in the TMAP case
-// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
-func (mt *OldMapType) IndirectKey() bool { // store ptr to key instead of key itself
- return mt.Flags&1 != 0
-}
-func (mt *OldMapType) IndirectElem() bool { // store ptr to elem instead of elem itself
- return mt.Flags&2 != 0
-}
-func (mt *OldMapType) ReflexiveKey() bool { // true if k==k for all keys
- return mt.Flags&4 != 0
-}
-func (mt *OldMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite
- return mt.Flags&8 != 0
-}
-func (mt *OldMapType) HashMightPanic() bool { // true if hash function might panic
- return mt.Flags&16 != 0
-}
diff --git a/src/internal/abi/map_select_noswiss.go b/src/internal/abi/map_select_noswiss.go
deleted file mode 100644
index ab2b69de7e..0000000000
--- a/src/internal/abi/map_select_noswiss.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package abi
-
-// See comment in map_select_swiss.go.
-type mapType = OldMapType
diff --git a/src/internal/abi/map_select_swiss.go b/src/internal/abi/map_select_swiss.go
deleted file mode 100644
index 88a0bb2ebb..0000000000
--- a/src/internal/abi/map_select_swiss.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package abi
-
-// Select the map type that this binary is built using. This is for common
-// lookup methods like Type.Key to know which type to use.
-//
-// Note that mapType *must not be used by any functions called in the
-// compiler to build a target program* because the compiler must use the map
-// type determined by run-time GOEXPERIMENT, not the build tags used to build
-// the compiler.
-//
-// TODO(prattmic): This package is rather confusing because it has many
-// functions that can't be used by the compiler (e.g., Type.Uncommon depends on
-// the layout of type + uncommon objects in the binary. It would be incorrect
-// for an ad-hoc local Type object). It may be best to move code that isn't
-// usable by the compiler out of the package.
-type mapType = SwissMapType
diff --git a/src/internal/abi/symtab.go b/src/internal/abi/symtab.go
index ce322f2d75..86d6700388 100644
--- a/src/internal/abi/symtab.go
+++ b/src/internal/abi/symtab.go
@@ -79,6 +79,7 @@ const (
PCDATA_StackMapIndex = 1
PCDATA_InlTreeIndex = 2
PCDATA_ArgLiveIndex = 3
+ PCDATA_PanicBounds = 4
FUNCDATA_ArgsPointerMaps = 0
FUNCDATA_LocalsPointerMaps = 1
diff --git a/src/internal/abi/type.go b/src/internal/abi/type.go
index 786fb9cd66..1920a8a37f 100644
--- a/src/internal/abi/type.go
+++ b/src/internal/abi/type.go
@@ -24,7 +24,7 @@ type Type struct {
TFlag TFlag // extra type information flags
Align_ uint8 // alignment of variable with this type
FieldAlign_ uint8 // alignment of struct field with this type
- Kind_ Kind // enumeration for C
+ Kind_ Kind // what kind of type this is (string, int, ...)
// function for comparing objects of this type
// (ptr to object A, ptr to object B) -> ==?
Equal func(unsafe.Pointer, unsafe.Pointer) bool
@@ -78,12 +78,6 @@ const (
UnsafePointer
)
-const (
- // TODO (khr, drchase) why aren't these in TFlag? Investigate, fix if possible.
- KindDirectIface Kind = 1 << 5
- KindMask Kind = (1 << 5) - 1
-)
-
// TFlag is used by a Type to signal what extra type information is
// available in the memory directly following the Type value.
type TFlag uint8
@@ -125,6 +119,15 @@ const (
// has type **byte instead of *byte. The runtime will store a
// pointer to the GC pointer bitmask in *GCData.
TFlagGCMaskOnDemand TFlag = 1 << 4
+
+ // TFlagDirectIface means that a value of this type is stored directly
+ // in the data field of an interface, instead of indirectly. Normally
+ // this means the type is pointer-ish.
+ TFlagDirectIface TFlag = 1 << 5
+
+ // Leaving this breadcrumb behind for dlv. It should not be used, and no
+ // Kind should be big enough to set this bit.
+ KindDirectIface Kind = 1 << 5
)
// NameOff is the offset to a name from moduledata.types. See resolveNameOff in runtime.
@@ -190,7 +193,7 @@ func TypeFor[T any]() *Type {
return (*PtrType)(unsafe.Pointer(TypeOf((*T)(nil)))).Elem
}
-func (t *Type) Kind() Kind { return t.Kind_ & KindMask }
+func (t *Type) Kind() Kind { return t.Kind_ }
func (t *Type) HasName() bool {
return t.TFlag&TFlagNamed != 0
@@ -199,14 +202,9 @@ func (t *Type) HasName() bool {
// Pointers reports whether t contains pointers.
func (t *Type) Pointers() bool { return t.PtrBytes != 0 }
-// IfaceIndir reports whether t is stored indirectly in an interface value.
-func (t *Type) IfaceIndir() bool {
- return t.Kind_&KindDirectIface == 0
-}
-
-// isDirectIface reports whether t is stored directly in an interface value.
+// IsDirectIface reports whether t is stored directly in an interface value.
func (t *Type) IsDirectIface() bool {
- return t.Kind_&KindDirectIface != 0
+ return t.TFlag&TFlagDirectIface != 0
}
func (t *Type) GcSlice(begin, end uintptr) []byte {
@@ -357,7 +355,7 @@ func (t *Type) Uncommon() *UncommonType {
return &(*u)(unsafe.Pointer(t)).u
case Map:
type u struct {
- mapType
+ MapType
u UncommonType
}
return &(*u)(unsafe.Pointer(t)).u
@@ -386,7 +384,7 @@ func (t *Type) Elem() *Type {
tt := (*ChanType)(unsafe.Pointer(t))
return tt.Elem
case Map:
- tt := (*mapType)(unsafe.Pointer(t))
+ tt := (*MapType)(unsafe.Pointer(t))
return tt.Elem
case Pointer:
tt := (*PtrType)(unsafe.Pointer(t))
@@ -406,12 +404,12 @@ func (t *Type) StructType() *StructType {
return (*StructType)(unsafe.Pointer(t))
}
-// MapType returns t cast to a *OldMapType or *SwissMapType, or nil if its tag does not match.
-func (t *Type) MapType() *mapType {
+// MapType returns t cast to a *MapType, or nil if its tag does not match.
+func (t *Type) MapType() *MapType {
if t.Kind() != Map {
return nil
}
- return (*mapType)(unsafe.Pointer(t))
+ return (*MapType)(unsafe.Pointer(t))
}
// ArrayType returns t cast to a *ArrayType, or nil if its tag does not match.
@@ -473,7 +471,7 @@ func (t *InterfaceType) NumMethod() int { return len(t.Methods) }
func (t *Type) Key() *Type {
if t.Kind() == Map {
- return (*mapType)(unsafe.Pointer(t)).Key
+ return (*MapType)(unsafe.Pointer(t)).Key
}
return nil
}
diff --git a/src/internal/buildcfg/cfg.go b/src/internal/buildcfg/cfg.go
index 5ae4c0c7ad..7e4ee365df 100644
--- a/src/internal/buildcfg/cfg.go
+++ b/src/internal/buildcfg/cfg.go
@@ -392,6 +392,8 @@ func GOGOARCH() (name, value string) {
return "GOMIPS64", GOMIPS64
case "ppc64", "ppc64le":
return "GOPPC64", fmt.Sprintf("power%d", GOPPC64)
+ case "riscv64":
+ return "GORISCV64", fmt.Sprintf("rva%du64", GORISCV64)
case "wasm":
return "GOWASM", GOWASM.String()
}
diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go
index 4c30ff1045..9199b7906f 100644
--- a/src/internal/buildcfg/exp.go
+++ b/src/internal/buildcfg/exp.go
@@ -82,8 +82,6 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) {
RegabiWrappers: regabiSupported,
RegabiArgs: regabiSupported,
AliasTypeParams: true,
- SwissMap: true,
- SyncHashTrieMap: true,
SIMD: goarch == "amd64", // TODO remove this (default to false) when dev.simd is merged
Dwarf5: dwarf5Supported,
}
diff --git a/src/internal/cgrouptest/cgrouptest_linux.go b/src/internal/cgrouptest/cgrouptest_linux.go
index f23c37a705..8437f992f7 100644
--- a/src/internal/cgrouptest/cgrouptest_linux.go
+++ b/src/internal/cgrouptest/cgrouptest_linux.go
@@ -114,7 +114,7 @@ func findCurrent(t *testing.T) (string, string) {
if ver != cgroup.V2 {
t.Skipf("cgroup: running on cgroup v%d want v2", ver)
}
- rel := string(buf[1:n]) // The returned path always starts with /, skip it.
+ rel := string(buf[1:n]) // The returned path always starts with /, skip it.
rel = filepath.Join(".", rel) // Make sure this isn't empty string at root.
return mount, rel
}
diff --git a/src/internal/coverage/pkid.go b/src/internal/coverage/pkid.go
index 30b5ad49ac..09501e6bd2 100644
--- a/src/internal/coverage/pkid.go
+++ b/src/internal/coverage/pkid.go
@@ -31,7 +31,7 @@ package coverage
// slot: 6 path='internal/runtime/math' hard-coded id: 6
// slot: 7 path='internal/bytealg' hard-coded id: 7
// slot: 8 path='internal/goexperiment'
-// slot: 9 path='internal/runtime/syscall' hard-coded id: 8
+// slot: 9 path='internal/runtime/syscall/linux' hard-coded id: 8
// slot: 10 path='runtime' hard-coded id: 9
// fatal error: runtime.addCovMeta
//
@@ -66,7 +66,8 @@ var rtPkgs = [...]string{
"internal/runtime/strconv",
"internal/runtime/sys",
"internal/runtime/maps",
- "internal/runtime/syscall",
+ "internal/runtime/syscall/linux",
+ "internal/runtime/syscall/windows",
"internal/runtime/cgroup",
"internal/stringslite",
"runtime",
diff --git a/src/internal/cpu/cpu.go b/src/internal/cpu/cpu.go
index 53633c7ca8..71b654549a 100644
--- a/src/internal/cpu/cpu.go
+++ b/src/internal/cpu/cpu.go
@@ -26,39 +26,40 @@ var CacheLineSize uintptr = CacheLinePadSize
// in addition to the cpuid feature bit being set.
// The struct is padded to avoid false sharing.
var X86 struct {
- _ CacheLinePad
- HasAES bool
- HasADX bool
- HasAVX bool
- HasAVXVNNI bool
- HasAVX2 bool
- HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL
- HasAVX512F bool
- HasAVX512CD bool
- HasAVX512BW bool
- HasAVX512DQ bool
- HasAVX512VL bool
- HasAVX512GFNI bool
- HasAVX512VNNI bool
- HasAVX512VBMI bool
- HasAVX512VBMI2 bool
- HasAVX512BITALG bool
- HasAVX512VPOPCNTDQ bool
- HasBMI1 bool
- HasBMI2 bool
- HasERMS bool
- HasFSRM bool
- HasFMA bool
- HasOSXSAVE bool
- HasPCLMULQDQ bool
- HasPOPCNT bool
- HasRDTSCP bool
- HasSHA bool
- HasSSE3 bool
- HasSSSE3 bool
- HasSSE41 bool
- HasSSE42 bool
- _ CacheLinePad
+ _ CacheLinePad
+ HasAES bool
+ HasADX bool
+ HasAVX bool
+ HasAVXVNNI bool
+ HasAVX2 bool
+ HasAVX512 bool // Virtual feature: F+CD+BW+DQ+VL
+ HasAVX512F bool
+ HasAVX512CD bool
+ HasAVX512BW bool
+ HasAVX512DQ bool
+ HasAVX512VL bool
+ HasAVX512GFNI bool
+ HasAVX512VNNI bool
+ HasAVX512VBMI bool
+ HasAVX512VBMI2 bool
+ HasAVX512BITALG bool
+ HasAVX512VPOPCNTDQ bool
+ HasAVX512VPCLMULQDQ bool
+ HasBMI1 bool
+ HasBMI2 bool
+ HasERMS bool
+ HasFSRM bool
+ HasFMA bool
+ HasOSXSAVE bool
+ HasPCLMULQDQ bool
+ HasPOPCNT bool
+ HasRDTSCP bool
+ HasSHA bool
+ HasSSE3 bool
+ HasSSSE3 bool
+ HasSSE41 bool
+ HasSSE42 bool
+ _ CacheLinePad
}
// The booleans in ARM contain the correspondingly named cpu feature bit.
diff --git a/src/internal/cpu/cpu_x86.go b/src/internal/cpu/cpu_x86.go
index 04d89955da..76742706b5 100644
--- a/src/internal/cpu/cpu_x86.go
+++ b/src/internal/cpu/cpu_x86.go
@@ -51,6 +51,10 @@ const (
cpuid_SHA = 1 << 29
cpuid_AVX512BW = 1 << 30
cpuid_AVX512VL = 1 << 31
+
+ // ecx bits
+ cpuid_AVX512VPCLMULQDQ = 1 << 10
+
// edx bits
cpuid_FSRM = 1 << 4
// edx bits for CPUID 0x80000001
@@ -68,6 +72,7 @@ func doinit() {
{Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ},
{Name: "rdtscp", Feature: &X86.HasRDTSCP},
{Name: "sha", Feature: &X86.HasSHA},
+ {Name: "vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ},
}
level := getGOAMD64level()
if level < 2 {
@@ -172,6 +177,7 @@ func doinit() {
X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512VBMI)
X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512VBMI2)
X86.HasAVX512VNNI = isSet(ecx7, cpuid_AVX512VNNI)
+ X86.HasAVX512VPCLMULQDQ = isSet(ecx7, cpuid_AVX512VPCLMULQDQ)
}
X86.HasFSRM = isSet(edx7, cpuid_FSRM)
diff --git a/src/internal/goexperiment/exp_cacheprog_off.go b/src/internal/goexperiment/exp_cacheprog_off.go
deleted file mode 100644
index 276855c7d6..0000000000
--- a/src/internal/goexperiment/exp_cacheprog_off.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.cacheprog
-
-package goexperiment
-
-const CacheProg = false
-const CacheProgInt = 0
diff --git a/src/internal/goexperiment/exp_cacheprog_on.go b/src/internal/goexperiment/exp_cacheprog_on.go
deleted file mode 100644
index b959dd68b9..0000000000
--- a/src/internal/goexperiment/exp_cacheprog_on.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.cacheprog
-
-package goexperiment
-
-const CacheProg = true
-const CacheProgInt = 1
diff --git a/src/internal/goexperiment/exp_randomizedheapbase64_off.go b/src/internal/goexperiment/exp_randomizedheapbase64_off.go
new file mode 100644
index 0000000000..0a578535a4
--- /dev/null
+++ b/src/internal/goexperiment/exp_randomizedheapbase64_off.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build !goexperiment.randomizedheapbase64
+
+package goexperiment
+
+const RandomizedHeapBase64 = false
+const RandomizedHeapBase64Int = 0
diff --git a/src/internal/goexperiment/exp_randomizedheapbase64_on.go b/src/internal/goexperiment/exp_randomizedheapbase64_on.go
new file mode 100644
index 0000000000..10d59c7028
--- /dev/null
+++ b/src/internal/goexperiment/exp_randomizedheapbase64_on.go
@@ -0,0 +1,8 @@
+// Code generated by mkconsts.go. DO NOT EDIT.
+
+//go:build goexperiment.randomizedheapbase64
+
+package goexperiment
+
+const RandomizedHeapBase64 = true
+const RandomizedHeapBase64Int = 1
diff --git a/src/internal/goexperiment/exp_rangefunc_off.go b/src/internal/goexperiment/exp_rangefunc_off.go
deleted file mode 100644
index fc028205db..0000000000
--- a/src/internal/goexperiment/exp_rangefunc_off.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.rangefunc
-
-package goexperiment
-
-const RangeFunc = false
-const RangeFuncInt = 0
diff --git a/src/internal/goexperiment/exp_rangefunc_on.go b/src/internal/goexperiment/exp_rangefunc_on.go
deleted file mode 100644
index 25e7bd361b..0000000000
--- a/src/internal/goexperiment/exp_rangefunc_on.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.rangefunc
-
-package goexperiment
-
-const RangeFunc = true
-const RangeFuncInt = 1
diff --git a/src/internal/goexperiment/exp_swissmap_off.go b/src/internal/goexperiment/exp_swissmap_off.go
deleted file mode 100644
index 2af40aa60b..0000000000
--- a/src/internal/goexperiment/exp_swissmap_off.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.swissmap
-
-package goexperiment
-
-const SwissMap = false
-const SwissMapInt = 0
diff --git a/src/internal/goexperiment/exp_swissmap_on.go b/src/internal/goexperiment/exp_swissmap_on.go
deleted file mode 100644
index 73be49b606..0000000000
--- a/src/internal/goexperiment/exp_swissmap_on.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.swissmap
-
-package goexperiment
-
-const SwissMap = true
-const SwissMapInt = 1
diff --git a/src/internal/goexperiment/exp_synchashtriemap_off.go b/src/internal/goexperiment/exp_synchashtriemap_off.go
deleted file mode 100644
index cab23aac1d..0000000000
--- a/src/internal/goexperiment/exp_synchashtriemap_off.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build !goexperiment.synchashtriemap
-
-package goexperiment
-
-const SyncHashTrieMap = false
-const SyncHashTrieMapInt = 0
diff --git a/src/internal/goexperiment/exp_synchashtriemap_on.go b/src/internal/goexperiment/exp_synchashtriemap_on.go
deleted file mode 100644
index 87433ef4de..0000000000
--- a/src/internal/goexperiment/exp_synchashtriemap_on.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Code generated by mkconsts.go. DO NOT EDIT.
-
-//go:build goexperiment.synchashtriemap
-
-package goexperiment
-
-const SyncHashTrieMap = true
-const SyncHashTrieMapInt = 1
diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go
index 09349357a7..e24e3f1ff1 100644
--- a/src/internal/goexperiment/flags.go
+++ b/src/internal/goexperiment/flags.go
@@ -96,28 +96,15 @@ type Flags struct {
// copy of the iteration variable.
LoopVar bool
- // CacheProg adds support to cmd/go to use a child process to implement
- // the build cache; see https://github.com/golang/go/issues/59719.
- CacheProg bool
-
// NewInliner enables a new+improved version of the function
// inlining phase within the Go compiler.
NewInliner bool
- // RangeFunc enables range over func.
- RangeFunc bool
-
// AliasTypeParams enables type parameters for alias types.
// Requires that gotypesalias=1 is set with GODEBUG.
// This flag will be removed with Go 1.25.
AliasTypeParams bool
- // SwissMap enables the SwissTable-based map implementation.
- SwissMap bool
-
- // SyncHashTrieMap enables the HashTrieMap sync.Map implementation.
- SyncHashTrieMap bool
-
// Synctest enables the testing/synctest package.
Synctest bool
@@ -130,6 +117,10 @@ type Flags struct {
// GreenTeaGC enables the Green Tea GC implementation.
GreenTeaGC bool
+ // RandomizedHeapBase64 enables heap base address randomization on 64-bit
+ // platforms.
+ RandomizedHeapBase64 bool
+
// SIMD enables the simd package and the compiler's handling
// of SIMD intrinsics.
SIMD bool
diff --git a/src/internal/goversion/goversion.go b/src/internal/goversion/goversion.go
index 18703a64ea..d8bfe7e085 100644
--- a/src/internal/goversion/goversion.go
+++ b/src/internal/goversion/goversion.go
@@ -9,4 +9,4 @@ package goversion
//
// It should be updated at the start of each development cycle to be
// the version of the next Go 1.x release. See go.dev/issue/40705.
-const Version = 25
+const Version = 26
diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go
index acc2ab0c6e..1748e5ef5a 100644
--- a/src/internal/poll/fd_windows.go
+++ b/src/internal/poll/fd_windows.go
@@ -77,16 +77,9 @@ type operation struct {
mode int32
// fields used only by net package
- fd *FD
- buf syscall.WSABuf
- msg windows.WSAMsg
- sa syscall.Sockaddr
- rsa *syscall.RawSockaddrAny
- rsan int32
- handle syscall.Handle
- flags uint32
- qty uint32
- bufs []syscall.WSABuf
+ buf syscall.WSABuf
+ rsa *syscall.RawSockaddrAny
+ bufs []syscall.WSABuf
}
func (o *operation) setEvent() {
@@ -105,8 +98,8 @@ func (o *operation) close() {
}
}
-func (o *operation) overlapped() *syscall.Overlapped {
- if o.fd.isBlocking {
+func (fd *FD) overlapped(o *operation) *syscall.Overlapped {
+ if fd.isBlocking {
// Don't return the overlapped object if the file handle
// doesn't use overlapped I/O. It could be used, but
// that would then use the file pointer stored in the
@@ -118,10 +111,7 @@ func (o *operation) overlapped() *syscall.Overlapped {
func (o *operation) InitBuf(buf []byte) {
o.buf.Len = uint32(len(buf))
- o.buf.Buf = nil
- if len(buf) != 0 {
- o.buf.Buf = &buf[0]
- }
+ o.buf.Buf = unsafe.SliceData(buf)
}
func (o *operation) InitBufs(buf *[][]byte) {
@@ -154,28 +144,26 @@ func (o *operation) ClearBufs() {
o.bufs = o.bufs[:0]
}
-func (o *operation) InitMsg(p []byte, oob []byte) {
- o.InitBuf(p)
- o.msg.Buffers = &o.buf
- o.msg.BufferCount = 1
-
- o.msg.Name = nil
- o.msg.Namelen = 0
-
- o.msg.Flags = 0
- o.msg.Control.Len = uint32(len(oob))
- o.msg.Control.Buf = nil
- if len(oob) != 0 {
- o.msg.Control.Buf = &oob[0]
+func newWSAMsg(p []byte, oob []byte, flags int) windows.WSAMsg {
+ return windows.WSAMsg{
+ Buffers: &syscall.WSABuf{
+ Len: uint32(len(p)),
+ Buf: unsafe.SliceData(p),
+ },
+ BufferCount: 1,
+ Control: syscall.WSABuf{
+ Len: uint32(len(oob)),
+ Buf: unsafe.SliceData(oob),
+ },
+ Flags: uint32(flags),
}
}
// waitIO waits for the IO operation o to complete.
-func waitIO(o *operation) error {
- if o.fd.isBlocking {
+func (fd *FD) waitIO(o *operation) error {
+ if fd.isBlocking {
panic("can't wait on blocking operations")
}
- fd := o.fd
if !fd.pollable() {
// The overlapped handle is not added to the runtime poller,
// the only way to wait for the IO to complete is block until
@@ -195,8 +183,7 @@ func waitIO(o *operation) error {
}
// cancelIO cancels the IO operation o and waits for it to complete.
-func cancelIO(o *operation) {
- fd := o.fd
+func (fd *FD) cancelIO(o *operation) {
if !fd.pollable() {
return
}
@@ -214,8 +201,7 @@ func cancelIO(o *operation) {
// It supports both synchronous and asynchronous IO.
// o.qty and o.flags are set to zero before calling submit
// to avoid reusing the values from a previous call.
-func execIO(o *operation, submit func(o *operation) error) (int, error) {
- fd := o.fd
+func (fd *FD) execIO(o *operation, submit func(o *operation) (uint32, error)) (int, error) {
// Notify runtime netpoll about starting IO.
err := fd.pd.prepare(int(o.mode), fd.isFile)
if err != nil {
@@ -228,26 +214,25 @@ func execIO(o *operation, submit func(o *operation) error) (int, error) {
// event to wait for the IO to complete.
o.setEvent()
}
- o.qty = 0
- o.flags = 0
- err = submit(o)
+ qty, err := submit(o)
var waitErr error
// Blocking operations shouldn't return ERROR_IO_PENDING.
// Continue without waiting if that happens.
- if !o.fd.isBlocking && (err == syscall.ERROR_IO_PENDING || (err == nil && !o.fd.skipSyncNotif)) {
+ if !fd.isBlocking && (err == syscall.ERROR_IO_PENDING || (err == nil && !fd.skipSyncNotif)) {
// IO started asynchronously or completed synchronously but
// a sync notification is required. Wait for it to complete.
- waitErr = waitIO(o)
+ waitErr = fd.waitIO(o)
if waitErr != nil {
// IO interrupted by "close" or "timeout".
- cancelIO(o)
+ fd.cancelIO(o)
// We issued a cancellation request, but the IO operation may still succeeded
// before the cancellation request runs.
}
if fd.isFile {
- err = windows.GetOverlappedResult(fd.Sysfd, &o.o, &o.qty, false)
+ err = windows.GetOverlappedResult(fd.Sysfd, &o.o, &qty, false)
} else {
- err = windows.WSAGetOverlappedResult(fd.Sysfd, &o.o, &o.qty, false, &o.flags)
+ var flags uint32
+ err = windows.WSAGetOverlappedResult(fd.Sysfd, &o.o, &qty, false, &flags)
}
}
switch err {
@@ -271,7 +256,7 @@ func execIO(o *operation, submit func(o *operation) error) (int, error) {
err = waitErr
}
}
- return int(o.qty), err
+ return int(qty), err
}
// FD is a file descriptor. The net and os packages embed this type in
@@ -399,8 +384,6 @@ func (fd *FD) Init(net string, pollable bool) error {
fd.isBlocking = !pollable
fd.rop.mode = 'r'
fd.wop.mode = 'w'
- fd.rop.fd = fd
- fd.wop.fd = fd
// It is safe to add overlapped handles that also perform I/O
// outside of the runtime poller. The runtime poller will ignore
@@ -514,8 +497,9 @@ func (fd *FD) Read(buf []byte) (int, error) {
case kindFile, kindPipe:
o := &fd.rop
o.InitBuf(buf)
- n, err = execIO(o, func(o *operation) error {
- return syscall.ReadFile(o.fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &o.qty, o.overlapped())
+ n, err = fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = syscall.ReadFile(fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &qty, fd.overlapped(o))
+ return qty, err
})
fd.addOffset(n)
switch err {
@@ -530,8 +514,10 @@ func (fd *FD) Read(buf []byte) (int, error) {
case kindNet:
o := &fd.rop
o.InitBuf(buf)
- n, err = execIO(o, func(o *operation) error {
- return syscall.WSARecv(o.fd.Sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil)
+ n, err = fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ var flags uint32
+ err = syscall.WSARecv(fd.Sysfd, &o.buf, 1, &qty, &flags, &o.o, nil)
+ return qty, err
})
if race.Enabled {
race.Acquire(unsafe.Pointer(&ioSync))
@@ -645,8 +631,9 @@ func (fd *FD) Pread(b []byte, off int64) (int, error) {
o := &fd.rop
o.InitBuf(b)
fd.setOffset(off)
- n, err := execIO(o, func(o *operation) error {
- return syscall.ReadFile(o.fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &o.qty, &o.o)
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = syscall.ReadFile(fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &qty, &o.o)
+ return qty, err
})
if err == syscall.ERROR_HANDLE_EOF {
err = io.EOF
@@ -671,12 +658,14 @@ func (fd *FD) ReadFrom(buf []byte) (int, syscall.Sockaddr, error) {
defer fd.readUnlock()
o := &fd.rop
o.InitBuf(buf)
- n, err := execIO(o, func(o *operation) error {
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
if o.rsa == nil {
o.rsa = new(syscall.RawSockaddrAny)
}
- o.rsan = int32(unsafe.Sizeof(*o.rsa))
- return syscall.WSARecvFrom(o.fd.Sysfd, &o.buf, 1, &o.qty, &o.flags, o.rsa, &o.rsan, &o.o, nil)
+ rsan := int32(unsafe.Sizeof(*o.rsa))
+ var flags uint32
+ err = syscall.WSARecvFrom(fd.Sysfd, &o.buf, 1, &qty, &flags, o.rsa, &rsan, &o.o, nil)
+ return qty, err
})
err = fd.eofError(n, err)
if err != nil {
@@ -700,12 +689,14 @@ func (fd *FD) ReadFromInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error)
defer fd.readUnlock()
o := &fd.rop
o.InitBuf(buf)
- n, err := execIO(o, func(o *operation) error {
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
if o.rsa == nil {
o.rsa = new(syscall.RawSockaddrAny)
}
- o.rsan = int32(unsafe.Sizeof(*o.rsa))
- return syscall.WSARecvFrom(o.fd.Sysfd, &o.buf, 1, &o.qty, &o.flags, o.rsa, &o.rsan, &o.o, nil)
+ rsan := int32(unsafe.Sizeof(*o.rsa))
+ var flags uint32
+ err = syscall.WSARecvFrom(fd.Sysfd, &o.buf, 1, &qty, &flags, o.rsa, &rsan, &o.o, nil)
+ return qty, err
})
err = fd.eofError(n, err)
if err != nil {
@@ -729,12 +720,14 @@ func (fd *FD) ReadFromInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error)
defer fd.readUnlock()
o := &fd.rop
o.InitBuf(buf)
- n, err := execIO(o, func(o *operation) error {
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
if o.rsa == nil {
o.rsa = new(syscall.RawSockaddrAny)
}
- o.rsan = int32(unsafe.Sizeof(*o.rsa))
- return syscall.WSARecvFrom(o.fd.Sysfd, &o.buf, 1, &o.qty, &o.flags, o.rsa, &o.rsan, &o.o, nil)
+ rsan := int32(unsafe.Sizeof(*o.rsa))
+ var flags uint32
+ err = syscall.WSARecvFrom(fd.Sysfd, &o.buf, 1, &qty, &flags, o.rsa, &rsan, &o.o, nil)
+ return qty, err
})
err = fd.eofError(n, err)
if err != nil {
@@ -770,8 +763,9 @@ func (fd *FD) Write(buf []byte) (int, error) {
case kindPipe, kindFile:
o := &fd.wop
o.InitBuf(b)
- n, err = execIO(o, func(o *operation) error {
- return syscall.WriteFile(o.fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &o.qty, o.overlapped())
+ n, err = fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = syscall.WriteFile(fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &qty, fd.overlapped(o))
+ return qty, err
})
fd.addOffset(n)
case kindNet:
@@ -780,8 +774,9 @@ func (fd *FD) Write(buf []byte) (int, error) {
}
o := &fd.wop
o.InitBuf(b)
- n, err = execIO(o, func(o *operation) error {
- return syscall.WSASend(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, &o.o, nil)
+ n, err = fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = syscall.WSASend(fd.Sysfd, &o.buf, 1, &qty, 0, &o.o, nil)
+ return qty, err
})
}
ntotal += n
@@ -869,8 +864,9 @@ func (fd *FD) Pwrite(buf []byte, off int64) (int, error) {
o := &fd.wop
o.InitBuf(b)
fd.setOffset(off + int64(ntotal))
- n, err := execIO(o, func(o *operation) error {
- return syscall.WriteFile(o.fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &o.qty, &o.o)
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = syscall.WriteFile(fd.Sysfd, unsafe.Slice(o.buf.Buf, o.buf.Len), &qty, &o.o)
+ return qty, err
})
if n > 0 {
ntotal += n
@@ -898,8 +894,9 @@ func (fd *FD) Writev(buf *[][]byte) (int64, error) {
}
o := &fd.wop
o.InitBufs(buf)
- n, err := execIO(o, func(o *operation) error {
- return syscall.WSASend(o.fd.Sysfd, &o.bufs[0], uint32(len(o.bufs)), &o.qty, 0, &o.o, nil)
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = syscall.WSASend(fd.Sysfd, &o.bufs[0], uint32(len(o.bufs)), &qty, 0, &o.o, nil)
+ return qty, err
})
o.ClearBufs()
TestHookDidWritev(n)
@@ -918,9 +915,9 @@ func (fd *FD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) {
// handle zero-byte payload
o := &fd.wop
o.InitBuf(buf)
- o.sa = sa
- n, err := execIO(o, func(o *operation) error {
- return syscall.WSASendto(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, o.sa, &o.o, nil)
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = syscall.WSASendto(fd.Sysfd, &o.buf, 1, &qty, 0, sa, &o.o, nil)
+ return qty, err
})
return n, err
}
@@ -933,9 +930,9 @@ func (fd *FD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) {
}
o := &fd.wop
o.InitBuf(b)
- o.sa = sa
- n, err := execIO(o, func(o *operation) error {
- return syscall.WSASendto(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, o.sa, &o.o, nil)
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = syscall.WSASendto(fd.Sysfd, &o.buf, 1, &qty, 0, sa, &o.o, nil)
+ return qty, err
})
ntotal += int(n)
if err != nil {
@@ -957,8 +954,9 @@ func (fd *FD) WriteToInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error)
// handle zero-byte payload
o := &fd.wop
o.InitBuf(buf)
- n, err := execIO(o, func(o *operation) error {
- return windows.WSASendtoInet4(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, sa4, &o.o, nil)
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = windows.WSASendtoInet4(fd.Sysfd, &o.buf, 1, &qty, 0, sa4, &o.o, nil)
+ return qty, err
})
return n, err
}
@@ -971,8 +969,9 @@ func (fd *FD) WriteToInet4(buf []byte, sa4 *syscall.SockaddrInet4) (int, error)
}
o := &fd.wop
o.InitBuf(b)
- n, err := execIO(o, func(o *operation) error {
- return windows.WSASendtoInet4(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, sa4, &o.o, nil)
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = windows.WSASendtoInet4(fd.Sysfd, &o.buf, 1, &qty, 0, sa4, &o.o, nil)
+ return qty, err
})
ntotal += int(n)
if err != nil {
@@ -994,8 +993,9 @@ func (fd *FD) WriteToInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error)
// handle zero-byte payload
o := &fd.wop
o.InitBuf(buf)
- n, err := execIO(o, func(o *operation) error {
- return windows.WSASendtoInet6(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, sa6, &o.o, nil)
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = windows.WSASendtoInet6(fd.Sysfd, &o.buf, 1, &qty, 0, sa6, &o.o, nil)
+ return qty, err
})
return n, err
}
@@ -1008,8 +1008,9 @@ func (fd *FD) WriteToInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error)
}
o := &fd.wop
o.InitBuf(b)
- n, err := execIO(o, func(o *operation) error {
- return windows.WSASendtoInet6(o.fd.Sysfd, &o.buf, 1, &o.qty, 0, sa6, &o.o, nil)
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = windows.WSASendtoInet6(fd.Sysfd, &o.buf, 1, &qty, 0, sa6, &o.o, nil)
+ return qty, err
})
ntotal += int(n)
if err != nil {
@@ -1025,19 +1026,19 @@ func (fd *FD) WriteToInet6(buf []byte, sa6 *syscall.SockaddrInet6) (int, error)
// than in the net package so that it can use fd.wop.
func (fd *FD) ConnectEx(ra syscall.Sockaddr) error {
o := &fd.wop
- o.sa = ra
- _, err := execIO(o, func(o *operation) error {
- return ConnectExFunc(o.fd.Sysfd, o.sa, nil, 0, nil, &o.o)
+ _, err := fd.execIO(o, func(o *operation) (uint32, error) {
+ return 0, ConnectExFunc(fd.Sysfd, ra, nil, 0, nil, &o.o)
})
return err
}
func (fd *FD) acceptOne(s syscall.Handle, rawsa []syscall.RawSockaddrAny, o *operation) (string, error) {
// Submit accept request.
- o.handle = s
- o.rsan = int32(unsafe.Sizeof(rawsa[0]))
- _, err := execIO(o, func(o *operation) error {
- return AcceptFunc(o.fd.Sysfd, o.handle, (*byte)(unsafe.Pointer(&rawsa[0])), 0, uint32(o.rsan), uint32(o.rsan), &o.qty, &o.o)
+ rsan := uint32(unsafe.Sizeof(rawsa[0]))
+ _, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = AcceptFunc(fd.Sysfd, s, (*byte)(unsafe.Pointer(&rawsa[0])), 0, rsan, rsan, &qty, &o.o)
+ return qty, err
+
})
if err != nil {
CloseFunc(s)
@@ -1072,7 +1073,7 @@ func (fd *FD) Accept(sysSocket func() (syscall.Handle, error)) (syscall.Handle,
errcall, err := fd.acceptOne(s, rawsa[:], o)
if err == nil {
- return s, rawsa[:], uint32(o.rsan), "", nil
+ return s, rawsa[:], uint32(unsafe.Sizeof(rawsa[0])), "", nil
}
// Sometimes we see WSAECONNRESET and ERROR_NETNAME_DELETED is
@@ -1179,11 +1180,13 @@ func (fd *FD) RawRead(f func(uintptr) bool) error {
// socket is readable. h/t https://stackoverflow.com/a/42019668/332798
o := &fd.rop
o.InitBuf(nil)
- _, err := execIO(o, func(o *operation) error {
+ _, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ var flags uint32
if !fd.IsStream {
- o.flags |= windows.MSG_PEEK
+ flags |= windows.MSG_PEEK
}
- return syscall.WSARecv(o.fd.Sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil)
+ err = syscall.WSARecv(fd.Sysfd, &o.buf, 1, &qty, &flags, &o.o, nil)
+ return qty, err
})
if err == windows.WSAEMSGSIZE {
// expected with a 0-byte peek, ignore.
@@ -1271,22 +1274,22 @@ func (fd *FD) ReadMsg(p []byte, oob []byte, flags int) (int, int, int, syscall.S
}
o := &fd.rop
- o.InitMsg(p, oob)
if o.rsa == nil {
o.rsa = new(syscall.RawSockaddrAny)
}
- o.msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
- o.msg.Namelen = int32(unsafe.Sizeof(*o.rsa))
- o.msg.Flags = uint32(flags)
- n, err := execIO(o, func(o *operation) error {
- return windows.WSARecvMsg(o.fd.Sysfd, &o.msg, &o.qty, &o.o, nil)
+ msg := newWSAMsg(p, oob, flags)
+ msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
+ msg.Namelen = int32(unsafe.Sizeof(*o.rsa))
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = windows.WSARecvMsg(fd.Sysfd, &msg, &qty, &o.o, nil)
+ return qty, err
})
err = fd.eofError(n, err)
var sa syscall.Sockaddr
if err == nil {
sa, err = o.rsa.Sockaddr()
}
- return n, int(o.msg.Control.Len), int(o.msg.Flags), sa, err
+ return n, int(msg.Control.Len), int(msg.Flags), sa, err
}
// ReadMsgInet4 is ReadMsg, but specialized to return a syscall.SockaddrInet4.
@@ -1301,21 +1304,21 @@ func (fd *FD) ReadMsgInet4(p []byte, oob []byte, flags int, sa4 *syscall.Sockadd
}
o := &fd.rop
- o.InitMsg(p, oob)
if o.rsa == nil {
o.rsa = new(syscall.RawSockaddrAny)
}
- o.msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
- o.msg.Namelen = int32(unsafe.Sizeof(*o.rsa))
- o.msg.Flags = uint32(flags)
- n, err := execIO(o, func(o *operation) error {
- return windows.WSARecvMsg(o.fd.Sysfd, &o.msg, &o.qty, &o.o, nil)
+ msg := newWSAMsg(p, oob, flags)
+ msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
+ msg.Namelen = int32(unsafe.Sizeof(*o.rsa))
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = windows.WSARecvMsg(fd.Sysfd, &msg, &qty, &o.o, nil)
+ return qty, err
})
err = fd.eofError(n, err)
if err == nil {
rawToSockaddrInet4(o.rsa, sa4)
}
- return n, int(o.msg.Control.Len), int(o.msg.Flags), err
+ return n, int(msg.Control.Len), int(msg.Flags), err
}
// ReadMsgInet6 is ReadMsg, but specialized to return a syscall.SockaddrInet6.
@@ -1330,21 +1333,21 @@ func (fd *FD) ReadMsgInet6(p []byte, oob []byte, flags int, sa6 *syscall.Sockadd
}
o := &fd.rop
- o.InitMsg(p, oob)
if o.rsa == nil {
o.rsa = new(syscall.RawSockaddrAny)
}
- o.msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
- o.msg.Namelen = int32(unsafe.Sizeof(*o.rsa))
- o.msg.Flags = uint32(flags)
- n, err := execIO(o, func(o *operation) error {
- return windows.WSARecvMsg(o.fd.Sysfd, &o.msg, &o.qty, &o.o, nil)
+ msg := newWSAMsg(p, oob, flags)
+ msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
+ msg.Namelen = int32(unsafe.Sizeof(*o.rsa))
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = windows.WSARecvMsg(fd.Sysfd, &msg, &qty, &o.o, nil)
+ return qty, err
})
err = fd.eofError(n, err)
if err == nil {
rawToSockaddrInet6(o.rsa, sa6)
}
- return n, int(o.msg.Control.Len), int(o.msg.Flags), err
+ return n, int(msg.Control.Len), int(msg.Flags), err
}
// WriteMsg wraps the WSASendMsg network call.
@@ -1359,7 +1362,7 @@ func (fd *FD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (int, int, err
defer fd.writeUnlock()
o := &fd.wop
- o.InitMsg(p, oob)
+ msg := newWSAMsg(p, oob, 0)
if sa != nil {
if o.rsa == nil {
o.rsa = new(syscall.RawSockaddrAny)
@@ -1368,13 +1371,14 @@ func (fd *FD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (int, int, err
if err != nil {
return 0, 0, err
}
- o.msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
- o.msg.Namelen = len
+ msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
+ msg.Namelen = len
}
- n, err := execIO(o, func(o *operation) error {
- return windows.WSASendMsg(o.fd.Sysfd, &o.msg, 0, &o.qty, &o.o, nil)
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = windows.WSASendMsg(fd.Sysfd, &msg, 0, nil, &o.o, nil)
+ return qty, err
})
- return n, int(o.msg.Control.Len), err
+ return n, int(msg.Control.Len), err
}
// WriteMsgInet4 is WriteMsg specialized for syscall.SockaddrInet4.
@@ -1389,17 +1393,18 @@ func (fd *FD) WriteMsgInet4(p []byte, oob []byte, sa *syscall.SockaddrInet4) (in
defer fd.writeUnlock()
o := &fd.wop
- o.InitMsg(p, oob)
if o.rsa == nil {
o.rsa = new(syscall.RawSockaddrAny)
}
len := sockaddrInet4ToRaw(o.rsa, sa)
- o.msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
- o.msg.Namelen = len
- n, err := execIO(o, func(o *operation) error {
- return windows.WSASendMsg(o.fd.Sysfd, &o.msg, 0, &o.qty, &o.o, nil)
+ msg := newWSAMsg(p, oob, 0)
+ msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
+ msg.Namelen = len
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = windows.WSASendMsg(fd.Sysfd, &msg, 0, nil, &o.o, nil)
+ return qty, err
})
- return n, int(o.msg.Control.Len), err
+ return n, int(msg.Control.Len), err
}
// WriteMsgInet6 is WriteMsg specialized for syscall.SockaddrInet6.
@@ -1414,17 +1419,18 @@ func (fd *FD) WriteMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (in
defer fd.writeUnlock()
o := &fd.wop
- o.InitMsg(p, oob)
if o.rsa == nil {
o.rsa = new(syscall.RawSockaddrAny)
}
+ msg := newWSAMsg(p, oob, 0)
len := sockaddrInet6ToRaw(o.rsa, sa)
- o.msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
- o.msg.Namelen = len
- n, err := execIO(o, func(o *operation) error {
- return windows.WSASendMsg(o.fd.Sysfd, &o.msg, 0, &o.qty, &o.o, nil)
+ msg.Name = (syscall.Pointer)(unsafe.Pointer(o.rsa))
+ msg.Namelen = len
+ n, err := fd.execIO(o, func(o *operation) (qty uint32, err error) {
+ err = windows.WSASendMsg(fd.Sysfd, &msg, 0, nil, &o.o, nil)
+ return qty, err
})
- return n, int(o.msg.Control.Len), err
+ return n, int(msg.Control.Len), err
}
func DupCloseOnExec(fd int) (int, string, error) {
diff --git a/src/internal/poll/sendfile_windows.go b/src/internal/poll/sendfile_windows.go
index d72bcd5871..a052f4a1f8 100644
--- a/src/internal/poll/sendfile_windows.go
+++ b/src/internal/poll/sendfile_windows.go
@@ -63,7 +63,6 @@ func SendFile(fd *FD, src uintptr, size int64) (written int64, err error, handle
const maxChunkSizePerCall = int64(0x7fffffff - 1)
o := &fd.wop
- o.handle = hsrc
for size > 0 {
chunkSize := maxChunkSizePerCall
if chunkSize > size {
@@ -74,9 +73,12 @@ func SendFile(fd *FD, src uintptr, size int64) (written int64, err error, handle
o.o.Offset = uint32(off)
o.o.OffsetHigh = uint32(off >> 32)
- n, err := execIO(o, func(o *operation) error {
- o.qty = uint32(chunkSize)
- return syscall.TransmitFile(o.fd.Sysfd, o.handle, o.qty, 0, &o.o, nil, syscall.TF_WRITE_BEHIND)
+ n, err := fd.execIO(o, func(o *operation) (uint32, error) {
+ err := syscall.TransmitFile(fd.Sysfd, hsrc, uint32(chunkSize), 0, &o.o, nil, syscall.TF_WRITE_BEHIND)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(chunkSize), nil
})
if err != nil {
return written, err, written > 0
diff --git a/src/internal/reflectlite/export_test.go b/src/internal/reflectlite/export_test.go
index 0ad3d97c15..93762a8387 100644
--- a/src/internal/reflectlite/export_test.go
+++ b/src/internal/reflectlite/export_test.go
@@ -70,7 +70,7 @@ func Zero(typ Type) Value {
}
t := typ.common()
fl := flag(t.Kind())
- if t.IfaceIndir() {
+ if !t.IsDirectIface() {
return Value{t, unsafe_New(t), fl | flagIndir}
}
return Value{t, nil, fl}
diff --git a/src/internal/reflectlite/value.go b/src/internal/reflectlite/value.go
index 7b231d554f..a92df613f5 100644
--- a/src/internal/reflectlite/value.go
+++ b/src/internal/reflectlite/value.go
@@ -52,7 +52,7 @@ type Value struct {
// - flagIndir: val holds a pointer to the data
// - flagAddr: v.CanAddr is true (implies flagIndir and ptr is non-nil)
// - flagMethod: v is a method value.
- // If ifaceIndir(typ), code can assume that flagIndir is set.
+ // If !typ.IsDirectIface(), code can assume that flagIndir is set.
//
// The remaining 22+ bits give a method number for method values.
// If flag.kind() != Func, code can assume that flagMethod is unset.
@@ -118,7 +118,7 @@ func packEface(v Value) any {
e := (*abi.EmptyInterface)(unsafe.Pointer(&i))
// First, fill in the data portion of the interface.
switch {
- case t.IfaceIndir():
+ case !t.IsDirectIface():
if v.flag&flagIndir == 0 {
panic("bad indir")
}
@@ -155,7 +155,7 @@ func unpackEface(i any) Value {
return Value{}
}
f := flag(t.Kind())
- if t.IfaceIndir() {
+ if !t.IsDirectIface() {
f |= flagIndir
}
return Value{t, e.Data, f}
diff --git a/src/internal/runtime/cgobench/bench_test.go b/src/internal/runtime/cgobench/bench_test.go
new file mode 100644
index 0000000000..b4d8efec5e
--- /dev/null
+++ b/src/internal/runtime/cgobench/bench_test.go
@@ -0,0 +1,26 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo
+
+package cgobench_test
+
+import (
+ "internal/runtime/cgobench"
+ "testing"
+)
+
+func BenchmarkCgoCall(b *testing.B) {
+ for b.Loop() {
+ cgobench.Empty()
+ }
+}
+
+func BenchmarkCgoCallParallel(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ cgobench.Empty()
+ }
+ })
+}
diff --git a/src/internal/runtime/cgobench/funcs.go b/src/internal/runtime/cgobench/funcs.go
new file mode 100644
index 0000000000..db685180a1
--- /dev/null
+++ b/src/internal/runtime/cgobench/funcs.go
@@ -0,0 +1,17 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cgo
+
+package cgobench
+
+/*
+static void empty() {
+}
+*/
+import "C"
+
+func Empty() {
+ C.empty()
+}
diff --git a/src/internal/runtime/cgroup/cgroup_linux.go b/src/internal/runtime/cgroup/cgroup_linux.go
index 2fc3b225c5..91815b4a1d 100644
--- a/src/internal/runtime/cgroup/cgroup_linux.go
+++ b/src/internal/runtime/cgroup/cgroup_linux.go
@@ -7,7 +7,7 @@ package cgroup
import (
"internal/bytealg"
"internal/runtime/strconv"
- "internal/runtime/syscall"
+ "internal/runtime/syscall/linux"
)
var (
@@ -77,10 +77,10 @@ type CPU struct {
func (c CPU) Close() {
switch c.version {
case V1:
- syscall.Close(c.quotaFD)
- syscall.Close(c.periodFD)
+ linux.Close(c.quotaFD)
+ linux.Close(c.periodFD)
case V2:
- syscall.Close(c.quotaFD)
+ linux.Close(c.quotaFD)
default:
throw("impossible cgroup version")
}
@@ -112,7 +112,7 @@ func OpenCPU(scratch []byte) (CPU, error) {
case 1:
n2 := copy(base[n:], v1QuotaFile)
path := base[:n+n2]
- quotaFD, errno := syscall.Open(&path[0], syscall.O_RDONLY|syscall.O_CLOEXEC, 0)
+ quotaFD, errno := linux.Open(&path[0], linux.O_RDONLY|linux.O_CLOEXEC, 0)
if errno != 0 {
// This may fail if this process was migrated out of
// the cgroup found by FindCPU and that cgroup has been
@@ -122,7 +122,7 @@ func OpenCPU(scratch []byte) (CPU, error) {
n2 = copy(base[n:], v1PeriodFile)
path = base[:n+n2]
- periodFD, errno := syscall.Open(&path[0], syscall.O_RDONLY|syscall.O_CLOEXEC, 0)
+ periodFD, errno := linux.Open(&path[0], linux.O_RDONLY|linux.O_CLOEXEC, 0)
if errno != 0 {
// This may fail if this process was migrated out of
// the cgroup found by FindCPU and that cgroup has been
@@ -139,7 +139,7 @@ func OpenCPU(scratch []byte) (CPU, error) {
case 2:
n2 := copy(base[n:], v2MaxFile)
path := base[:n+n2]
- maxFD, errno := syscall.Open(&path[0], syscall.O_RDONLY|syscall.O_CLOEXEC, 0)
+ maxFD, errno := linux.Open(&path[0], linux.O_RDONLY|linux.O_CLOEXEC, 0)
if errno != 0 {
// This may fail if this process was migrated out of
// the cgroup found by FindCPU and that cgroup has been
@@ -200,7 +200,7 @@ func readV1Number(fd int) (int64, error) {
//
// Always read from the beginning of the file to get a fresh value.
var b [64]byte
- n, errno := syscall.Pread(fd, b[:], 0)
+ n, errno := linux.Pread(fd, b[:], 0)
if errno != 0 {
return 0, errSyscallFailed
}
@@ -248,7 +248,7 @@ func readV2Limit(fd int) (float64, bool, error) {
//
// Always read from the beginning of the file to get a fresh value.
var b [64]byte
- n, errno := syscall.Pread(fd, b[:], 0)
+ n, errno := linux.Pread(fd, b[:], 0)
if errno != 0 {
return 0, false, errSyscallFailed
}
@@ -345,8 +345,8 @@ func FindCPU(out []byte, scratch []byte) (int, Version, error) {
// Returns ErrNoCgroup if the process is not in a CPU cgroup.
func FindCPURelativePath(out []byte, scratch []byte) (int, Version, error) {
path := []byte("/proc/self/cgroup\x00")
- fd, errno := syscall.Open(&path[0], syscall.O_RDONLY|syscall.O_CLOEXEC, 0)
- if errno == syscall.ENOENT {
+ fd, errno := linux.Open(&path[0], linux.O_RDONLY|linux.O_CLOEXEC, 0)
+ if errno == linux.ENOENT {
return 0, 0, ErrNoCgroup
} else if errno != 0 {
return 0, 0, errSyscallFailed
@@ -354,13 +354,13 @@ func FindCPURelativePath(out []byte, scratch []byte) (int, Version, error) {
// The relative path always starts with /, so we can directly append it
// to the mount point.
- n, version, err := parseCPURelativePath(fd, syscall.Read, out[:], scratch)
+ n, version, err := parseCPURelativePath(fd, linux.Read, out[:], scratch)
if err != nil {
- syscall.Close(fd)
+ linux.Close(fd)
return 0, 0, err
}
- syscall.Close(fd)
+ linux.Close(fd)
return n, version, nil
}
@@ -489,19 +489,19 @@ func FindCPUMountPoint(out []byte, scratch []byte) (int, error) {
checkBufferSize(scratch, ParseSize)
path := []byte("/proc/self/mountinfo\x00")
- fd, errno := syscall.Open(&path[0], syscall.O_RDONLY|syscall.O_CLOEXEC, 0)
- if errno == syscall.ENOENT {
+ fd, errno := linux.Open(&path[0], linux.O_RDONLY|linux.O_CLOEXEC, 0)
+ if errno == linux.ENOENT {
return 0, ErrNoCgroup
} else if errno != 0 {
return 0, errSyscallFailed
}
- n, err := parseCPUMount(fd, syscall.Read, out, scratch)
+ n, err := parseCPUMount(fd, linux.Read, out, scratch)
if err != nil {
- syscall.Close(fd)
+ linux.Close(fd)
return 0, err
}
- syscall.Close(fd)
+ linux.Close(fd)
return n, nil
}
diff --git a/src/internal/runtime/cgroup/line_reader.go b/src/internal/runtime/cgroup/line_reader.go
index 382cfd70d1..9a7213327c 100644
--- a/src/internal/runtime/cgroup/line_reader.go
+++ b/src/internal/runtime/cgroup/line_reader.go
@@ -55,7 +55,7 @@ type lineReader struct {
// remainder of the line skipped. See next for more details.
//
// read is the function used to read more bytes from fd. This is usually
-// internal/runtime/syscall.Read. Note that this follows syscall semantics (not
+// internal/runtime/syscall/linux.Read. Note that this follows syscall semantics (not
// io.Reader), so EOF is indicated with n=0, errno=0.
func newLineReader(fd int, scratch []byte, read func(fd int, b []byte) (n int, errno uintptr)) *lineReader {
return &lineReader{
diff --git a/src/internal/runtime/exithook/hooks.go b/src/internal/runtime/exithook/hooks.go
index eb8aa1ce0a..8dcfb9ed3c 100644
--- a/src/internal/runtime/exithook/hooks.go
+++ b/src/internal/runtime/exithook/hooks.go
@@ -8,7 +8,7 @@
// from a safe context (e.g. not an error/panic path or signal
// handler, preemption enabled, allocation allowed, write barriers
// allowed, etc), and that the exit function F will be invoked under
-// similar circumstances. That is the say, we are expecting that F
+// similar circumstances. That is to say, we are expecting that F
// uses normal / high-level Go code as opposed to one of the more
// restricted dialects used for the trickier parts of the runtime.
package exithook
diff --git a/src/internal/runtime/maps/export_noswiss_test.go b/src/internal/runtime/maps/export_noswiss_test.go
deleted file mode 100644
index 333fc6ce90..0000000000
--- a/src/internal/runtime/maps/export_noswiss_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-// This file allows non-GOEXPERIMENT=swissmap builds (i.e., old map builds) to
-// construct a swissmap table for running the tests in this package.
-
-package maps
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-type instantiatedGroup[K comparable, V any] struct {
- ctrls ctrlGroup
- slots [abi.SwissMapGroupSlots]instantiatedSlot[K, V]
-}
-
-type instantiatedSlot[K comparable, V any] struct {
- key K
- elem V
-}
-
-func newTestMapType[K comparable, V any]() *abi.SwissMapType {
- var m map[K]V
- mTyp := abi.TypeOf(m)
- omt := (*abi.OldMapType)(unsafe.Pointer(mTyp))
-
- var grp instantiatedGroup[K, V]
- var slot instantiatedSlot[K, V]
-
- mt := &abi.SwissMapType{
- Key: omt.Key,
- Elem: omt.Elem,
- Group: abi.TypeOf(grp),
- Hasher: omt.Hasher,
- SlotSize: unsafe.Sizeof(slot),
- GroupSize: unsafe.Sizeof(grp),
- ElemOff: unsafe.Offsetof(slot.elem),
- }
- if omt.NeedKeyUpdate() {
- mt.Flags |= abi.SwissMapNeedKeyUpdate
- }
- if omt.HashMightPanic() {
- mt.Flags |= abi.SwissMapHashMightPanic
- }
- return mt
-}
diff --git a/src/internal/runtime/maps/export_swiss_test.go b/src/internal/runtime/maps/export_swiss_test.go
deleted file mode 100644
index 3c6faf5c48..0000000000
--- a/src/internal/runtime/maps/export_swiss_test.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package maps
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-func newTestMapType[K comparable, V any]() *abi.SwissMapType {
- var m map[K]V
- mTyp := abi.TypeOf(m)
- mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
- return mt
-}
diff --git a/src/internal/runtime/maps/export_test.go b/src/internal/runtime/maps/export_test.go
index 2c7b05ea2d..adce44ba93 100644
--- a/src/internal/runtime/maps/export_test.go
+++ b/src/internal/runtime/maps/export_test.go
@@ -22,7 +22,14 @@ const MaxAvgGroupLoad = maxAvgGroupLoad
// we can't properly test hint alloc overflows with this.
const maxAllocTest = 1 << 30
-func NewTestMap[K comparable, V any](hint uintptr) (*Map, *abi.SwissMapType) {
+func newTestMapType[K comparable, V any]() *abi.MapType {
+ var m map[K]V
+ mTyp := abi.TypeOf(m)
+ mt := (*abi.MapType)(unsafe.Pointer(mTyp))
+ return mt
+}
+
+func NewTestMap[K comparable, V any](hint uintptr) (*Map, *abi.MapType) {
mt := newTestMapType[K, V]()
return NewMap(mt, hint, nil, maxAllocTest), mt
}
@@ -61,7 +68,7 @@ func (m *Map) GroupCount() uint64 {
// Returns nil if there are no full groups.
// Returns nil if a group is full but contains entirely deleted slots.
// Returns nil if the map is small.
-func (m *Map) KeyFromFullGroup(typ *abi.SwissMapType) unsafe.Pointer {
+func (m *Map) KeyFromFullGroup(typ *abi.MapType) unsafe.Pointer {
if m.dirLen <= 0 {
return nil
}
@@ -82,7 +89,7 @@ func (m *Map) KeyFromFullGroup(typ *abi.SwissMapType) unsafe.Pointer {
}
// All full or deleted slots.
- for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
+ for j := uintptr(0); j < abi.MapGroupSlots; j++ {
if g.ctrls().get(j) == ctrlDeleted {
continue
}
@@ -99,7 +106,7 @@ func (m *Map) KeyFromFullGroup(typ *abi.SwissMapType) unsafe.Pointer {
}
// Returns nil if the map is small.
-func (m *Map) TableFor(typ *abi.SwissMapType, key unsafe.Pointer) *table {
+func (m *Map) TableFor(typ *abi.MapType, key unsafe.Pointer) *table {
if m.dirLen <= 0 {
return nil
}
diff --git a/src/internal/runtime/maps/group.go b/src/internal/runtime/maps/group.go
index 5fd87218d1..b23ff76f98 100644
--- a/src/internal/runtime/maps/group.go
+++ b/src/internal/runtime/maps/group.go
@@ -39,7 +39,7 @@ const (
// On other architectures, bitset uses one byte per slot, where each byte is
// either 0x80 if the slot is part of the set or 0x00 otherwise. This makes it
// convenient to calculate for an entire group at once using standard
-// arithemetic instructions.
+// arithmetic instructions.
type bitset uint64
// first returns the relative index of the first control byte in the group that
@@ -122,7 +122,7 @@ func (b bitset) count() int {
// TODO(prattmic): Consider inverting the top bit so that the zero value is empty.
type ctrl uint8
-// ctrlGroup is a fixed size array of abi.SwissMapGroupSlots control bytes
+// ctrlGroup is a fixed size array of abi.MapGroupSlots control bytes
// stored in a uint64.
type ctrlGroup uint64
@@ -157,7 +157,7 @@ func (g ctrlGroup) matchH2(h uintptr) bitset {
// Portable implementation of matchH2.
//
// Note: On AMD64, this is an intrinsic implemented with SIMD instructions. See
-// note on bitset about the packed instrinsified return value.
+// note on bitset about the packed intrinsified return value.
func ctrlGroupMatchH2(g ctrlGroup, h uintptr) bitset {
// NB: This generic matching routine produces false positive matches when
// h is 2^N and the control bytes have a seq of 2^N followed by 2^N+1. For
@@ -179,7 +179,7 @@ func (g ctrlGroup) matchEmpty() bitset {
// Portable implementation of matchEmpty.
//
// Note: On AMD64, this is an intrinsic implemented with SIMD instructions. See
-// note on bitset about the packed instrinsified return value.
+// note on bitset about the packed intrinsified return value.
func ctrlGroupMatchEmpty(g ctrlGroup) bitset {
// An empty slot is 1000 0000
// A deleted slot is 1111 1110
@@ -200,7 +200,7 @@ func (g ctrlGroup) matchEmptyOrDeleted() bitset {
// Portable implementation of matchEmptyOrDeleted.
//
// Note: On AMD64, this is an intrinsic implemented with SIMD instructions. See
-// note on bitset about the packed instrinsified return value.
+// note on bitset about the packed intrinsified return value.
func ctrlGroupMatchEmptyOrDeleted(g ctrlGroup) bitset {
// An empty slot is 1000 0000
// A deleted slot is 1111 1110
@@ -219,7 +219,7 @@ func (g ctrlGroup) matchFull() bitset {
// Portable implementation of matchFull.
//
// Note: On AMD64, this is an intrinsic implemented with SIMD instructions. See
-// note on bitset about the packed instrinsified return value.
+// note on bitset about the packed intrinsified return value.
func ctrlGroupMatchFull(g ctrlGroup) bitset {
// An empty slot is 1000 0000
// A deleted slot is 1111 1110
@@ -233,7 +233,7 @@ func ctrlGroupMatchFull(g ctrlGroup) bitset {
// groupReference is a wrapper type representing a single slot group stored at
// data.
//
-// A group holds abi.SwissMapGroupSlots slots (key/elem pairs) plus their
+// A group holds abi.MapGroupSlots slots (key/elem pairs) plus their
// control word.
type groupReference struct {
// data points to the group, which is described by typ.Group and has
@@ -241,7 +241,7 @@ type groupReference struct {
//
// type group struct {
// ctrls ctrlGroup
- // slots [abi.SwissMapGroupSlots]slot
+ // slots [abi.MapGroupSlots]slot
// }
//
// type slot struct {
@@ -281,14 +281,14 @@ func (g *groupReference) ctrls() *ctrlGroup {
}
// key returns a pointer to the key at index i.
-func (g *groupReference) key(typ *abi.SwissMapType, i uintptr) unsafe.Pointer {
+func (g *groupReference) key(typ *abi.MapType, i uintptr) unsafe.Pointer {
offset := groupSlotsOffset + i*typ.SlotSize
return unsafe.Pointer(uintptr(g.data) + offset)
}
// elem returns a pointer to the element at index i.
-func (g *groupReference) elem(typ *abi.SwissMapType, i uintptr) unsafe.Pointer {
+func (g *groupReference) elem(typ *abi.MapType, i uintptr) unsafe.Pointer {
offset := groupSlotsOffset + i*typ.SlotSize + typ.ElemOff
return unsafe.Pointer(uintptr(g.data) + offset)
@@ -310,7 +310,7 @@ type groupsReference struct {
// newGroups allocates a new array of length groups.
//
// Length must be a power of two.
-func newGroups(typ *abi.SwissMapType, length uint64) groupsReference {
+func newGroups(typ *abi.MapType, length uint64) groupsReference {
return groupsReference{
// TODO: make the length type the same throughout.
data: newarray(typ.Group, int(length)),
@@ -319,7 +319,7 @@ func newGroups(typ *abi.SwissMapType, length uint64) groupsReference {
}
// group returns the group at index i.
-func (g *groupsReference) group(typ *abi.SwissMapType, i uint64) groupReference {
+func (g *groupsReference) group(typ *abi.MapType, i uint64) groupReference {
// TODO(prattmic): Do something here about truncation on cast to
// uintptr on 32-bit systems?
offset := uintptr(i) * typ.GroupSize
@@ -329,11 +329,11 @@ func (g *groupsReference) group(typ *abi.SwissMapType, i uint64) groupReference
}
}
-func cloneGroup(typ *abi.SwissMapType, newGroup, oldGroup groupReference) {
+func cloneGroup(typ *abi.MapType, newGroup, oldGroup groupReference) {
typedmemmove(typ.Group, newGroup.data, oldGroup.data)
if typ.IndirectKey() {
// Deep copy keys if indirect.
- for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
+ for i := uintptr(0); i < abi.MapGroupSlots; i++ {
oldKey := *(*unsafe.Pointer)(oldGroup.key(typ, i))
if oldKey == nil {
continue
@@ -345,7 +345,7 @@ func cloneGroup(typ *abi.SwissMapType, newGroup, oldGroup groupReference) {
}
if typ.IndirectElem() {
// Deep copy elems if indirect.
- for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
+ for i := uintptr(0); i < abi.MapGroupSlots; i++ {
oldElem := *(*unsafe.Pointer)(oldGroup.elem(typ, i))
if oldElem == nil {
continue
diff --git a/src/internal/runtime/maps/map.go b/src/internal/runtime/maps/map.go
index 3b9a06239c..865a3f36c2 100644
--- a/src/internal/runtime/maps/map.go
+++ b/src/internal/runtime/maps/map.go
@@ -21,7 +21,7 @@ import (
//
// Terminology:
// - Slot: A storage location of a single key/element pair.
-// - Group: A group of abi.SwissMapGroupSlots (8) slots, plus a control word.
+// - Group: A group of abi.MapGroupSlots (8) slots, plus a control word.
// - Control word: An 8-byte word which denotes whether each slot is empty,
// deleted, or used. If a slot is used, its control byte also contains the
// lower 7 bits of the hash (H2).
@@ -113,7 +113,7 @@ import (
// Note that each table has its own load factor and grows independently. If the
// 1st bucket grows, it will split. We'll need 2 bits to select tables, though
// we'll have 3 tables total rather than 4. We support this by allowing
-// multiple indicies to point to the same table. This example:
+// multiple indices to point to the same table. This example:
//
// directory (globalDepth=2)
// +----+
@@ -191,7 +191,7 @@ func h2(h uintptr) uintptr {
return h & 0x7f
}
-// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map_swiss.go:SwissMapType.
+// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map.go:MapType.
type Map struct {
// The number of filled slots (i.e. the number of elements in all
// tables). Excludes deleted slots.
@@ -212,7 +212,7 @@ type Map struct {
// details.
//
// Small map optimization: if the map always contained
- // abi.SwissMapGroupSlots or fewer entries, it fits entirely in a
+ // abi.MapGroupSlots or fewer entries, it fits entirely in a
// single group. In that case dirPtr points directly to a single group.
//
// dirPtr *group
@@ -257,14 +257,14 @@ func depthToShift(depth uint8) uint8 {
// maxAlloc should be runtime.maxAlloc.
//
// TODO(prattmic): Put maxAlloc somewhere accessible.
-func NewMap(mt *abi.SwissMapType, hint uintptr, m *Map, maxAlloc uintptr) *Map {
+func NewMap(mt *abi.MapType, hint uintptr, m *Map, maxAlloc uintptr) *Map {
if m == nil {
m = new(Map)
}
m.seed = uintptr(rand())
- if hint <= abi.SwissMapGroupSlots {
+ if hint <= abi.MapGroupSlots {
// A small map can fill all 8 slots, so no need to increase
// target capacity.
//
@@ -286,7 +286,7 @@ func NewMap(mt *abi.SwissMapType, hint uintptr, m *Map, maxAlloc uintptr) *Map {
// Set initial capacity to hold hint entries without growing in the
// average case.
- targetCapacity := (hint * abi.SwissMapGroupSlots) / maxAvgGroupLoad
+ targetCapacity := (hint * abi.MapGroupSlots) / maxAvgGroupLoad
if targetCapacity < hint { // overflow
return m // return an empty map.
}
@@ -365,7 +365,7 @@ func (m *Map) installTableSplit(old, left, right *table) {
t := m.directoryAt(uintptr(i))
newDir[2*i] = t
newDir[2*i+1] = t
- // t may already exist in multiple indicies. We should
+ // t may already exist in multiple indices. We should
// only update t.index once. Since the index must
// increase, seeing the original index means this must
// be the first time we've encountered this table.
@@ -380,7 +380,7 @@ func (m *Map) installTableSplit(old, left, right *table) {
m.dirLen = len(newDir)
}
- // N.B. left and right may still consume multiple indicies if the
+ // N.B. left and right may still consume multiple indices if the
// directory has grown multiple times since old was last split.
left.index = old.index
m.replaceTable(left)
@@ -396,11 +396,11 @@ func (m *Map) Used() uint64 {
// Get performs a lookup of the key that key points to. It returns a pointer to
// the element, or false if the key doesn't exist.
-func (m *Map) Get(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
+func (m *Map) Get(typ *abi.MapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
return m.getWithoutKey(typ, key)
}
-func (m *Map) getWithKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
+func (m *Map) getWithKey(typ *abi.MapType, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
if m.Used() == 0 {
return nil, nil, false
}
@@ -419,7 +419,7 @@ func (m *Map) getWithKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Poin
return m.directoryAt(idx).getWithKey(typ, hash, key)
}
-func (m *Map) getWithoutKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
+func (m *Map) getWithoutKey(typ *abi.MapType, key unsafe.Pointer) (unsafe.Pointer, bool) {
if m.Used() == 0 {
return nil, false
}
@@ -439,7 +439,7 @@ func (m *Map) getWithoutKey(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.P
return m.directoryAt(idx).getWithoutKey(typ, hash, key)
}
-func (m *Map) getWithKeySmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
+func (m *Map) getWithKeySmall(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
g := groupReference{
data: m.dirPtr,
}
@@ -470,7 +470,7 @@ func (m *Map) getWithKeySmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Po
return nil, nil, false
}
-func (m *Map) Put(typ *abi.SwissMapType, key, elem unsafe.Pointer) {
+func (m *Map) Put(typ *abi.MapType, key, elem unsafe.Pointer) {
slotElem := m.PutSlot(typ, key)
typedmemmove(typ.Elem, slotElem, elem)
}
@@ -479,7 +479,7 @@ func (m *Map) Put(typ *abi.SwissMapType, key, elem unsafe.Pointer) {
// should be written.
//
// PutSlot never returns nil.
-func (m *Map) PutSlot(typ *abi.SwissMapType, key unsafe.Pointer) unsafe.Pointer {
+func (m *Map) PutSlot(typ *abi.MapType, key unsafe.Pointer) unsafe.Pointer {
if m.writing != 0 {
fatal("concurrent map writes")
}
@@ -495,7 +495,7 @@ func (m *Map) PutSlot(typ *abi.SwissMapType, key unsafe.Pointer) unsafe.Pointer
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmall(typ, hash, key)
if m.writing == 0 {
@@ -529,7 +529,7 @@ func (m *Map) PutSlot(typ *abi.SwissMapType, key unsafe.Pointer) unsafe.Pointer
}
}
-func (m *Map) putSlotSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
+func (m *Map) putSlotSmall(typ *abi.MapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
@@ -591,7 +591,7 @@ func (m *Map) putSlotSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Point
return slotElem
}
-func (m *Map) growToSmall(typ *abi.SwissMapType) {
+func (m *Map) growToSmall(typ *abi.MapType) {
grp := newGroups(typ, 1)
m.dirPtr = grp.data
@@ -601,14 +601,14 @@ func (m *Map) growToSmall(typ *abi.SwissMapType) {
g.ctrls().setEmpty()
}
-func (m *Map) growToTable(typ *abi.SwissMapType) {
- tab := newTable(typ, 2*abi.SwissMapGroupSlots, 0, 0)
+func (m *Map) growToTable(typ *abi.MapType) {
+ tab := newTable(typ, 2*abi.MapGroupSlots, 0, 0)
g := groupReference{
data: m.dirPtr,
}
- for i := uintptr(0); i < abi.SwissMapGroupSlots; i++ {
+ for i := uintptr(0); i < abi.MapGroupSlots; i++ {
if (g.ctrls().get(i) & ctrlEmpty) == ctrlEmpty {
// Empty
continue
@@ -640,7 +640,7 @@ func (m *Map) growToTable(typ *abi.SwissMapType) {
m.globalShift = depthToShift(m.globalDepth)
}
-func (m *Map) Delete(typ *abi.SwissMapType, key unsafe.Pointer) {
+func (m *Map) Delete(typ *abi.MapType, key unsafe.Pointer) {
if m == nil || m.Used() == 0 {
if err := mapKeyError(typ, key); err != nil {
panic(err) // see issue 23734
@@ -680,7 +680,7 @@ func (m *Map) Delete(typ *abi.SwissMapType, key unsafe.Pointer) {
m.writing ^= 1
}
-func (m *Map) deleteSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) {
+func (m *Map) deleteSmall(typ *abi.MapType, hash uintptr, key unsafe.Pointer) {
g := groupReference{
data: m.dirPtr,
}
@@ -728,7 +728,7 @@ func (m *Map) deleteSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointe
}
// Clear deletes all entries from the map resulting in an empty map.
-func (m *Map) Clear(typ *abi.SwissMapType) {
+func (m *Map) Clear(typ *abi.MapType) {
if m == nil || m.Used() == 0 && !m.tombstonePossible {
return
}
@@ -766,7 +766,7 @@ func (m *Map) Clear(typ *abi.SwissMapType) {
m.writing ^= 1
}
-func (m *Map) clearSmall(typ *abi.SwissMapType) {
+func (m *Map) clearSmall(typ *abi.MapType) {
g := groupReference{
data: m.dirPtr,
}
@@ -777,7 +777,7 @@ func (m *Map) clearSmall(typ *abi.SwissMapType) {
m.used = 0
}
-func (m *Map) Clone(typ *abi.SwissMapType) *Map {
+func (m *Map) Clone(typ *abi.MapType) *Map {
// Note: this should never be called with a nil map.
if m.writing != 0 {
fatal("concurrent map clone and map write")
@@ -814,14 +814,7 @@ func (m *Map) Clone(typ *abi.SwissMapType) *Map {
return m
}
-func OldMapKeyError(t *abi.OldMapType, p unsafe.Pointer) error {
- if !t.HashMightPanic() {
- return nil
- }
- return mapKeyError2(t.Key, p)
-}
-
-func mapKeyError(t *abi.SwissMapType, p unsafe.Pointer) error {
+func mapKeyError(t *abi.MapType, p unsafe.Pointer) error {
if !t.HashMightPanic() {
return nil
}
@@ -859,7 +852,7 @@ func mapKeyError2(t *abi.Type, p unsafe.Pointer) error {
return unhashableTypeError{t}
}
- if t.Kind_&abi.KindDirectIface != 0 {
+ if t.IsDirectIface() {
return mapKeyError2(t, unsafe.Pointer(pdata))
} else {
return mapKeyError2(t, *pdata)
diff --git a/src/internal/runtime/maps/map_swiss_test.go b/src/internal/runtime/maps/map_swiss_test.go
deleted file mode 100644
index eef1c5b191..0000000000
--- a/src/internal/runtime/maps/map_swiss_test.go
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Tests of map internals that need to use the builtin map type, and thus must
-// be built with GOEXPERIMENT=swissmap.
-
-//go:build goexperiment.swissmap
-
-package maps_test
-
-import (
- "fmt"
- "internal/abi"
- "internal/runtime/maps"
- "testing"
- "unsafe"
-)
-
-var alwaysFalse bool
-var escapeSink any
-
-func escape[T any](x T) T {
- if alwaysFalse {
- escapeSink = x
- }
- return x
-}
-
-const (
- belowMax = abi.SwissMapGroupSlots * 3 / 2 // 1.5 * group max = 2 groups @ 75%
- atMax = (2 * abi.SwissMapGroupSlots * maps.MaxAvgGroupLoad) / abi.SwissMapGroupSlots // 2 groups at 7/8 full.
-)
-
-func TestTableGroupCount(t *testing.T) {
- // Test that maps of different sizes have the right number of
- // tables/groups.
-
- type mapCount struct {
- tables int
- groups uint64
- }
-
- type mapCase struct {
- initialLit mapCount
- initialHint mapCount
- after mapCount
- }
-
- var testCases = []struct {
- n int // n is the number of map elements
- escape mapCase // expected values for escaping map
- }{
- {
- n: -(1 << 30),
- escape: mapCase{
- initialLit: mapCount{0, 0},
- initialHint: mapCount{0, 0},
- after: mapCount{0, 0},
- },
- },
- {
- n: -1,
- escape: mapCase{
- initialLit: mapCount{0, 0},
- initialHint: mapCount{0, 0},
- after: mapCount{0, 0},
- },
- },
- {
- n: 0,
- escape: mapCase{
- initialLit: mapCount{0, 0},
- initialHint: mapCount{0, 0},
- after: mapCount{0, 0},
- },
- },
- {
- n: 1,
- escape: mapCase{
- initialLit: mapCount{0, 0},
- initialHint: mapCount{0, 0},
- after: mapCount{0, 1},
- },
- },
- {
- n: abi.SwissMapGroupSlots,
- escape: mapCase{
- initialLit: mapCount{0, 0},
- initialHint: mapCount{0, 0},
- after: mapCount{0, 1},
- },
- },
- {
- n: abi.SwissMapGroupSlots + 1,
- escape: mapCase{
- initialLit: mapCount{0, 0},
- initialHint: mapCount{1, 2},
- after: mapCount{1, 2},
- },
- },
- {
- n: belowMax, // 1.5 group max = 2 groups @ 75%
- escape: mapCase{
- initialLit: mapCount{0, 0},
- initialHint: mapCount{1, 2},
- after: mapCount{1, 2},
- },
- },
- {
- n: atMax, // 2 groups at max
- escape: mapCase{
- initialLit: mapCount{0, 0},
- initialHint: mapCount{1, 2},
- after: mapCount{1, 2},
- },
- },
- {
- n: atMax + 1, // 2 groups at max + 1 -> grow to 4 groups
- escape: mapCase{
- initialLit: mapCount{0, 0},
- initialHint: mapCount{1, 4},
- after: mapCount{1, 4},
- },
- },
- {
- n: 2 * belowMax, // 3 * group max = 4 groups @75%
- escape: mapCase{
- initialLit: mapCount{0, 0},
- initialHint: mapCount{1, 4},
- after: mapCount{1, 4},
- },
- },
- {
- n: 2*atMax + 1, // 4 groups at max + 1 -> grow to 8 groups
- escape: mapCase{
- initialLit: mapCount{0, 0},
- initialHint: mapCount{1, 8},
- after: mapCount{1, 8},
- },
- },
- }
-
- testMap := func(t *testing.T, m map[int]int, n int, initial, after mapCount) {
- mm := *(**maps.Map)(unsafe.Pointer(&m))
-
- gotTab := mm.TableCount()
- if gotTab != initial.tables {
- t.Errorf("initial TableCount got %d want %d", gotTab, initial.tables)
- }
-
- gotGroup := mm.GroupCount()
- if gotGroup != initial.groups {
- t.Errorf("initial GroupCount got %d want %d", gotGroup, initial.groups)
- }
-
- for i := 0; i < n; i++ {
- m[i] = i
- }
-
- gotTab = mm.TableCount()
- if gotTab != after.tables {
- t.Errorf("after TableCount got %d want %d", gotTab, after.tables)
- }
-
- gotGroup = mm.GroupCount()
- if gotGroup != after.groups {
- t.Errorf("after GroupCount got %d want %d", gotGroup, after.groups)
- }
- }
-
- t.Run("mapliteral", func(t *testing.T) {
- for _, tc := range testCases {
- t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
- t.Run("escape", func(t *testing.T) {
- m := escape(map[int]int{})
- testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after)
- })
- })
- }
- })
- t.Run("nohint", func(t *testing.T) {
- for _, tc := range testCases {
- t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
- t.Run("escape", func(t *testing.T) {
- m := escape(make(map[int]int))
- testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after)
- })
- })
- }
- })
- t.Run("makemap", func(t *testing.T) {
- for _, tc := range testCases {
- t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
- t.Run("escape", func(t *testing.T) {
- m := escape(make(map[int]int, tc.n))
- testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
- })
- })
- }
- })
- t.Run("makemap64", func(t *testing.T) {
- for _, tc := range testCases {
- t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
- t.Run("escape", func(t *testing.T) {
- m := escape(make(map[int]int, int64(tc.n)))
- testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
- })
- })
- }
- })
-}
-
-func TestTombstoneGrow(t *testing.T) {
- tableSizes := []int{16, 32, 64, 128, 256}
- for _, tableSize := range tableSizes {
- for _, load := range []string{"low", "mid", "high"} {
- capacity := tableSize * 7 / 8
- var initialElems int
- switch load {
- case "low":
- initialElems = capacity / 8
- case "mid":
- initialElems = capacity / 2
- case "high":
- initialElems = capacity
- }
- t.Run(fmt.Sprintf("tableSize=%d/elems=%d/load=%0.3f", tableSize, initialElems, float64(initialElems)/float64(tableSize)), func(t *testing.T) {
- allocs := testing.AllocsPerRun(1, func() {
- // Fill the map with elements.
- m := make(map[int]int, capacity)
- for i := range initialElems {
- m[i] = i
- }
-
- // This is the heart of our test.
- // Loop over the map repeatedly, deleting a key then adding a not-yet-seen key
- // while keeping the map at a ~constant number of elements (+/-1).
- nextKey := initialElems
- for range 100000 {
- for k := range m {
- delete(m, k)
- break
- }
- m[nextKey] = nextKey
- nextKey++
- if len(m) != initialElems {
- t.Fatal("len(m) should remain constant")
- }
- }
- })
-
- // The make has 4 allocs (map, directory, table, groups).
- // Each growth has 2 allocs (table, groups).
- // We allow two growths if we start full, 1 otherwise.
- // Fail (somewhat arbitrarily) if there are more than that.
- allowed := float64(4 + 1*2)
- if initialElems == capacity {
- allowed += 2
- }
- if allocs > allowed {
- t.Fatalf("got %v allocations, allowed %v", allocs, allowed)
- }
- })
- }
- }
-}
diff --git a/src/internal/runtime/maps/map_test.go b/src/internal/runtime/maps/map_test.go
index 160450ebb2..c8ef25af9a 100644
--- a/src/internal/runtime/maps/map_test.go
+++ b/src/internal/runtime/maps/map_test.go
@@ -15,8 +15,8 @@ import (
func TestCtrlSize(t *testing.T) {
cs := unsafe.Sizeof(maps.CtrlGroup(0))
- if cs != abi.SwissMapGroupSlots {
- t.Errorf("ctrlGroup size got %d want abi.SwissMapGroupSlots %d", cs, abi.SwissMapGroupSlots)
+ if cs != abi.MapGroupSlots {
+ t.Errorf("ctrlGroup size got %d want abi.MapGroupSlots %d", cs, abi.MapGroupSlots)
}
}
@@ -630,7 +630,7 @@ func TestMapZeroSizeSlot(t *testing.T) {
}
func TestMapIndirect(t *testing.T) {
- type big [abi.SwissMapMaxKeyBytes + abi.SwissMapMaxElemBytes]byte
+ type big [abi.MapMaxKeyBytes + abi.MapMaxElemBytes]byte
m, typ := maps.NewTestMap[big, big](8)
@@ -699,3 +699,252 @@ func TestMapDeleteClear(t *testing.T) {
t.Errorf("Delete(%d) failed to clear element. got %d want 0", key, gotElem)
}
}
+
+var alwaysFalse bool
+var escapeSink any
+
+func escape[T any](x T) T {
+ if alwaysFalse {
+ escapeSink = x
+ }
+ return x
+}
+
+const (
+ belowMax = abi.MapGroupSlots * 3 / 2 // 1.5 * group max = 2 groups @ 75%
+ atMax = (2 * abi.MapGroupSlots * maps.MaxAvgGroupLoad) / abi.MapGroupSlots // 2 groups at 7/8 full.
+)
+
+func TestTableGroupCount(t *testing.T) {
+ // Test that maps of different sizes have the right number of
+ // tables/groups.
+
+ type mapCount struct {
+ tables int
+ groups uint64
+ }
+
+ type mapCase struct {
+ initialLit mapCount
+ initialHint mapCount
+ after mapCount
+ }
+
+ var testCases = []struct {
+ n int // n is the number of map elements
+ escape mapCase // expected values for escaping map
+ }{
+ {
+ n: -(1 << 30),
+ escape: mapCase{
+ initialLit: mapCount{0, 0},
+ initialHint: mapCount{0, 0},
+ after: mapCount{0, 0},
+ },
+ },
+ {
+ n: -1,
+ escape: mapCase{
+ initialLit: mapCount{0, 0},
+ initialHint: mapCount{0, 0},
+ after: mapCount{0, 0},
+ },
+ },
+ {
+ n: 0,
+ escape: mapCase{
+ initialLit: mapCount{0, 0},
+ initialHint: mapCount{0, 0},
+ after: mapCount{0, 0},
+ },
+ },
+ {
+ n: 1,
+ escape: mapCase{
+ initialLit: mapCount{0, 0},
+ initialHint: mapCount{0, 0},
+ after: mapCount{0, 1},
+ },
+ },
+ {
+ n: abi.MapGroupSlots,
+ escape: mapCase{
+ initialLit: mapCount{0, 0},
+ initialHint: mapCount{0, 0},
+ after: mapCount{0, 1},
+ },
+ },
+ {
+ n: abi.MapGroupSlots + 1,
+ escape: mapCase{
+ initialLit: mapCount{0, 0},
+ initialHint: mapCount{1, 2},
+ after: mapCount{1, 2},
+ },
+ },
+ {
+ n: belowMax, // 1.5 group max = 2 groups @ 75%
+ escape: mapCase{
+ initialLit: mapCount{0, 0},
+ initialHint: mapCount{1, 2},
+ after: mapCount{1, 2},
+ },
+ },
+ {
+ n: atMax, // 2 groups at max
+ escape: mapCase{
+ initialLit: mapCount{0, 0},
+ initialHint: mapCount{1, 2},
+ after: mapCount{1, 2},
+ },
+ },
+ {
+ n: atMax + 1, // 2 groups at max + 1 -> grow to 4 groups
+ escape: mapCase{
+ initialLit: mapCount{0, 0},
+ initialHint: mapCount{1, 4},
+ after: mapCount{1, 4},
+ },
+ },
+ {
+ n: 2 * belowMax, // 3 * group max = 4 groups @75%
+ escape: mapCase{
+ initialLit: mapCount{0, 0},
+ initialHint: mapCount{1, 4},
+ after: mapCount{1, 4},
+ },
+ },
+ {
+ n: 2*atMax + 1, // 4 groups at max + 1 -> grow to 8 groups
+ escape: mapCase{
+ initialLit: mapCount{0, 0},
+ initialHint: mapCount{1, 8},
+ after: mapCount{1, 8},
+ },
+ },
+ }
+
+ testMap := func(t *testing.T, m map[int]int, n int, initial, after mapCount) {
+ mm := *(**maps.Map)(unsafe.Pointer(&m))
+
+ gotTab := mm.TableCount()
+ if gotTab != initial.tables {
+ t.Errorf("initial TableCount got %d want %d", gotTab, initial.tables)
+ }
+
+ gotGroup := mm.GroupCount()
+ if gotGroup != initial.groups {
+ t.Errorf("initial GroupCount got %d want %d", gotGroup, initial.groups)
+ }
+
+ for i := 0; i < n; i++ {
+ m[i] = i
+ }
+
+ gotTab = mm.TableCount()
+ if gotTab != after.tables {
+ t.Errorf("after TableCount got %d want %d", gotTab, after.tables)
+ }
+
+ gotGroup = mm.GroupCount()
+ if gotGroup != after.groups {
+ t.Errorf("after GroupCount got %d want %d", gotGroup, after.groups)
+ }
+ }
+
+ t.Run("mapliteral", func(t *testing.T) {
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
+ t.Run("escape", func(t *testing.T) {
+ m := escape(map[int]int{})
+ testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after)
+ })
+ })
+ }
+ })
+ t.Run("nohint", func(t *testing.T) {
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
+ t.Run("escape", func(t *testing.T) {
+ m := escape(make(map[int]int))
+ testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after)
+ })
+ })
+ }
+ })
+ t.Run("makemap", func(t *testing.T) {
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
+ t.Run("escape", func(t *testing.T) {
+ m := escape(make(map[int]int, tc.n))
+ testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
+ })
+ })
+ }
+ })
+ t.Run("makemap64", func(t *testing.T) {
+ for _, tc := range testCases {
+ t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
+ t.Run("escape", func(t *testing.T) {
+ m := escape(make(map[int]int, int64(tc.n)))
+ testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
+ })
+ })
+ }
+ })
+}
+
+func TestTombstoneGrow(t *testing.T) {
+ tableSizes := []int{16, 32, 64, 128, 256}
+ for _, tableSize := range tableSizes {
+ for _, load := range []string{"low", "mid", "high"} {
+ capacity := tableSize * 7 / 8
+ var initialElems int
+ switch load {
+ case "low":
+ initialElems = capacity / 8
+ case "mid":
+ initialElems = capacity / 2
+ case "high":
+ initialElems = capacity
+ }
+ t.Run(fmt.Sprintf("tableSize=%d/elems=%d/load=%0.3f", tableSize, initialElems, float64(initialElems)/float64(tableSize)), func(t *testing.T) {
+ allocs := testing.AllocsPerRun(1, func() {
+ // Fill the map with elements.
+ m := make(map[int]int, capacity)
+ for i := range initialElems {
+ m[i] = i
+ }
+
+ // This is the heart of our test.
+ // Loop over the map repeatedly, deleting a key then adding a not-yet-seen key
+ // while keeping the map at a ~constant number of elements (+/-1).
+ nextKey := initialElems
+ for range 100000 {
+ for k := range m {
+ delete(m, k)
+ break
+ }
+ m[nextKey] = nextKey
+ nextKey++
+ if len(m) != initialElems {
+ t.Fatal("len(m) should remain constant")
+ }
+ }
+ })
+
+ // The make has 4 allocs (map, directory, table, groups).
+ // Each growth has 2 allocs (table, groups).
+ // We allow two growths if we start full, 1 otherwise.
+ // Fail (somewhat arbitrarily) if there are more than that.
+ allowed := float64(4 + 1*2)
+ if initialElems == capacity {
+ allowed += 2
+ }
+ if allocs > allowed {
+ t.Fatalf("got %v allocations, allowed %v", allocs, allowed)
+ }
+ })
+ }
+ }
+}
diff --git a/src/internal/runtime/maps/runtime.go b/src/internal/runtime/maps/runtime.go
index 3d06f54f4d..ff8a748249 100644
--- a/src/internal/runtime/maps/runtime.go
+++ b/src/internal/runtime/maps/runtime.go
@@ -6,6 +6,10 @@ package maps
import (
"internal/abi"
+ "internal/asan"
+ "internal/msan"
+ "internal/race"
+ "internal/runtime/sys"
"unsafe"
)
@@ -28,3 +32,337 @@ func newarray(typ *abi.Type, n int) unsafe.Pointer
//go:linkname newobject
func newobject(typ *abi.Type) unsafe.Pointer
+
+// Pushed from runtime in order to use runtime.plainError
+//
+//go:linkname errNilAssign
+var errNilAssign error
+
+// Pull from runtime. It is important that is this the exact same copy as the
+// runtime because runtime.mapaccess1_fat compares the returned pointer with
+// &runtime.zeroVal[0].
+// TODO: move zeroVal to internal/abi?
+//
+//go:linkname zeroVal runtime.zeroVal
+var zeroVal [abi.ZeroValSize]byte
+
+// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
+// it will return a reference to the zero object for the elem type if
+// the key is not in the map.
+// NOTE: The returned pointer may keep the whole map live, so don't
+// hold onto it for very long.
+//
+//go:linkname runtime_mapaccess1 runtime.mapaccess1
+func runtime_mapaccess1(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
+ if race.Enabled && m != nil {
+ callerpc := sys.GetCallerPC()
+ pc := abi.FuncPCABIInternal(runtime_mapaccess1)
+ race.ReadPC(unsafe.Pointer(m), callerpc, pc)
+ race.ReadObjectPC(typ.Key, key, callerpc, pc)
+ }
+ if msan.Enabled && m != nil {
+ msan.Read(key, typ.Key.Size_)
+ }
+ if asan.Enabled && m != nil {
+ asan.Read(key, typ.Key.Size_)
+ }
+
+ if m == nil || m.Used() == 0 {
+ if err := mapKeyError(typ, key); err != nil {
+ panic(err) // see issue 23734
+ }
+ return unsafe.Pointer(&zeroVal[0])
+ }
+
+ if m.writing != 0 {
+ fatal("concurrent map read and map write")
+ }
+
+ hash := typ.Hasher(key, m.seed)
+
+ if m.dirLen <= 0 {
+ _, elem, ok := m.getWithKeySmall(typ, hash, key)
+ if !ok {
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ return elem
+ }
+
+ // Select table.
+ idx := m.directoryIndex(hash)
+ t := m.directoryAt(idx)
+
+ // Probe table.
+ seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ for ; ; seq = seq.next() {
+ g := t.groups.group(typ, seq.offset)
+
+ match := g.ctrls().matchH2(h2(hash))
+
+ for match != 0 {
+ i := match.first()
+
+ slotKey := g.key(typ, i)
+ slotKeyOrig := slotKey
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
+ if typ.Key.Equal(key, slotKey) {
+ slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+ if typ.IndirectElem() {
+ slotElem = *((*unsafe.Pointer)(slotElem))
+ }
+ return slotElem
+ }
+ match = match.removeFirst()
+ }
+
+ match = g.ctrls().matchEmpty()
+ if match != 0 {
+ // Finding an empty slot means we've reached the end of
+ // the probe sequence.
+ return unsafe.Pointer(&zeroVal[0])
+ }
+ }
+}
+
+//go:linkname runtime_mapaccess2 runtime.mapaccess2
+func runtime_mapaccess2(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
+ if race.Enabled && m != nil {
+ callerpc := sys.GetCallerPC()
+ pc := abi.FuncPCABIInternal(runtime_mapaccess1)
+ race.ReadPC(unsafe.Pointer(m), callerpc, pc)
+ race.ReadObjectPC(typ.Key, key, callerpc, pc)
+ }
+ if msan.Enabled && m != nil {
+ msan.Read(key, typ.Key.Size_)
+ }
+ if asan.Enabled && m != nil {
+ asan.Read(key, typ.Key.Size_)
+ }
+
+ if m == nil || m.Used() == 0 {
+ if err := mapKeyError(typ, key); err != nil {
+ panic(err) // see issue 23734
+ }
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+
+ if m.writing != 0 {
+ fatal("concurrent map read and map write")
+ }
+
+ hash := typ.Hasher(key, m.seed)
+
+ if m.dirLen == 0 {
+ _, elem, ok := m.getWithKeySmall(typ, hash, key)
+ if !ok {
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ return elem, true
+ }
+
+ // Select table.
+ idx := m.directoryIndex(hash)
+ t := m.directoryAt(idx)
+
+ // Probe table.
+ seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+ for ; ; seq = seq.next() {
+ g := t.groups.group(typ, seq.offset)
+
+ match := g.ctrls().matchH2(h2(hash))
+
+ for match != 0 {
+ i := match.first()
+
+ slotKey := g.key(typ, i)
+ slotKeyOrig := slotKey
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
+ if typ.Key.Equal(key, slotKey) {
+ slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+ if typ.IndirectElem() {
+ slotElem = *((*unsafe.Pointer)(slotElem))
+ }
+ return slotElem, true
+ }
+ match = match.removeFirst()
+ }
+
+ match = g.ctrls().matchEmpty()
+ if match != 0 {
+ // Finding an empty slot means we've reached the end of
+ // the probe sequence.
+ return unsafe.Pointer(&zeroVal[0]), false
+ }
+ }
+}
+
+//go:linkname runtime_mapassign runtime.mapassign
+func runtime_mapassign(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
+ if m == nil {
+ panic(errNilAssign)
+ }
+ if race.Enabled {
+ callerpc := sys.GetCallerPC()
+ pc := abi.FuncPCABIInternal(runtime_mapassign)
+ race.WritePC(unsafe.Pointer(m), callerpc, pc)
+ race.ReadObjectPC(typ.Key, key, callerpc, pc)
+ }
+ if msan.Enabled {
+ msan.Read(key, typ.Key.Size_)
+ }
+ if asan.Enabled {
+ asan.Read(key, typ.Key.Size_)
+ }
+ if m.writing != 0 {
+ fatal("concurrent map writes")
+ }
+
+ hash := typ.Hasher(key, m.seed)
+
+ // Set writing after calling Hasher, since Hasher may panic, in which
+ // case we have not actually done a write.
+ m.writing ^= 1 // toggle, see comment on writing
+
+ if m.dirPtr == nil {
+ m.growToSmall(typ)
+ }
+
+ if m.dirLen == 0 {
+ if m.used < abi.MapGroupSlots {
+ elem := m.putSlotSmall(typ, hash, key)
+
+ if m.writing == 0 {
+ fatal("concurrent map writes")
+ }
+ m.writing ^= 1
+
+ return elem
+ }
+
+ // Can't fit another entry, grow to full size map.
+ m.growToTable(typ)
+ }
+
+ var slotElem unsafe.Pointer
+outer:
+ for {
+ // Select table.
+ idx := m.directoryIndex(hash)
+ t := m.directoryAt(idx)
+
+ seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
+
+ // As we look for a match, keep track of the first deleted slot
+ // we find, which we'll use to insert the new entry if
+ // necessary.
+ var firstDeletedGroup groupReference
+ var firstDeletedSlot uintptr
+
+ for ; ; seq = seq.next() {
+ g := t.groups.group(typ, seq.offset)
+ match := g.ctrls().matchH2(h2(hash))
+
+ // Look for an existing slot containing this key.
+ for match != 0 {
+ i := match.first()
+
+ slotKey := g.key(typ, i)
+ slotKeyOrig := slotKey
+ if typ.IndirectKey() {
+ slotKey = *((*unsafe.Pointer)(slotKey))
+ }
+ if typ.Key.Equal(key, slotKey) {
+ if typ.NeedKeyUpdate() {
+ typedmemmove(typ.Key, slotKey, key)
+ }
+
+ slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+ if typ.IndirectElem() {
+ slotElem = *((*unsafe.Pointer)(slotElem))
+ }
+
+ t.checkInvariants(typ, m)
+ break outer
+ }
+ match = match.removeFirst()
+ }
+
+ // No existing slot for this key in this group. Is this the end
+ // of the probe sequence?
+ match = g.ctrls().matchEmpty()
+ if match != 0 {
+ // Finding an empty slot means we've reached the end of
+ // the probe sequence.
+
+ var i uintptr
+
+ // If we found a deleted slot along the way, we
+ // can replace it without consuming growthLeft.
+ if firstDeletedGroup.data != nil {
+ g = firstDeletedGroup
+ i = firstDeletedSlot
+ t.growthLeft++ // will be decremented below to become a no-op.
+ } else {
+ // Otherwise, use the empty slot.
+ i = match.first()
+ }
+
+ // If there is room left to grow, just insert the new entry.
+ if t.growthLeft > 0 {
+ slotKey := g.key(typ, i)
+ slotKeyOrig := slotKey
+ if typ.IndirectKey() {
+ kmem := newobject(typ.Key)
+ *(*unsafe.Pointer)(slotKey) = kmem
+ slotKey = kmem
+ }
+ typedmemmove(typ.Key, slotKey, key)
+
+ slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
+ if typ.IndirectElem() {
+ emem := newobject(typ.Elem)
+ *(*unsafe.Pointer)(slotElem) = emem
+ slotElem = emem
+ }
+
+ g.ctrls().set(i, ctrl(h2(hash)))
+ t.growthLeft--
+ t.used++
+ m.used++
+
+ t.checkInvariants(typ, m)
+ break outer
+ }
+
+ t.rehash(typ, m)
+ continue outer
+ }
+
+ // No empty slots in this group. Check for a deleted
+ // slot, which we'll use if we don't find a match later
+ // in the probe sequence.
+ //
+ // We only need to remember a single deleted slot.
+ if firstDeletedGroup.data == nil {
+ // Since we already checked for empty slots
+ // above, matches here must be deleted slots.
+ match = g.ctrls().matchEmptyOrDeleted()
+ if match != 0 {
+ firstDeletedGroup = g
+ firstDeletedSlot = match.first()
+ }
+ }
+ }
+ }
+
+ if m.writing == 0 {
+ fatal("concurrent map writes")
+ }
+ m.writing ^= 1
+
+ return slotElem
+}
diff --git a/src/internal/runtime/maps/runtime_fast32_swiss.go b/src/internal/runtime/maps/runtime_fast32.go
index d57d042527..beed67ce28 100644
--- a/src/internal/runtime/maps/runtime_fast32_swiss.go
+++ b/src/internal/runtime/maps/runtime_fast32.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.swissmap
-
package maps
import (
@@ -14,7 +12,7 @@ import (
)
//go:linkname runtime_mapaccess1_fast32 runtime.mapaccess1_fast32
-func runtime_mapaccess1_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer {
+func runtime_mapaccess1_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast32)
@@ -83,7 +81,7 @@ func runtime_mapaccess1_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe
}
//go:linkname runtime_mapaccess2_fast32 runtime.mapaccess2_fast32
-func runtime_mapaccess2_fast32(typ *abi.SwissMapType, m *Map, key uint32) (unsafe.Pointer, bool) {
+func runtime_mapaccess2_fast32(typ *abi.MapType, m *Map, key uint32) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast32)
@@ -151,7 +149,7 @@ func runtime_mapaccess2_fast32(typ *abi.SwissMapType, m *Map, key uint32) (unsaf
}
}
-func (m *Map) putSlotSmallFast32(typ *abi.SwissMapType, hash uintptr, key uint32) unsafe.Pointer {
+func (m *Map) putSlotSmallFast32(typ *abi.MapType, hash uintptr, key uint32) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
@@ -192,7 +190,7 @@ func (m *Map) putSlotSmallFast32(typ *abi.SwissMapType, hash uintptr, key uint32
}
//go:linkname runtime_mapassign_fast32 runtime.mapassign_fast32
-func runtime_mapassign_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer {
+func runtime_mapassign_fast32(typ *abi.MapType, m *Map, key uint32) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
@@ -217,7 +215,7 @@ func runtime_mapassign_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFast32(typ, hash, key)
if m.writing == 0 {
@@ -331,7 +329,7 @@ outer:
// TODO(prattmic): With some compiler refactoring we could avoid duplication of this function.
//
//go:linkname runtime_mapassign_fast32ptr runtime.mapassign_fast32ptr
-func runtime_mapassign_fast32ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
+func runtime_mapassign_fast32ptr(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
@@ -356,7 +354,7 @@ func runtime_mapassign_fast32ptr(typ *abi.SwissMapType, m *Map, key unsafe.Point
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFastPtr(typ, hash, key)
if m.writing == 0 {
@@ -460,7 +458,7 @@ outer:
}
//go:linkname runtime_mapdelete_fast32 runtime.mapdelete_fast32
-func runtime_mapdelete_fast32(typ *abi.SwissMapType, m *Map, key uint32) {
+func runtime_mapdelete_fast32(typ *abi.MapType, m *Map, key uint32) {
if race.Enabled {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapdelete_fast32)
diff --git a/src/internal/runtime/maps/runtime_fast64_swiss.go b/src/internal/runtime/maps/runtime_fast64.go
index 461cb1d318..2f9cf28daa 100644
--- a/src/internal/runtime/maps/runtime_fast64_swiss.go
+++ b/src/internal/runtime/maps/runtime_fast64.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.swissmap
-
package maps
import (
@@ -14,7 +12,7 @@ import (
)
//go:linkname runtime_mapaccess1_fast64 runtime.mapaccess1_fast64
-func runtime_mapaccess1_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer {
+func runtime_mapaccess1_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast64)
@@ -83,7 +81,7 @@ func runtime_mapaccess1_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe
}
//go:linkname runtime_mapaccess2_fast64 runtime.mapaccess2_fast64
-func runtime_mapaccess2_fast64(typ *abi.SwissMapType, m *Map, key uint64) (unsafe.Pointer, bool) {
+func runtime_mapaccess2_fast64(typ *abi.MapType, m *Map, key uint64) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast64)
@@ -151,7 +149,7 @@ func runtime_mapaccess2_fast64(typ *abi.SwissMapType, m *Map, key uint64) (unsaf
}
}
-func (m *Map) putSlotSmallFast64(typ *abi.SwissMapType, hash uintptr, key uint64) unsafe.Pointer {
+func (m *Map) putSlotSmallFast64(typ *abi.MapType, hash uintptr, key uint64) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
@@ -192,7 +190,7 @@ func (m *Map) putSlotSmallFast64(typ *abi.SwissMapType, hash uintptr, key uint64
}
//go:linkname runtime_mapassign_fast64 runtime.mapassign_fast64
-func runtime_mapassign_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer {
+func runtime_mapassign_fast64(typ *abi.MapType, m *Map, key uint64) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
@@ -217,7 +215,7 @@ func runtime_mapassign_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFast64(typ, hash, key)
if m.writing == 0 {
@@ -326,7 +324,7 @@ outer:
return slotElem
}
-func (m *Map) putSlotSmallFastPtr(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
+func (m *Map) putSlotSmallFastPtr(typ *abi.MapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
@@ -369,7 +367,7 @@ func (m *Map) putSlotSmallFastPtr(typ *abi.SwissMapType, hash uintptr, key unsaf
// Key is a 64-bit pointer (only called on 64-bit GOARCH).
//
//go:linkname runtime_mapassign_fast64ptr runtime.mapassign_fast64ptr
-func runtime_mapassign_fast64ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
+func runtime_mapassign_fast64ptr(typ *abi.MapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
@@ -394,7 +392,7 @@ func runtime_mapassign_fast64ptr(typ *abi.SwissMapType, m *Map, key unsafe.Point
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFastPtr(typ, hash, key)
if m.writing == 0 {
@@ -499,7 +497,7 @@ outer:
}
//go:linkname runtime_mapdelete_fast64 runtime.mapdelete_fast64
-func runtime_mapdelete_fast64(typ *abi.SwissMapType, m *Map, key uint64) {
+func runtime_mapdelete_fast64(typ *abi.MapType, m *Map, key uint64) {
if race.Enabled {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapdelete_fast64)
diff --git a/src/internal/runtime/maps/runtime_faststr_swiss.go b/src/internal/runtime/maps/runtime_faststr.go
index 0d7b02e20c..ddac7eacc5 100644
--- a/src/internal/runtime/maps/runtime_faststr_swiss.go
+++ b/src/internal/runtime/maps/runtime_faststr.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.swissmap
-
package maps
import (
@@ -14,7 +12,7 @@ import (
"unsafe"
)
-func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsafe.Pointer {
+func (m *Map) getWithoutKeySmallFastStr(typ *abi.MapType, key string) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
@@ -29,10 +27,10 @@ func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsaf
// for strings that are long enough that hashing is expensive.
if len(key) > 64 {
// String hashing and equality might be expensive. Do a quick check first.
- j := abi.SwissMapGroupSlots
- for i := range abi.SwissMapGroupSlots {
+ j := abi.MapGroupSlots
+ for i := range abi.MapGroupSlots {
if ctrls&(1<<7) == 0 && longStringQuickEqualityTest(key, *(*string)(slotKey)) {
- if j < abi.SwissMapGroupSlots {
+ if j < abi.MapGroupSlots {
// 2 strings both passed the quick equality test.
// Break out of this loop and do it the slow way.
goto dohash
@@ -42,7 +40,7 @@ func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsaf
slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize)
ctrls >>= 8
}
- if j == abi.SwissMapGroupSlots {
+ if j == abi.MapGroupSlots {
// No slot passed the quick test.
return nil
}
@@ -61,7 +59,7 @@ dohash:
ctrls = *g.ctrls()
slotKey = g.key(typ, 0)
- for range abi.SwissMapGroupSlots {
+ for range abi.MapGroupSlots {
if uint8(ctrls) == h2 && key == *(*string)(slotKey) {
return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize)
}
@@ -100,7 +98,7 @@ func stringPtr(s string) unsafe.Pointer {
}
//go:linkname runtime_mapaccess1_faststr runtime.mapaccess1_faststr
-func runtime_mapaccess1_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer {
+func runtime_mapaccess1_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess1_faststr)
@@ -159,7 +157,7 @@ func runtime_mapaccess1_faststr(typ *abi.SwissMapType, m *Map, key string) unsaf
}
//go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr
-func runtime_mapaccess2_faststr(typ *abi.SwissMapType, m *Map, key string) (unsafe.Pointer, bool) {
+func runtime_mapaccess2_faststr(typ *abi.MapType, m *Map, key string) (unsafe.Pointer, bool) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapaccess2_faststr)
@@ -217,7 +215,7 @@ func runtime_mapaccess2_faststr(typ *abi.SwissMapType, m *Map, key string) (unsa
}
}
-func (m *Map) putSlotSmallFastStr(typ *abi.SwissMapType, hash uintptr, key string) unsafe.Pointer {
+func (m *Map) putSlotSmallFastStr(typ *abi.MapType, hash uintptr, key string) unsafe.Pointer {
g := groupReference{
data: m.dirPtr,
}
@@ -260,7 +258,7 @@ func (m *Map) putSlotSmallFastStr(typ *abi.SwissMapType, hash uintptr, key strin
}
//go:linkname runtime_mapassign_faststr runtime.mapassign_faststr
-func runtime_mapassign_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer {
+func runtime_mapassign_faststr(typ *abi.MapType, m *Map, key string) unsafe.Pointer {
if m == nil {
panic(errNilAssign)
}
@@ -285,7 +283,7 @@ func runtime_mapassign_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe
}
if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
+ if m.used < abi.MapGroupSlots {
elem := m.putSlotSmallFastStr(typ, hash, key)
if m.writing == 0 {
@@ -398,7 +396,7 @@ outer:
}
//go:linkname runtime_mapdelete_faststr runtime.mapdelete_faststr
-func runtime_mapdelete_faststr(typ *abi.SwissMapType, m *Map, key string) {
+func runtime_mapdelete_faststr(typ *abi.MapType, m *Map, key string) {
if race.Enabled {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(runtime_mapdelete_faststr)
diff --git a/src/internal/runtime/maps/runtime_swiss.go b/src/internal/runtime/maps/runtime_swiss.go
deleted file mode 100644
index 3ea018185b..0000000000
--- a/src/internal/runtime/maps/runtime_swiss.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package maps
-
-import (
- "internal/abi"
- "internal/asan"
- "internal/msan"
- "internal/race"
- "internal/runtime/sys"
- "unsafe"
-)
-
-// Functions below pushed from runtime.
-
-// Pushed from runtime in order to use runtime.plainError
-//
-//go:linkname errNilAssign
-var errNilAssign error
-
-// Pull from runtime. It is important that is this the exact same copy as the
-// runtime because runtime.mapaccess1_fat compares the returned pointer with
-// &runtime.zeroVal[0].
-// TODO: move zeroVal to internal/abi?
-//
-//go:linkname zeroVal runtime.zeroVal
-var zeroVal [abi.ZeroValSize]byte
-
-// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
-// it will return a reference to the zero object for the elem type if
-// the key is not in the map.
-// NOTE: The returned pointer may keep the whole map live, so don't
-// hold onto it for very long.
-//
-//go:linkname runtime_mapaccess1 runtime.mapaccess1
-func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
- if race.Enabled && m != nil {
- callerpc := sys.GetCallerPC()
- pc := abi.FuncPCABIInternal(runtime_mapaccess1)
- race.ReadPC(unsafe.Pointer(m), callerpc, pc)
- race.ReadObjectPC(typ.Key, key, callerpc, pc)
- }
- if msan.Enabled && m != nil {
- msan.Read(key, typ.Key.Size_)
- }
- if asan.Enabled && m != nil {
- asan.Read(key, typ.Key.Size_)
- }
-
- if m == nil || m.Used() == 0 {
- if err := mapKeyError(typ, key); err != nil {
- panic(err) // see issue 23734
- }
- return unsafe.Pointer(&zeroVal[0])
- }
-
- if m.writing != 0 {
- fatal("concurrent map read and map write")
- }
-
- hash := typ.Hasher(key, m.seed)
-
- if m.dirLen <= 0 {
- _, elem, ok := m.getWithKeySmall(typ, hash, key)
- if !ok {
- return unsafe.Pointer(&zeroVal[0])
- }
- return elem
- }
-
- // Select table.
- idx := m.directoryIndex(hash)
- t := m.directoryAt(idx)
-
- // Probe table.
- seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
- for ; ; seq = seq.next() {
- g := t.groups.group(typ, seq.offset)
-
- match := g.ctrls().matchH2(h2(hash))
-
- for match != 0 {
- i := match.first()
-
- slotKey := g.key(typ, i)
- slotKeyOrig := slotKey
- if typ.IndirectKey() {
- slotKey = *((*unsafe.Pointer)(slotKey))
- }
- if typ.Key.Equal(key, slotKey) {
- slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
- if typ.IndirectElem() {
- slotElem = *((*unsafe.Pointer)(slotElem))
- }
- return slotElem
- }
- match = match.removeFirst()
- }
-
- match = g.ctrls().matchEmpty()
- if match != 0 {
- // Finding an empty slot means we've reached the end of
- // the probe sequence.
- return unsafe.Pointer(&zeroVal[0])
- }
- }
-}
-
-//go:linkname runtime_mapaccess2 runtime.mapaccess2
-func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
- if race.Enabled && m != nil {
- callerpc := sys.GetCallerPC()
- pc := abi.FuncPCABIInternal(runtime_mapaccess1)
- race.ReadPC(unsafe.Pointer(m), callerpc, pc)
- race.ReadObjectPC(typ.Key, key, callerpc, pc)
- }
- if msan.Enabled && m != nil {
- msan.Read(key, typ.Key.Size_)
- }
- if asan.Enabled && m != nil {
- asan.Read(key, typ.Key.Size_)
- }
-
- if m == nil || m.Used() == 0 {
- if err := mapKeyError(typ, key); err != nil {
- panic(err) // see issue 23734
- }
- return unsafe.Pointer(&zeroVal[0]), false
- }
-
- if m.writing != 0 {
- fatal("concurrent map read and map write")
- }
-
- hash := typ.Hasher(key, m.seed)
-
- if m.dirLen == 0 {
- _, elem, ok := m.getWithKeySmall(typ, hash, key)
- if !ok {
- return unsafe.Pointer(&zeroVal[0]), false
- }
- return elem, true
- }
-
- // Select table.
- idx := m.directoryIndex(hash)
- t := m.directoryAt(idx)
-
- // Probe table.
- seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
- for ; ; seq = seq.next() {
- g := t.groups.group(typ, seq.offset)
-
- match := g.ctrls().matchH2(h2(hash))
-
- for match != 0 {
- i := match.first()
-
- slotKey := g.key(typ, i)
- slotKeyOrig := slotKey
- if typ.IndirectKey() {
- slotKey = *((*unsafe.Pointer)(slotKey))
- }
- if typ.Key.Equal(key, slotKey) {
- slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
- if typ.IndirectElem() {
- slotElem = *((*unsafe.Pointer)(slotElem))
- }
- return slotElem, true
- }
- match = match.removeFirst()
- }
-
- match = g.ctrls().matchEmpty()
- if match != 0 {
- // Finding an empty slot means we've reached the end of
- // the probe sequence.
- return unsafe.Pointer(&zeroVal[0]), false
- }
- }
-}
-
-//go:linkname runtime_mapassign runtime.mapassign
-func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer {
- if m == nil {
- panic(errNilAssign)
- }
- if race.Enabled {
- callerpc := sys.GetCallerPC()
- pc := abi.FuncPCABIInternal(runtime_mapassign)
- race.WritePC(unsafe.Pointer(m), callerpc, pc)
- race.ReadObjectPC(typ.Key, key, callerpc, pc)
- }
- if msan.Enabled {
- msan.Read(key, typ.Key.Size_)
- }
- if asan.Enabled {
- asan.Read(key, typ.Key.Size_)
- }
- if m.writing != 0 {
- fatal("concurrent map writes")
- }
-
- hash := typ.Hasher(key, m.seed)
-
- // Set writing after calling Hasher, since Hasher may panic, in which
- // case we have not actually done a write.
- m.writing ^= 1 // toggle, see comment on writing
-
- if m.dirPtr == nil {
- m.growToSmall(typ)
- }
-
- if m.dirLen == 0 {
- if m.used < abi.SwissMapGroupSlots {
- elem := m.putSlotSmall(typ, hash, key)
-
- if m.writing == 0 {
- fatal("concurrent map writes")
- }
- m.writing ^= 1
-
- return elem
- }
-
- // Can't fit another entry, grow to full size map.
- m.growToTable(typ)
- }
-
- var slotElem unsafe.Pointer
-outer:
- for {
- // Select table.
- idx := m.directoryIndex(hash)
- t := m.directoryAt(idx)
-
- seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
-
- // As we look for a match, keep track of the first deleted slot
- // we find, which we'll use to insert the new entry if
- // necessary.
- var firstDeletedGroup groupReference
- var firstDeletedSlot uintptr
-
- for ; ; seq = seq.next() {
- g := t.groups.group(typ, seq.offset)
- match := g.ctrls().matchH2(h2(hash))
-
- // Look for an existing slot containing this key.
- for match != 0 {
- i := match.first()
-
- slotKey := g.key(typ, i)
- slotKeyOrig := slotKey
- if typ.IndirectKey() {
- slotKey = *((*unsafe.Pointer)(slotKey))
- }
- if typ.Key.Equal(key, slotKey) {
- if typ.NeedKeyUpdate() {
- typedmemmove(typ.Key, slotKey, key)
- }
-
- slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
- if typ.IndirectElem() {
- slotElem = *((*unsafe.Pointer)(slotElem))
- }
-
- t.checkInvariants(typ, m)
- break outer
- }
- match = match.removeFirst()
- }
-
- // No existing slot for this key in this group. Is this the end
- // of the probe sequence?
- match = g.ctrls().matchEmpty()
- if match != 0 {
- // Finding an empty slot means we've reached the end of
- // the probe sequence.
-
- var i uintptr
-
- // If we found a deleted slot along the way, we
- // can replace it without consuming growthLeft.
- if firstDeletedGroup.data != nil {
- g = firstDeletedGroup
- i = firstDeletedSlot
- t.growthLeft++ // will be decremented below to become a no-op.
- } else {
- // Otherwise, use the empty slot.
- i = match.first()
- }
-
- // If there is room left to grow, just insert the new entry.
- if t.growthLeft > 0 {
- slotKey := g.key(typ, i)
- slotKeyOrig := slotKey
- if typ.IndirectKey() {
- kmem := newobject(typ.Key)
- *(*unsafe.Pointer)(slotKey) = kmem
- slotKey = kmem
- }
- typedmemmove(typ.Key, slotKey, key)
-
- slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff)
- if typ.IndirectElem() {
- emem := newobject(typ.Elem)
- *(*unsafe.Pointer)(slotElem) = emem
- slotElem = emem
- }
-
- g.ctrls().set(i, ctrl(h2(hash)))
- t.growthLeft--
- t.used++
- m.used++
-
- t.checkInvariants(typ, m)
- break outer
- }
-
- t.rehash(typ, m)
- continue outer
- }
-
- // No empty slots in this group. Check for a deleted
- // slot, which we'll use if we don't find a match later
- // in the probe sequence.
- //
- // We only need to remember a single deleted slot.
- if firstDeletedGroup.data == nil {
- // Since we already checked for empty slots
- // above, matches here must be deleted slots.
- match = g.ctrls().matchEmptyOrDeleted()
- if match != 0 {
- firstDeletedGroup = g
- firstDeletedSlot = match.first()
- }
- }
- }
- }
-
- if m.writing == 0 {
- fatal("concurrent map writes")
- }
- m.writing ^= 1
-
- return slotElem
-}
diff --git a/src/internal/runtime/maps/table.go b/src/internal/runtime/maps/table.go
index 88f87187fe..d4b9276b57 100644
--- a/src/internal/runtime/maps/table.go
+++ b/src/internal/runtime/maps/table.go
@@ -36,7 +36,7 @@ type table struct {
used uint16
// The total number of slots (always 2^N). Equal to
- // `(groups.lengthMask+1)*abi.SwissMapGroupSlots`.
+ // `(groups.lengthMask+1)*abi.MapGroupSlots`.
capacity uint16
// The number of slots we can still fill without needing to rehash.
@@ -53,13 +53,13 @@ type table struct {
// Index of this table in the Map directory. This is the index of the
// _first_ location in the directory. The table may occur in multiple
- // sequential indicies.
+ // sequential indices.
//
// index is -1 if the table is stale (no longer installed in the
// directory).
index int
- // groups is an array of slot groups. Each group holds abi.SwissMapGroupSlots
+ // groups is an array of slot groups. Each group holds abi.MapGroupSlots
// key/elem slots and their control bytes. A table has a fixed size
// groups array. The table is replaced (in rehash) when more space is
// required.
@@ -71,9 +71,9 @@ type table struct {
groups groupsReference
}
-func newTable(typ *abi.SwissMapType, capacity uint64, index int, localDepth uint8) *table {
- if capacity < abi.SwissMapGroupSlots {
- capacity = abi.SwissMapGroupSlots
+func newTable(typ *abi.MapType, capacity uint64, index int, localDepth uint8) *table {
+ if capacity < abi.MapGroupSlots {
+ capacity = abi.MapGroupSlots
}
t := &table{
@@ -99,8 +99,8 @@ func newTable(typ *abi.SwissMapType, capacity uint64, index int, localDepth uint
// reset resets the table with new, empty groups with the specified new total
// capacity.
-func (t *table) reset(typ *abi.SwissMapType, capacity uint16) {
- groupCount := uint64(capacity) / abi.SwissMapGroupSlots
+func (t *table) reset(typ *abi.MapType, capacity uint16) {
+ groupCount := uint64(capacity) / abi.MapGroupSlots
t.groups = newGroups(typ, groupCount)
t.capacity = capacity
t.growthLeft = t.maxGrowthLeft()
@@ -118,7 +118,7 @@ func (t *table) maxGrowthLeft() uint16 {
// No real reason to support zero capacity table, since an
// empty Map simply won't have a table.
panic("table must have positive capacity")
- } else if t.capacity <= abi.SwissMapGroupSlots {
+ } else if t.capacity <= abi.MapGroupSlots {
// If the map fits in a single group then we're able to fill all of
// the slots except 1 (an empty slot is needed to terminate find
// operations).
@@ -131,7 +131,7 @@ func (t *table) maxGrowthLeft() uint16 {
// TODO(prattmic): Do something cleaner.
panic("overflow")
}
- return (t.capacity * maxAvgGroupLoad) / abi.SwissMapGroupSlots
+ return (t.capacity * maxAvgGroupLoad) / abi.MapGroupSlots
}
}
@@ -142,7 +142,7 @@ func (t *table) Used() uint64 {
// Get performs a lookup of the key that key points to. It returns a pointer to
// the element, or false if the key doesn't exist.
-func (t *table) Get(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
+func (t *table) Get(typ *abi.MapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) {
// TODO(prattmic): We could avoid hashing in a variety of special
// cases.
//
@@ -163,7 +163,7 @@ func (t *table) Get(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.P
// expose updated elements. For NeedsKeyUpdate keys, iteration also must return
// the new key value, not the old key value.
// hash must be the hash of the key.
-func (t *table) getWithKey(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
+func (t *table) getWithKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) {
// To find the location of a key in the table, we compute hash(key). From
// h1(hash(key)) and the capacity, we construct a probeSeq that visits
// every group of slots in some interesting order. See [probeSeq].
@@ -223,7 +223,7 @@ func (t *table) getWithKey(typ *abi.SwissMapType, hash uintptr, key unsafe.Point
}
}
-func (t *table) getWithoutKey(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
+func (t *table) getWithoutKey(typ *abi.MapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
@@ -263,7 +263,7 @@ func (t *table) getWithoutKey(typ *abi.SwissMapType, hash uintptr, key unsafe.Po
// the new table.
//
// hash must be the hash of key.
-func (t *table) PutSlot(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
+func (t *table) PutSlot(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, bool) {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
// As we look for a match, keep track of the first deleted slot we
@@ -378,7 +378,7 @@ func (t *table) PutSlot(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.
// requires the caller to ensure that the referenced memory never
// changes (by sourcing those pointers from another indirect key/elem
// map).
-func (t *table) uncheckedPutSlot(typ *abi.SwissMapType, hash uintptr, key, elem unsafe.Pointer) {
+func (t *table) uncheckedPutSlot(typ *abi.MapType, hash uintptr, key, elem unsafe.Pointer) {
if t.growthLeft == 0 {
panic("invariant failed: growthLeft is unexpectedly 0")
}
@@ -418,7 +418,7 @@ func (t *table) uncheckedPutSlot(typ *abi.SwissMapType, hash uintptr, key, elem
}
// Delete returns true if it put a tombstone in t.
-func (t *table) Delete(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
+func (t *table) Delete(typ *abi.MapType, m *Map, hash uintptr, key unsafe.Pointer) bool {
seq := makeProbeSeq(h1(hash), t.groups.lengthMask)
for ; ; seq = seq.next() {
g := t.groups.group(typ, seq.offset)
@@ -505,14 +505,14 @@ func (t *table) Delete(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.P
// We really need to remove O(n) tombstones so we can pay for the cost
// of finding them. If we can't, then we need to grow (which is also O(n),
// but guarantees O(n) subsequent inserts can happen in constant time).
-func (t *table) pruneTombstones(typ *abi.SwissMapType, m *Map) {
+func (t *table) pruneTombstones(typ *abi.MapType, m *Map) {
if t.tombstones()*10 < t.capacity { // 10% of capacity
// Not enough tombstones to be worth the effort.
return
}
// Bit set marking all the groups whose tombstones are needed.
- var needed [(maxTableCapacity/abi.SwissMapGroupSlots + 31) / 32]uint32
+ var needed [(maxTableCapacity/abi.MapGroupSlots + 31) / 32]uint32
// Trace the probe sequence of every full entry.
for i := uint64(0); i <= t.groups.lengthMask; i++ {
@@ -591,11 +591,11 @@ func (t *table) pruneTombstones(typ *abi.SwissMapType, m *Map) {
// tombstone is a slot that has been deleted but is still considered occupied
// so as not to violate the probing invariant.
func (t *table) tombstones() uint16 {
- return (t.capacity*maxAvgGroupLoad)/abi.SwissMapGroupSlots - t.used - t.growthLeft
+ return (t.capacity*maxAvgGroupLoad)/abi.MapGroupSlots - t.used - t.growthLeft
}
// Clear deletes all entries from the map resulting in an empty map.
-func (t *table) Clear(typ *abi.SwissMapType) {
+func (t *table) Clear(typ *abi.MapType) {
mgl := t.maxGrowthLeft()
if t.used == 0 && t.growthLeft == mgl { // no current entries and no tombstones
return
@@ -614,7 +614,7 @@ func (t *table) Clear(typ *abi.SwissMapType) {
type Iter struct {
key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
- typ *abi.SwissMapType
+ typ *abi.MapType
m *Map
// Randomize iteration order by starting iteration at a random slot
@@ -648,7 +648,7 @@ type Iter struct {
}
// Init initializes Iter for iteration.
-func (it *Iter) Init(typ *abi.SwissMapType, m *Map) {
+func (it *Iter) Init(typ *abi.MapType, m *Map) {
it.typ = typ
if m == nil || m.used == 0 {
@@ -795,8 +795,8 @@ func (it *Iter) Next() {
if it.dirIdx < 0 {
// Map was small at Init.
- for ; it.entryIdx < abi.SwissMapGroupSlots; it.entryIdx++ {
- k := uintptr(it.entryIdx+it.entryOffset) % abi.SwissMapGroupSlots
+ for ; it.entryIdx < abi.MapGroupSlots; it.entryIdx++ {
+ k := uintptr(it.entryIdx+it.entryOffset) % abi.MapGroupSlots
if (it.group.ctrls().get(k) & ctrlEmpty) == ctrlEmpty {
// Empty or deleted.
@@ -933,13 +933,13 @@ func (it *Iter) Next() {
// match.
entryIdx := (it.entryIdx + it.entryOffset) & entryMask
- slotIdx := uintptr(entryIdx & (abi.SwissMapGroupSlots - 1))
+ slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))
if slotIdx == 0 || it.group.data == nil {
// Only compute the group (a) when we switch
// groups (slotIdx rolls over) and (b) on the
// first iteration in this table (slotIdx may
// not be zero due to entryOffset).
- groupIdx := entryIdx >> abi.SwissMapGroupSlotsBits
+ groupIdx := entryIdx >> abi.MapGroupSlotsBits
it.group = it.tab.groups.group(it.typ, groupIdx)
}
@@ -1000,14 +1000,14 @@ func (it *Iter) Next() {
var groupMatch bitset
for it.entryIdx <= entryMask {
entryIdx := (it.entryIdx + it.entryOffset) & entryMask
- slotIdx := uintptr(entryIdx & (abi.SwissMapGroupSlots - 1))
+ slotIdx := uintptr(entryIdx & (abi.MapGroupSlots - 1))
if slotIdx == 0 || it.group.data == nil {
// Only compute the group (a) when we switch
// groups (slotIdx rolls over) and (b) on the
// first iteration in this table (slotIdx may
// not be zero due to entryOffset).
- groupIdx := entryIdx >> abi.SwissMapGroupSlotsBits
+ groupIdx := entryIdx >> abi.MapGroupSlotsBits
it.group = it.tab.groups.group(it.typ, groupIdx)
}
@@ -1025,7 +1025,7 @@ func (it *Iter) Next() {
if groupMatch == 0 {
// Jump past remaining slots in this
// group.
- it.entryIdx += abi.SwissMapGroupSlots - uint64(slotIdx)
+ it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
continue
}
@@ -1067,7 +1067,7 @@ func (it *Iter) Next() {
// No more entries in this
// group. Continue to next
// group.
- it.entryIdx += abi.SwissMapGroupSlots - uint64(slotIdx)
+ it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
continue
}
@@ -1092,7 +1092,7 @@ func (it *Iter) Next() {
// No more entries in
// this group. Continue
// to next group.
- it.entryIdx += abi.SwissMapGroupSlots - uint64(slotIdx)
+ it.entryIdx += abi.MapGroupSlots - uint64(slotIdx)
} else {
// Next full slot.
i := groupMatch.first()
@@ -1115,7 +1115,7 @@ func (it *Iter) Next() {
// Replaces the table with one larger table or two split tables to fit more
// entries. Since the table is replaced, t is now stale and should not be
// modified.
-func (t *table) rehash(typ *abi.SwissMapType, m *Map) {
+func (t *table) rehash(typ *abi.MapType, m *Map) {
// TODO(prattmic): SwissTables typically perform a "rehash in place"
// operation which recovers capacity consumed by tombstones without growing
// the table by reordering slots as necessary to maintain the probe
@@ -1149,7 +1149,7 @@ func localDepthMask(localDepth uint8) uintptr {
}
// split the table into two, installing the new tables in the map directory.
-func (t *table) split(typ *abi.SwissMapType, m *Map) {
+func (t *table) split(typ *abi.MapType, m *Map) {
localDepth := t.localDepth
localDepth++
@@ -1162,7 +1162,7 @@ func (t *table) split(typ *abi.SwissMapType, m *Map) {
for i := uint64(0); i <= t.groups.lengthMask; i++ {
g := t.groups.group(typ, i)
- for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
+ for j := uintptr(0); j < abi.MapGroupSlots; j++ {
if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
// Empty or deleted
continue
@@ -1197,13 +1197,13 @@ func (t *table) split(typ *abi.SwissMapType, m *Map) {
// and uncheckedPutting each element of the table into the new table (we know
// that no insertion here will Put an already-present value), and discard the
// old table.
-func (t *table) grow(typ *abi.SwissMapType, m *Map, newCapacity uint16) {
+func (t *table) grow(typ *abi.MapType, m *Map, newCapacity uint16) {
newTable := newTable(typ, uint64(newCapacity), t.index, t.localDepth)
if t.capacity > 0 {
for i := uint64(0); i <= t.groups.lengthMask; i++ {
g := t.groups.group(typ, i)
- for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
+ for j := uintptr(0); j < abi.MapGroupSlots; j++ {
if (g.ctrls().get(j) & ctrlEmpty) == ctrlEmpty {
// Empty or deleted
continue
@@ -1262,7 +1262,7 @@ func (s probeSeq) next() probeSeq {
return s
}
-func (t *table) clone(typ *abi.SwissMapType) *table {
+func (t *table) clone(typ *abi.MapType) *table {
// Shallow copy the table structure.
t2 := new(table)
*t2 = *t
diff --git a/src/internal/runtime/maps/table_debug.go b/src/internal/runtime/maps/table_debug.go
index a754592f70..d7226251b0 100644
--- a/src/internal/runtime/maps/table_debug.go
+++ b/src/internal/runtime/maps/table_debug.go
@@ -12,7 +12,7 @@ import (
const debugLog = false
-func (t *table) checkInvariants(typ *abi.SwissMapType, m *Map) {
+func (t *table) checkInvariants(typ *abi.MapType, m *Map) {
if !debugLog {
return
}
@@ -24,7 +24,7 @@ func (t *table) checkInvariants(typ *abi.SwissMapType, m *Map) {
var empty uint16
for i := uint64(0); i <= t.groups.lengthMask; i++ {
g := t.groups.group(typ, i)
- for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
+ for j := uintptr(0); j < abi.MapGroupSlots; j++ {
c := g.ctrls().get(j)
switch {
case c == ctrlDeleted:
@@ -63,7 +63,7 @@ func (t *table) checkInvariants(typ *abi.SwissMapType, m *Map) {
panic("invariant failed: found mismatched used slot count")
}
- growthLeft := (t.capacity*maxAvgGroupLoad)/abi.SwissMapGroupSlots - t.used - deleted
+ growthLeft := (t.capacity*maxAvgGroupLoad)/abi.MapGroupSlots - t.used - deleted
if growthLeft != t.growthLeft {
print("invariant failed: found ", t.growthLeft, " growthLeft, but expected ", growthLeft, "\n")
t.Print(typ, m)
@@ -81,7 +81,7 @@ func (t *table) checkInvariants(typ *abi.SwissMapType, m *Map) {
panic("invariant failed: found no empty slots (violates probe invariant)")
}
}
-func (t *table) Print(typ *abi.SwissMapType, m *Map) {
+func (t *table) Print(typ *abi.MapType, m *Map) {
print(`table{
index: `, t.index, `
localDepth: `, t.localDepth, `
@@ -96,7 +96,7 @@ func (t *table) Print(typ *abi.SwissMapType, m *Map) {
g := t.groups.group(typ, i)
ctrls := g.ctrls()
- for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ {
+ for j := uintptr(0); j < abi.MapGroupSlots; j++ {
print("\t\t\tslot ", j, "\n")
c := ctrls.get(j)
diff --git a/src/internal/runtime/strconv/atoi.go b/src/internal/runtime/strconv/atoi.go
index 87b3faf6d5..0308757c6f 100644
--- a/src/internal/runtime/strconv/atoi.go
+++ b/src/internal/runtime/strconv/atoi.go
@@ -73,4 +73,3 @@ func Atoi32(s string) (int32, bool) {
}
return 0, false
}
-
diff --git a/src/internal/runtime/strconv/atoi_test.go b/src/internal/runtime/strconv/atoi_test.go
index 49cd6f160a..71a8030b1d 100644
--- a/src/internal/runtime/strconv/atoi_test.go
+++ b/src/internal/runtime/strconv/atoi_test.go
@@ -102,4 +102,3 @@ func TestAtoi32(t *testing.T) {
}
}
}
-
diff --git a/src/internal/runtime/syscall/asm_linux_386.s b/src/internal/runtime/syscall/linux/asm_linux_386.s
index 15aae4d8bd..15aae4d8bd 100644
--- a/src/internal/runtime/syscall/asm_linux_386.s
+++ b/src/internal/runtime/syscall/linux/asm_linux_386.s
diff --git a/src/internal/runtime/syscall/asm_linux_amd64.s b/src/internal/runtime/syscall/linux/asm_linux_amd64.s
index 3740ef1beb..3740ef1beb 100644
--- a/src/internal/runtime/syscall/asm_linux_amd64.s
+++ b/src/internal/runtime/syscall/linux/asm_linux_amd64.s
diff --git a/src/internal/runtime/syscall/asm_linux_arm.s b/src/internal/runtime/syscall/linux/asm_linux_arm.s
index dbf1826d94..dbf1826d94 100644
--- a/src/internal/runtime/syscall/asm_linux_arm.s
+++ b/src/internal/runtime/syscall/linux/asm_linux_arm.s
diff --git a/src/internal/runtime/syscall/asm_linux_arm64.s b/src/internal/runtime/syscall/linux/asm_linux_arm64.s
index 83e862ff72..83e862ff72 100644
--- a/src/internal/runtime/syscall/asm_linux_arm64.s
+++ b/src/internal/runtime/syscall/linux/asm_linux_arm64.s
diff --git a/src/internal/runtime/syscall/asm_linux_loong64.s b/src/internal/runtime/syscall/linux/asm_linux_loong64.s
index ff8ad75b05..ff8ad75b05 100644
--- a/src/internal/runtime/syscall/asm_linux_loong64.s
+++ b/src/internal/runtime/syscall/linux/asm_linux_loong64.s
diff --git a/src/internal/runtime/syscall/asm_linux_mips64x.s b/src/internal/runtime/syscall/linux/asm_linux_mips64x.s
index 6b7c524689..6b7c524689 100644
--- a/src/internal/runtime/syscall/asm_linux_mips64x.s
+++ b/src/internal/runtime/syscall/linux/asm_linux_mips64x.s
diff --git a/src/internal/runtime/syscall/asm_linux_mipsx.s b/src/internal/runtime/syscall/linux/asm_linux_mipsx.s
index 561310f8a1..561310f8a1 100644
--- a/src/internal/runtime/syscall/asm_linux_mipsx.s
+++ b/src/internal/runtime/syscall/linux/asm_linux_mipsx.s
diff --git a/src/internal/runtime/syscall/asm_linux_ppc64x.s b/src/internal/runtime/syscall/linux/asm_linux_ppc64x.s
index 3e985edea0..3e985edea0 100644
--- a/src/internal/runtime/syscall/asm_linux_ppc64x.s
+++ b/src/internal/runtime/syscall/linux/asm_linux_ppc64x.s
diff --git a/src/internal/runtime/syscall/asm_linux_riscv64.s b/src/internal/runtime/syscall/linux/asm_linux_riscv64.s
index 15e50ec153..15e50ec153 100644
--- a/src/internal/runtime/syscall/asm_linux_riscv64.s
+++ b/src/internal/runtime/syscall/linux/asm_linux_riscv64.s
diff --git a/src/internal/runtime/syscall/asm_linux_s390x.s b/src/internal/runtime/syscall/linux/asm_linux_s390x.s
index 1b27f29390..1b27f29390 100644
--- a/src/internal/runtime/syscall/asm_linux_s390x.s
+++ b/src/internal/runtime/syscall/linux/asm_linux_s390x.s
diff --git a/src/internal/runtime/syscall/defs_linux.go b/src/internal/runtime/syscall/linux/defs_linux.go
index 4c131e23cf..1ca3c35238 100644
--- a/src/internal/runtime/syscall/defs_linux.go
+++ b/src/internal/runtime/syscall/linux/defs_linux.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package syscall
+package linux
const (
AT_FDCWD = -0x64
diff --git a/src/internal/runtime/syscall/defs_linux_386.go b/src/internal/runtime/syscall/linux/defs_linux_386.go
index 6f05fd7306..7fdf5d3f80 100644
--- a/src/internal/runtime/syscall/defs_linux_386.go
+++ b/src/internal/runtime/syscall/linux/defs_linux_386.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package syscall
+package linux
const (
SYS_CLOSE = 6
diff --git a/src/internal/runtime/syscall/defs_linux_amd64.go b/src/internal/runtime/syscall/linux/defs_linux_amd64.go
index 6c62818100..2c8676e6e9 100644
--- a/src/internal/runtime/syscall/defs_linux_amd64.go
+++ b/src/internal/runtime/syscall/linux/defs_linux_amd64.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package syscall
+package linux
const (
SYS_CLOSE = 3
diff --git a/src/internal/runtime/syscall/defs_linux_arm.go b/src/internal/runtime/syscall/linux/defs_linux_arm.go
index 2b6005a3da..a0b395d676 100644
--- a/src/internal/runtime/syscall/defs_linux_arm.go
+++ b/src/internal/runtime/syscall/linux/defs_linux_arm.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package syscall
+package linux
const (
SYS_CLOSE = 6
diff --git a/src/internal/runtime/syscall/defs_linux_arm64.go b/src/internal/runtime/syscall/linux/defs_linux_arm64.go
index 05922fbf7a..223dce0c5b 100644
--- a/src/internal/runtime/syscall/defs_linux_arm64.go
+++ b/src/internal/runtime/syscall/linux/defs_linux_arm64.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package syscall
+package linux
const (
SYS_CLOSE = 57
diff --git a/src/internal/runtime/syscall/defs_linux_loong64.go b/src/internal/runtime/syscall/linux/defs_linux_loong64.go
index 2501434ceb..8aa61c391d 100644
--- a/src/internal/runtime/syscall/defs_linux_loong64.go
+++ b/src/internal/runtime/syscall/linux/defs_linux_loong64.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package syscall
+package linux
const (
SYS_CLOSE = 57
diff --git a/src/internal/runtime/syscall/defs_linux_mips64x.go b/src/internal/runtime/syscall/linux/defs_linux_mips64x.go
index 92ba3f7398..84b760dc1b 100644
--- a/src/internal/runtime/syscall/defs_linux_mips64x.go
+++ b/src/internal/runtime/syscall/linux/defs_linux_mips64x.go
@@ -4,7 +4,7 @@
//go:build linux && (mips64 || mips64le)
-package syscall
+package linux
const (
SYS_CLOSE = 5003
diff --git a/src/internal/runtime/syscall/defs_linux_mipsx.go b/src/internal/runtime/syscall/linux/defs_linux_mipsx.go
index 7b4dee08af..a9be21414c 100644
--- a/src/internal/runtime/syscall/defs_linux_mipsx.go
+++ b/src/internal/runtime/syscall/linux/defs_linux_mipsx.go
@@ -4,7 +4,7 @@
//go:build linux && (mips || mipsle)
-package syscall
+package linux
const (
SYS_CLOSE = 4006
diff --git a/src/internal/runtime/syscall/defs_linux_ppc64x.go b/src/internal/runtime/syscall/linux/defs_linux_ppc64x.go
index 4656517628..63f4e5d786 100644
--- a/src/internal/runtime/syscall/defs_linux_ppc64x.go
+++ b/src/internal/runtime/syscall/linux/defs_linux_ppc64x.go
@@ -4,7 +4,7 @@
//go:build linux && (ppc64 || ppc64le)
-package syscall
+package linux
const (
SYS_CLOSE = 6
diff --git a/src/internal/runtime/syscall/defs_linux_riscv64.go b/src/internal/runtime/syscall/linux/defs_linux_riscv64.go
index 2501434ceb..8aa61c391d 100644
--- a/src/internal/runtime/syscall/defs_linux_riscv64.go
+++ b/src/internal/runtime/syscall/linux/defs_linux_riscv64.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package syscall
+package linux
const (
SYS_CLOSE = 57
diff --git a/src/internal/runtime/syscall/defs_linux_s390x.go b/src/internal/runtime/syscall/linux/defs_linux_s390x.go
index 8005890e45..52945db0e5 100644
--- a/src/internal/runtime/syscall/defs_linux_s390x.go
+++ b/src/internal/runtime/syscall/linux/defs_linux_s390x.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package syscall
+package linux
const (
SYS_CLOSE = 6
diff --git a/src/internal/runtime/syscall/syscall_linux.go b/src/internal/runtime/syscall/linux/syscall_linux.go
index 49e5f8de2c..8201e7d190 100644
--- a/src/internal/runtime/syscall/syscall_linux.go
+++ b/src/internal/runtime/syscall/linux/syscall_linux.go
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package syscall provides the syscall primitives required for the runtime.
-package syscall
+// Package linux provides the syscall primitives required for the runtime.
+package linux
import (
"internal/goarch"
"unsafe"
)
-// TODO(https://go.dev/issue/51087): This package is incomplete and currently
-// only contains very minimal support for Linux.
+// TODO(https://go.dev/issue/51087): Move remaining syscalls to this package.
// Syscall6 calls system call number 'num' with arguments a1-6.
func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
diff --git a/src/internal/runtime/syscall/syscall_linux_test.go b/src/internal/runtime/syscall/linux/syscall_linux_test.go
index 14bb31c4b8..10eb2d56bf 100644
--- a/src/internal/runtime/syscall/syscall_linux_test.go
+++ b/src/internal/runtime/syscall/linux/syscall_linux_test.go
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package syscall_test
+package linux_test
import (
- "internal/runtime/syscall"
+ "internal/runtime/syscall/linux"
"testing"
)
func TestEpollctlErrorSign(t *testing.T) {
- v := syscall.EpollCtl(-1, 1, -1, &syscall.EpollEvent{})
+ v := linux.EpollCtl(-1, 1, -1, &linux.EpollEvent{})
const EBADF = 0x09
if v != EBADF {
diff --git a/src/internal/runtime/syscall/windows/asm_windows_386.s b/src/internal/runtime/syscall/windows/asm_windows_386.s
new file mode 100644
index 0000000000..29cce00309
--- /dev/null
+++ b/src/internal/runtime/syscall/windows/asm_windows_386.s
@@ -0,0 +1,48 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·StdCall<ABIInternal>(SB),NOSPLIT,$0
+ JMP ·asmstdcall(SB)
+
+TEXT ·asmstdcall(SB),NOSPLIT,$0
+ MOVL fn+0(FP), BX
+ MOVL SP, BP // save stack pointer
+
+ // SetLastError(0).
+ MOVL $0, 0x34(FS)
+
+ MOVL StdCallInfo_N(BX), CX
+
+ // Fast version, do not store args on the stack.
+ CMPL CX, $0
+ JE docall
+
+ // Copy args to the stack.
+ MOVL CX, AX
+ SALL $2, AX
+ SUBL AX, SP // room for args
+ MOVL SP, DI
+ MOVL StdCallInfo_Args(BX), SI
+ CLD
+ REP; MOVSL
+
+docall:
+ // Call stdcall or cdecl function.
+ // DI SI BP BX are preserved, SP is not
+ CALL StdCallInfo_Fn(BX)
+ MOVL BP, SP
+
+ // Return result.
+ MOVL fn+0(FP), BX
+ MOVL AX, StdCallInfo_R1(BX)
+ MOVL DX, StdCallInfo_R2(BX)
+
+ // GetLastError().
+ MOVL 0x34(FS), AX
+ MOVL AX, StdCallInfo_Err(BX)
+
+ RET
diff --git a/src/internal/runtime/syscall/windows/asm_windows_amd64.s b/src/internal/runtime/syscall/windows/asm_windows_amd64.s
new file mode 100644
index 0000000000..c31cbcdd14
--- /dev/null
+++ b/src/internal/runtime/syscall/windows/asm_windows_amd64.s
@@ -0,0 +1,84 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+TEXT ·StdCall<ABIInternal>(SB),NOSPLIT,$0
+ MOVQ AX, CX
+ JMP ·asmstdcall(SB)
+
+TEXT ·asmstdcall(SB),NOSPLIT,$16
+ MOVQ SP, AX
+ ANDQ $~15, SP // alignment as per Windows requirement
+ MOVQ AX, 8(SP)
+ MOVQ CX, 0(SP) // asmcgocall will put first argument into CX.
+
+ MOVQ StdCallInfo_Fn(CX), AX
+ MOVQ StdCallInfo_Args(CX), SI
+ MOVQ StdCallInfo_N(CX), CX
+
+ // SetLastError(0).
+ MOVQ 0x30(GS), DI
+ MOVL $0, 0x68(DI)
+
+ SUBQ $(const_MaxArgs*8), SP // room for args
+
+ // Fast version, do not store args on the stack.
+ CMPL CX, $0; JE _0args
+ CMPL CX, $1; JE _1args
+ CMPL CX, $2; JE _2args
+ CMPL CX, $3; JE _3args
+ CMPL CX, $4; JE _4args
+
+ // Check we have enough room for args.
+ CMPL CX, $const_MaxArgs
+ JLE 2(PC)
+ INT $3 // not enough room -> crash
+
+ // Copy args to the stack.
+ MOVQ SP, DI
+ CLD
+ REP; MOVSQ
+ MOVQ SP, SI
+
+ // Load first 4 args into correspondent registers.
+ // Floating point arguments are passed in the XMM
+ // registers. Set them here in case any of the arguments
+ // are floating point values. For details see
+ // https://learn.microsoft.com/en-us/cpp/build/x64-calling-convention?view=msvc-170
+_4args:
+ MOVQ 24(SI), R9
+ MOVQ R9, X3
+_3args:
+ MOVQ 16(SI), R8
+ MOVQ R8, X2
+_2args:
+ MOVQ 8(SI), DX
+ MOVQ DX, X1
+_1args:
+ MOVQ 0(SI), CX
+ MOVQ CX, X0
+_0args:
+
+ // Call stdcall function.
+ CALL AX
+
+ ADDQ $(const_MaxArgs*8), SP
+
+ // Return result.
+ MOVQ 0(SP), CX
+ MOVQ 8(SP), SP
+ MOVQ AX, StdCallInfo_R1(CX)
+ // Floating point return values are returned in XMM0. Setting r2 to this
+ // value in case this call returned a floating point value. For details,
+ // see https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
+ MOVQ X0, StdCallInfo_R2(CX)
+
+ // GetLastError().
+ MOVQ 0x30(GS), DI
+ MOVL 0x68(DI), AX
+ MOVQ AX, StdCallInfo_Err(CX)
+
+ RET
diff --git a/src/internal/runtime/syscall/windows/asm_windows_arm.s b/src/internal/runtime/syscall/windows/asm_windows_arm.s
new file mode 100644
index 0000000000..8cc4d5c9aa
--- /dev/null
+++ b/src/internal/runtime/syscall/windows/asm_windows_arm.s
@@ -0,0 +1,77 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "textflag.h"
+#include "time_windows.h"
+
+TEXT ·StdCall<ABIInternal>(SB),NOSPLIT,$0
+ B ·asmstdcall(SB)
+
+TEXT ·asmstdcall(SB),NOSPLIT|NOFRAME,$0
+ MOVM.DB.W [R4, R5, R14], (R13) // push {r4, r5, lr}
+ MOVW R0, R4 // put fn * in r4
+ MOVW R13, R5 // save stack pointer in r5
+
+ // SetLastError(0)
+ MOVW $0, R0
+ MRC 15, 0, R1, C13, C0, 2
+ MOVW R0, 0x34(R1)
+
+ MOVW 8(R4), R12 // fn->Args
+
+ // Do we have more than 4 arguments?
+ MOVW 4(R4), R0 // fn->n
+ SUB.S $4, R0, R2
+ BLE loadregs
+
+ // Reserve stack space for remaining args
+ SUB R2<<2, R13
+ BIC $0x7, R13 // alignment for ABI
+
+ // R0: count of arguments
+ // R1:
+ // R2: loop counter, from 0 to (n-4)
+ // R3: scratch
+ // R4: pointer to StdCallInfo struct
+ // R12: fn->args
+ MOVW $0, R2
+stackargs:
+ ADD $4, R2, R3 // r3 = args[4 + i]
+ MOVW R3<<2(R12), R3
+ MOVW R3, R2<<2(R13) // stack[i] = r3
+
+ ADD $1, R2 // i++
+ SUB $4, R0, R3 // while (i < (n - 4))
+ CMP R3, R2
+ BLT stackargs
+
+loadregs:
+ CMP $3, R0
+ MOVW.GT 12(R12), R3
+
+ CMP $2, R0
+ MOVW.GT 8(R12), R2
+
+ CMP $1, R0
+ MOVW.GT 4(R12), R1
+
+ CMP $0, R0
+ MOVW.GT 0(R12), R0
+
+ BIC $0x7, R13 // alignment for ABI
+ MOVW 0(R4), R12 // branch to fn->fn
+ BL (R12)
+
+ MOVW R5, R13 // free stack space
+ MOVW R0, 12(R4) // save return value to fn->r1
+ MOVW R1, 16(R4)
+
+ // GetLastError
+ MRC 15, 0, R1, C13, C0, 2
+ MOVW 0x34(R1), R0
+ MOVW R0, 20(R4) // store in fn->err
+
+ MOVM.IA.W (R13), [R4, R5, R15]
diff --git a/src/internal/runtime/syscall/windows/asm_windows_arm64.s b/src/internal/runtime/syscall/windows/asm_windows_arm64.s
new file mode 100644
index 0000000000..fb4cda0f83
--- /dev/null
+++ b/src/internal/runtime/syscall/windows/asm_windows_arm64.s
@@ -0,0 +1,90 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Offsets into Thread Environment Block (pointer in R18)
+#define TEB_error 0x68
+
+TEXT ·StdCall<ABIInternal>(SB),NOSPLIT,$0
+ B ·asmstdcall(SB)
+
+TEXT ·asmstdcall(SB),NOSPLIT,$16
+ STP (R19, R20), 16(RSP) // save old R19, R20
+ MOVD R0, R19 // save fn pointer
+ MOVD RSP, R20 // save stack pointer
+
+ // SetLastError(0)
+ MOVD $0, TEB_error(R18_PLATFORM)
+ MOVD StdCallInfo_Args(R19), R12
+
+ // Do we have more than 8 arguments?
+ MOVD StdCallInfo_N(R19), R0
+ CMP $0, R0; BEQ _0args
+ CMP $1, R0; BEQ _1args
+ CMP $2, R0; BEQ _2args
+ CMP $3, R0; BEQ _3args
+ CMP $4, R0; BEQ _4args
+ CMP $5, R0; BEQ _5args
+ CMP $6, R0; BEQ _6args
+ CMP $7, R0; BEQ _7args
+ CMP $8, R0; BEQ _8args
+
+ // Reserve stack space for remaining args
+ SUB $8, R0, R2
+ ADD $1, R2, R3 // make even number of words for stack alignment
+ AND $~1, R3
+ LSL $3, R3
+ SUB R3, RSP
+
+ // R4: size of stack arguments (n-8)*8
+ // R5: &args[8]
+ // R6: loop counter, from 0 to (n-8)*8
+ // R7: scratch
+ // R8: copy of RSP - (R2)(RSP) assembles as (R2)(ZR)
+ SUB $8, R0, R4
+ LSL $3, R4
+ ADD $(8*8), R12, R5
+ MOVD $0, R6
+ MOVD RSP, R8
+stackargs:
+ MOVD (R6)(R5), R7
+ MOVD R7, (R6)(R8)
+ ADD $8, R6
+ CMP R6, R4
+ BNE stackargs
+
+_8args:
+ MOVD (7*8)(R12), R7
+_7args:
+ MOVD (6*8)(R12), R6
+_6args:
+ MOVD (5*8)(R12), R5
+_5args:
+ MOVD (4*8)(R12), R4
+_4args:
+ MOVD (3*8)(R12), R3
+_3args:
+ MOVD (2*8)(R12), R2
+_2args:
+ MOVD (1*8)(R12), R1
+_1args:
+ MOVD (0*8)(R12), R0
+_0args:
+
+ MOVD StdCallInfo_Fn(R19), R12
+ BL (R12)
+
+ MOVD R20, RSP // free stack space
+ MOVD R0, StdCallInfo_R1(R19) // save return value
+ // TODO(rsc) floating point like amd64 in StdCallInfo_R2?
+
+ // GetLastError
+ MOVD TEB_error(R18_PLATFORM), R0
+ MOVD R0, StdCallInfo_Err(R19)
+
+ // Restore callee-saved registers.
+ LDP 16(RSP), (R19, R20)
+ RET
diff --git a/src/internal/runtime/syscall/windows/syscall_windows.go b/src/internal/runtime/syscall/windows/syscall_windows.go
new file mode 100644
index 0000000000..0d350f0d7f
--- /dev/null
+++ b/src/internal/runtime/syscall/windows/syscall_windows.go
@@ -0,0 +1,44 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package windows provides the syscall primitives required for the runtime.
+
+package windows
+
+import (
+ "internal/abi"
+)
+
+// MaxArgs should be divisible by 2, as Windows stack
+// must be kept 16-byte aligned on syscall entry.
+//
+// Although it only permits maximum 42 parameters, it
+// is arguably large enough.
+const MaxArgs = 42
+
+// StdCallInfo is a structure used to pass parameters to the system call.
+type StdCallInfo struct {
+ Fn uintptr
+ N uintptr // number of parameters
+ Args uintptr // parameters
+ R1 uintptr // return values
+ R2 uintptr
+ Err uintptr // error number
+}
+
+// StdCall calls a function using Windows' stdcall convention.
+//
+//go:noescape
+func StdCall(fn *StdCallInfo)
+
+// asmstdcall is the function pointer for [AsmStdCallAddr].
+func asmstdcall(fn *StdCallInfo)
+
+// AsmStdCallAddr is the address of a function that accepts a pointer
+// to [StdCallInfo] stored on the stack following the C calling convention,
+// and calls the function using Windows' stdcall calling convention.
+// Shouldn't be called directly from Go.
+func AsmStdCallAddr() uintptr {
+ return abi.FuncPCABI0(asmstdcall)
+}
diff --git a/src/internal/syscall/windows/security_windows.go b/src/internal/syscall/windows/security_windows.go
index f0ab52ac81..2c35ad31f4 100644
--- a/src/internal/syscall/windows/security_windows.go
+++ b/src/internal/syscall/windows/security_windows.go
@@ -262,3 +262,6 @@ func GetSidSubAuthorityCount(sid *syscall.SID) uint8 {
defer runtime.KeepAlive(sid)
return *(*uint8)(unsafe.Pointer(getSidSubAuthorityCount(sid)))
}
+
+//sys InitializeAcl(acl *ACL, length uint32, revision uint32) (err error) = advapi32.InitializeAcl
+//sys SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *syscall.SID, group *syscall.SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetNamedSecurityInfoW
diff --git a/src/internal/syscall/windows/types_windows.go b/src/internal/syscall/windows/types_windows.go
index 93664b4b7d..3855df393d 100644
--- a/src/internal/syscall/windows/types_windows.go
+++ b/src/internal/syscall/windows/types_windows.go
@@ -260,3 +260,21 @@ type FILE_COMPLETION_INFORMATION struct {
// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-osversioninfoexa
// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/ns-wdm-_osversioninfoexw
const VER_NT_WORKSTATION = 0x0000001
+
+// https://learn.microsoft.com/en-us/windows/win32/api/accctrl/ne-accctrl-se_object_type
+type SE_OBJECT_TYPE uint32
+
+const (
+ SE_UNKNOWN_OBJECT_TYPE SE_OBJECT_TYPE = 0
+ SE_FILE_OBJECT SE_OBJECT_TYPE = 1
+)
+
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/security-information
+type SECURITY_INFORMATION uint32
+
+const (
+ DACL_SECURITY_INFORMATION SECURITY_INFORMATION = 0x00000004
+ UNPROTECTED_DACL_SECURITY_INFORMATION SECURITY_INFORMATION = 0x20000000
+)
+
+const ACL_REVISION = 2
diff --git a/src/internal/syscall/windows/zsyscall_windows.go b/src/internal/syscall/windows/zsyscall_windows.go
index 90cf0b92a4..c08b2ccdba 100644
--- a/src/internal/syscall/windows/zsyscall_windows.go
+++ b/src/internal/syscall/windows/zsyscall_windows.go
@@ -54,6 +54,7 @@ var (
procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount")
procImpersonateLoggedOnUser = modadvapi32.NewProc("ImpersonateLoggedOnUser")
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
+ procInitializeAcl = modadvapi32.NewProc("InitializeAcl")
procIsValidSid = modadvapi32.NewProc("IsValidSid")
procLogonUserW = modadvapi32.NewProc("LogonUserW")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
@@ -62,6 +63,7 @@ var (
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus")
procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
+ procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW")
procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation")
procProcessPrng = modbcryptprimitives.NewProc("ProcessPrng")
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
@@ -166,6 +168,14 @@ func ImpersonateSelf(impersonationlevel uint32) (err error) {
return
}
+func InitializeAcl(acl *ACL, length uint32, revision uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procInitializeAcl.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(length), uintptr(revision))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func IsValidSid(sid *syscall.SID) (valid bool) {
r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
valid = r0 != 0
@@ -234,6 +244,23 @@ func RevertToSelf() (err error) {
return
}
+func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *syscall.SID, group *syscall.SID, dacl *ACL, sacl *ACL) (ret error) {
+ var _p0 *uint16
+ _p0, ret = syscall.UTF16PtrFromString(objectName)
+ if ret != nil {
+ return
+ }
+ return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl)
+}
+
+func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *syscall.SID, group *syscall.SID, dacl *ACL, sacl *ACL) (ret error) {
+ r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
+ if r0 != 0 {
+ ret = syscall.Errno(r0)
+ }
+ return
+}
+
func SetTokenInformation(tokenHandle syscall.Token, tokenInformationClass uint32, tokenInformation unsafe.Pointer, tokenInformationLength uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(tokenHandle), uintptr(tokenInformationClass), uintptr(tokenInformation), uintptr(tokenInformationLength), 0, 0)
if r1 == 0 {
diff --git a/src/internal/trace/trace_test.go b/src/internal/trace/trace_test.go
index bd3f078f05..ce79960065 100644
--- a/src/internal/trace/trace_test.go
+++ b/src/internal/trace/trace_test.go
@@ -610,7 +610,6 @@ func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace
buildCmd.Args = append(buildCmd.Args, "-race")
}
buildCmd.Args = append(buildCmd.Args, testPath)
- buildCmd.Env = append(os.Environ(), "GOEXPERIMENT=rangefunc")
buildOutput, err := buildCmd.CombinedOutput()
if err != nil {
t.Fatalf("failed to build %s: %v: output:\n%s", testPath, err, buildOutput)
diff --git a/src/internal/types/errors/codes.go b/src/internal/types/errors/codes.go
index f8c9eb920f..b0f7d2d446 100644
--- a/src/internal/types/errors/codes.go
+++ b/src/internal/types/errors/codes.go
@@ -881,7 +881,9 @@ const (
// context in which it is used.
//
// Example:
- // var _ = 1 + []int{}
+ // func f[T ~int8 | ~int16 | ~int32 | ~int64](x T) T {
+ // return x + 1024
+ // }
InvalidUntypedConversion
// BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument
diff --git a/src/internal/types/testdata/check/builtins1.go b/src/internal/types/testdata/check/builtins1.go
index 422a5462d1..341723ccb0 100644
--- a/src/internal/types/testdata/check/builtins1.go
+++ b/src/internal/types/testdata/check/builtins1.go
@@ -211,7 +211,7 @@ func _[
var x2 P2
_ = max(x2)
_ = max(x2, x2)
- _ = max(1, 2 /* ERROR "cannot convert 2 (untyped int constant) to type P2" */, x2) // error at 2 because max is 2
+ _ = max(1, 2, x2 /* ERROR "mismatched types untyped int (previous argument) and P2 (type of x2)" */ )
_ = max(x1, x2 /* ERROR "mismatched types P1 (previous argument) and P2 (type of x2)" */ )
}
@@ -232,7 +232,7 @@ func _[
var x2 P2
_ = min(x2)
_ = min(x2, x2)
- _ = min(1 /* ERROR "cannot convert 1 (untyped int constant) to type P2" */ , 2, x2) // error at 1 because min is 1
+ _ = min(1, 2, x2 /* ERROR "mismatched types untyped int (previous argument) and P2 (type of x2)" */ )
_ = min(x1, x2 /* ERROR "mismatched types P1 (previous argument) and P2 (type of x2)" */ )
}
diff --git a/src/internal/types/testdata/check/expr2.go b/src/internal/types/testdata/check/expr2.go
index ebb85eb233..603f5ae190 100644
--- a/src/internal/types/testdata/check/expr2.go
+++ b/src/internal/types/testdata/check/expr2.go
@@ -201,7 +201,7 @@ func interfaces() {
var s11 S11
var s2 S2
- _ = i == 0 /* ERROR "cannot convert" */
+ _ = i == 0 /* ERROR "invalid operation: i == 0 (mismatched types interface{m() int} and untyped int)" */
_ = i == s1 /* ERROR "mismatched types" */
_ = i == &s1
_ = i == &s11
diff --git a/src/internal/types/testdata/fixedbugs/issue60434.go b/src/internal/types/testdata/fixedbugs/issue60434.go
index e1d76527f3..68aa7c2fdf 100644
--- a/src/internal/types/testdata/fixedbugs/issue60434.go
+++ b/src/internal/types/testdata/fixedbugs/issue60434.go
@@ -13,5 +13,5 @@ var s struct{ x int }
func _() {
f(s.y /* ERROR "s.y undefined" */)
- f(1 /* ERROR "cannot convert 1" */ / s)
+ f(1 /* ERROR "invalid operation: 1 / s (mismatched types untyped int and struct{x int})" */ / s)
}
diff --git a/src/internal/types/testdata/fixedbugs/issue73428.go b/src/internal/types/testdata/fixedbugs/issue73428.go
new file mode 100644
index 0000000000..b452b90fe3
--- /dev/null
+++ b/src/internal/types/testdata/fixedbugs/issue73428.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f() {}
+
+const c = 0
+
+var v int
+var _ = f < c // ERROR "invalid operation: f < c (mismatched types func() and untyped int)"
+var _ = f < v // ERROR "invalid operation: f < v (mismatched types func() and int)"
diff --git a/src/net/dial.go b/src/net/dial.go
index db0404c3f8..6264984cec 100644
--- a/src/net/dial.go
+++ b/src/net/dial.go
@@ -837,7 +837,7 @@ func (lc *ListenConfig) Listen(ctx context.Context, network, address string) (Li
// parameters.
//
// The ctx argument is used while resolving the address on which to listen;
-// it does not affect the returned Listener.
+// it does not affect the returned PacketConn.
func (lc *ListenConfig) ListenPacket(ctx context.Context, network, address string) (PacketConn, error) {
addrs, err := DefaultResolver.resolveAddrList(ctx, "listen", network, address, nil)
if err != nil {
diff --git a/src/net/http/server.go b/src/net/http/server.go
index 1ca5a227ef..cf0bd0a91d 100644
--- a/src/net/http/server.go
+++ b/src/net/http/server.go
@@ -1614,7 +1614,7 @@ func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {
// It's illegal to call this before the header has been flushed.
func (w *response) bodyAllowed() bool {
if !w.wroteHeader {
- panic("")
+ panic("net/http: bodyAllowed called before the header was written")
}
return bodyAllowedForStatus(w.status)
}
diff --git a/src/net/tcpconn_keepalive_test.go b/src/net/tcpconn_keepalive_test.go
index 53d0be034f..4bf2f9ef20 100644
--- a/src/net/tcpconn_keepalive_test.go
+++ b/src/net/tcpconn_keepalive_test.go
@@ -22,7 +22,7 @@ func TestTCPConnKeepAliveConfigDialer(t *testing.T) {
oldCfg KeepAliveConfig
)
testPreHookSetKeepAlive = func(nfd *netFD) {
- oldCfg, errHook = getCurrentKeepAliveSettings(fdType(nfd.pfd.Sysfd))
+ oldCfg, errHook = getCurrentKeepAliveSettings(nfd.pfd.Sysfd)
}
handler := func(ls *localServer, ln Listener) {
@@ -80,7 +80,7 @@ func TestTCPConnKeepAliveConfigListener(t *testing.T) {
oldCfg KeepAliveConfig
)
testPreHookSetKeepAlive = func(nfd *netFD) {
- oldCfg, errHook = getCurrentKeepAliveSettings(fdType(nfd.pfd.Sysfd))
+ oldCfg, errHook = getCurrentKeepAliveSettings(nfd.pfd.Sysfd)
}
ch := make(chan Conn, 1)
diff --git a/src/os/exec/dot_test.go b/src/os/exec/dot_test.go
index 1bf0d9c760..b95639e6c8 100644
--- a/src/os/exec/dot_test.go
+++ b/src/os/exec/dot_test.go
@@ -177,4 +177,48 @@ func TestLookPath(t *testing.T) {
}
}
})
+
+ checker := func(test string) func(t *testing.T) {
+ return func(t *testing.T) {
+ t.Helper()
+ t.Logf("PATH=%s", os.Getenv("PATH"))
+ p, err := LookPath(test)
+ if err == nil {
+ t.Errorf("%q: error expected, got nil", test)
+ }
+ if p != "" {
+ t.Errorf("%q: path returned should be \"\". Got %q", test, p)
+ }
+ }
+ }
+
+ // Reference behavior for the next test
+ t.Run(pathVar+"=$OTHER2", func(t *testing.T) {
+ t.Run("empty", checker(""))
+ t.Run("dot", checker("."))
+ t.Run("dotdot1", checker("abc/.."))
+ t.Run("dotdot2", checker(".."))
+ })
+
+ // Test the behavior when PATH contains an executable file which is not a directory
+ t.Run(pathVar+"=exe", func(t *testing.T) {
+ // Inject an executable file (not a directory) in PATH.
+ // Use our own binary os.Args[0].
+ t.Setenv(pathVar, testenv.Executable(t))
+ t.Run("empty", checker(""))
+ t.Run("dot", checker("."))
+ t.Run("dotdot1", checker("abc/.."))
+ t.Run("dotdot2", checker(".."))
+ })
+
+ // Test the behavior when PATH contains an executable file which is not a directory
+ t.Run(pathVar+"=exe/xx", func(t *testing.T) {
+ // Inject an executable file (not a directory) in PATH.
+ // Use our own binary os.Args[0].
+ t.Setenv(pathVar, filepath.Join(testenv.Executable(t), "xx"))
+ t.Run("empty", checker(""))
+ t.Run("dot", checker("."))
+ t.Run("dotdot1", checker("abc/.."))
+ t.Run("dotdot2", checker(".."))
+ })
}
diff --git a/src/os/exec/exec.go b/src/os/exec/exec.go
index 91a6831b04..38354a5244 100644
--- a/src/os/exec/exec.go
+++ b/src/os/exec/exec.go
@@ -1328,3 +1328,13 @@ func addCriticalEnv(env []string) []string {
// Code should use errors.Is(err, ErrDot), not err == ErrDot,
// to test whether a returned error err is due to this condition.
var ErrDot = errors.New("cannot run executable found relative to current directory")
+
+// validateLookPath excludes paths that can't be valid
+// executable names. See issue #74466 and CVE-2025-47906.
+func validateLookPath(s string) error {
+ switch s {
+ case "", ".", "..":
+ return ErrNotFound
+ }
+ return nil
+}
diff --git a/src/os/exec/lp_plan9.go b/src/os/exec/lp_plan9.go
index 87359b3551..0430af9eef 100644
--- a/src/os/exec/lp_plan9.go
+++ b/src/os/exec/lp_plan9.go
@@ -36,6 +36,10 @@ func findExecutable(file string) error {
// As of Go 1.19, LookPath will instead return that path along with an error satisfying
// [errors.Is](err, [ErrDot]). See the package documentation for more details.
func LookPath(file string) (string, error) {
+ if err := validateLookPath(file); err != nil {
+ return "", &Error{file, err}
+ }
+
// skip the path lookup for these prefixes
skip := []string{"/", "#", "./", "../"}
diff --git a/src/os/exec/lp_unix.go b/src/os/exec/lp_unix.go
index 8617d45e98..e5fddbafe2 100644
--- a/src/os/exec/lp_unix.go
+++ b/src/os/exec/lp_unix.go
@@ -54,6 +54,10 @@ func LookPath(file string) (string, error) {
// (only bypass the path if file begins with / or ./ or ../)
// but that would not match all the Unix shells.
+ if err := validateLookPath(file); err != nil {
+ return "", &Error{file, err}
+ }
+
if strings.Contains(file, "/") {
err := findExecutable(file)
if err == nil {
diff --git a/src/os/exec/lp_windows.go b/src/os/exec/lp_windows.go
index 1225674306..e01e7bbbba 100644
--- a/src/os/exec/lp_windows.go
+++ b/src/os/exec/lp_windows.go
@@ -67,6 +67,10 @@ func findExecutable(file string, exts []string) (string, error) {
// As of Go 1.19, LookPath will instead return that path along with an error satisfying
// [errors.Is](err, [ErrDot]). See the package documentation for more details.
func LookPath(file string) (string, error) {
+ if err := validateLookPath(file); err != nil {
+ return "", &Error{file, err}
+ }
+
return lookPath(file, pathExt())
}
@@ -80,6 +84,10 @@ func LookPath(file string) (string, error) {
// "C:\foo\example.com" would be returned as-is even if the
// program is actually "C:\foo\example.com.exe".
func lookExtensions(path, dir string) (string, error) {
+ if err := validateLookPath(path); err != nil {
+ return "", &Error{path, err}
+ }
+
if filepath.Base(path) == path {
path = "." + string(filepath.Separator) + path
}
diff --git a/src/os/file.go b/src/os/file.go
index 9603ac61e6..66269c199e 100644
--- a/src/os/file.go
+++ b/src/os/file.go
@@ -707,9 +707,9 @@ func (f *File) SyscallConn() (syscall.RawConn, error) {
// Fd returns the system file descriptor or handle referencing the open file.
// If f is closed, the descriptor becomes invalid.
-// If f is garbage collected, a cleanup may close the descriptor,
-// making it invalid; see [runtime.AddCleanup] for more information on when
-// a cleanup might be run.
+// If f is garbage collected, a finalizer may close the descriptor,
+// making it invalid; see [runtime.SetFinalizer] for more information on when
+// a finalizer might be run.
//
// Do not close the returned descriptor; that could cause a later
// close of f to close an unrelated descriptor.
diff --git a/src/os/file_plan9.go b/src/os/file_plan9.go
index 656a3e0bb0..17026409eb 100644
--- a/src/os/file_plan9.go
+++ b/src/os/file_plan9.go
@@ -23,7 +23,7 @@ func fixLongPath(path string) string {
// file is the real representation of *File.
// The extra level of indirection ensures that no clients of os
-// can overwrite this data, which could cause the cleanup
+// can overwrite this data, which could cause the finalizer
// to close the wrong file descriptor.
type file struct {
fdmu poll.FDMutex
@@ -31,7 +31,6 @@ type file struct {
name string
dirinfo atomic.Pointer[dirInfo] // nil unless directory being read
appendMode bool // whether file is opened for appending
- cleanup runtime.Cleanup // cleanup closes the file when no longer referenced
}
// fd is the Plan 9 implementation of Fd.
@@ -49,7 +48,7 @@ func newFileFromNewFile(fd uintptr, name string) *File {
return nil
}
f := &File{&file{sysfd: fdi, name: name}}
- f.cleanup = runtime.AddCleanup(f, func(f *file) { f.close() }, f.file)
+ runtime.SetFinalizer(f.file, (*file).close)
return f
}
@@ -160,9 +159,8 @@ func (file *file) close() error {
err := file.decref()
- // There is no need for a cleanup at this point. File must be alive at the point
- // where cleanup.stop is called.
- file.cleanup.Stop()
+ // no need for a finalizer anymore
+ runtime.SetFinalizer(file, nil)
return err
}
diff --git a/src/os/file_unix.go b/src/os/file_unix.go
index 721f08c911..2074df70fe 100644
--- a/src/os/file_unix.go
+++ b/src/os/file_unix.go
@@ -54,7 +54,7 @@ func rename(oldname, newname string) error {
// file is the real representation of *File.
// The extra level of indirection ensures that no clients of os
-// can overwrite this data, which could cause the cleanup
+// can overwrite this data, which could cause the finalizer
// to close the wrong file descriptor.
type file struct {
pfd poll.FD
@@ -63,7 +63,6 @@ type file struct {
nonblock bool // whether we set nonblocking mode
stdoutOrErr bool // whether this is stdout or stderr
appendMode bool // whether file is opened for appending
- cleanup runtime.Cleanup // cleanup closes the file when no longer referenced
}
// fd is the Unix implementation of Fd.
@@ -222,8 +221,7 @@ func newFile(fd int, name string, kind newFileKind, nonBlocking bool) *File {
}
}
- // Close the file when the File is not live.
- f.cleanup = runtime.AddCleanup(f, func(f *file) { f.close() }, f.file)
+ runtime.SetFinalizer(f.file, (*file).close)
return f
}
@@ -320,9 +318,8 @@ func (file *file) close() error {
err = &PathError{Op: "close", Path: file.name, Err: e}
}
- // There is no need for a cleanup at this point. File must be alive at the point
- // where cleanup.stop is called.
- file.cleanup.Stop()
+ // no need for a finalizer anymore
+ runtime.SetFinalizer(file, nil)
return err
}
diff --git a/src/os/file_windows.go b/src/os/file_windows.go
index ee6735fe44..7e94328710 100644
--- a/src/os/file_windows.go
+++ b/src/os/file_windows.go
@@ -22,14 +22,13 @@ const _UTIME_OMIT = -1
// file is the real representation of *File.
// The extra level of indirection ensures that no clients of os
-// can overwrite this data, which could cause the cleanup
+// can overwrite this data, which could cause the finalizer
// to close the wrong file descriptor.
type file struct {
pfd poll.FD
name string
dirinfo atomic.Pointer[dirInfo] // nil unless directory being read
appendMode bool // whether file is opened for appending
- cleanup runtime.Cleanup // cleanup closes the file when no longer referenced
}
// fd is the Windows implementation of Fd.
@@ -69,7 +68,7 @@ func newFile(h syscall.Handle, name string, kind string, nonBlocking bool) *File
},
name: name,
}}
- f.cleanup = runtime.AddCleanup(f, func(f *file) { f.close() }, f.file)
+ runtime.SetFinalizer(f.file, (*file).close)
// Ignore initialization errors.
// Assume any problems will show up in later I/O.
@@ -144,9 +143,8 @@ func (file *file) close() error {
err = &PathError{Op: "close", Path: file.name, Err: e}
}
- // There is no need for a cleanup at this point. File must be alive at the point
- // where cleanup.stop is called.
- file.cleanup.Stop()
+ // no need for a finalizer anymore
+ runtime.SetFinalizer(file, nil)
return err
}
diff --git a/src/os/os_windows_test.go b/src/os/os_windows_test.go
index 89a61f0229..515d1c1359 100644
--- a/src/os/os_windows_test.go
+++ b/src/os/os_windows_test.go
@@ -64,9 +64,6 @@ func TestSameWindowsFile(t *testing.T) {
}
p := filepath.VolumeName(path) + filepath.Base(path)
- if err != nil {
- t.Fatal(err)
- }
ia3, err := os.Stat(p)
if err != nil {
t.Fatal(err)
diff --git a/src/os/root_openat.go b/src/os/root_openat.go
index 192c29e319..e433bd5093 100644
--- a/src/os/root_openat.go
+++ b/src/os/root_openat.go
@@ -22,11 +22,10 @@ type root struct {
// refs is incremented while an operation is using fd.
// closed is set when Close is called.
// fd is closed when closed is true and refs is 0.
- mu sync.Mutex
- fd sysfdType
- refs int // number of active operations
- closed bool // set when closed
- cleanup runtime.Cleanup // cleanup closes the file when no longer referenced
+ mu sync.Mutex
+ fd sysfdType
+ refs int // number of active operations
+ closed bool // set when closed
}
func (r *root) Close() error {
@@ -36,9 +35,7 @@ func (r *root) Close() error {
syscall.Close(r.fd)
}
r.closed = true
- // There is no need for a cleanup at this point. Root must be alive at the point
- // where cleanup.stop is called.
- r.cleanup.Stop()
+ runtime.SetFinalizer(r, nil) // no need for a finalizer any more
return nil
}
diff --git a/src/os/root_unix.go b/src/os/root_unix.go
index ed21afffb5..4d6fc19a08 100644
--- a/src/os/root_unix.go
+++ b/src/os/root_unix.go
@@ -56,7 +56,7 @@ func newRoot(fd int, name string) (*Root, error) {
fd: fd,
name: name,
}}
- r.root.cleanup = runtime.AddCleanup(r, func(f *root) { f.Close() }, r.root)
+ runtime.SetFinalizer(r.root, (*root).Close)
return r, nil
}
diff --git a/src/os/root_windows.go b/src/os/root_windows.go
index a918606806..523ee48d13 100644
--- a/src/os/root_windows.go
+++ b/src/os/root_windows.go
@@ -113,7 +113,7 @@ func newRoot(fd syscall.Handle, name string) (*Root, error) {
fd: fd,
name: name,
}}
- r.root.cleanup = runtime.AddCleanup(r, func(f *root) { f.Close() }, r.root)
+ runtime.SetFinalizer(r.root, (*root).Close)
return r, nil
}
diff --git a/src/os/user/user_windows_test.go b/src/os/user/user_windows_test.go
index 7dca2fc5f9..c95811594d 100644
--- a/src/os/user/user_windows_test.go
+++ b/src/os/user/user_windows_test.go
@@ -7,6 +7,7 @@ package user
import (
"crypto/rand"
"encoding/base64"
+ "encoding/binary"
"errors"
"fmt"
"internal/syscall/windows"
@@ -16,11 +17,93 @@ import (
"runtime"
"slices"
"strconv"
+ "strings"
+ "sync"
"syscall"
"testing"
+ "unicode"
+ "unicode/utf8"
"unsafe"
)
+// addUserAccount creates a local user account.
+// It returns the name and password of the new account.
+// Multiple programs or goroutines calling addUserAccount simultaneously will not choose the same directory.
+func addUserAccount(t *testing.T) (name, password string) {
+ t.TempDir()
+ pattern := t.Name()
+ // Windows limits the user name to 20 characters,
+ // leave space for a 4 digits random suffix.
+ const maxNameLen, suffixLen = 20, 4
+ pattern = pattern[:min(len(pattern), maxNameLen-suffixLen)]
+ // Drop unusual characters from the account name.
+ mapper := func(r rune) rune {
+ if r < utf8.RuneSelf {
+ if '0' <= r && r <= '9' ||
+ 'a' <= r && r <= 'z' ||
+ 'A' <= r && r <= 'Z' {
+ return r
+ }
+ } else if unicode.IsLetter(r) || unicode.IsNumber(r) {
+ return r
+ }
+ return -1
+ }
+ pattern = strings.Map(mapper, pattern)
+
+ // Generate a long random password.
+ var pwd [33]byte
+ rand.Read(pwd[:])
+ // Add special chars to ensure it satisfies password requirements.
+ password = base64.StdEncoding.EncodeToString(pwd[:]) + "_-As@!%*(1)4#2"
+ password16, err := syscall.UTF16PtrFromString(password)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ try := 0
+ for {
+ // Calculate a random suffix to append to the user name.
+ var suffix [2]byte
+ rand.Read(suffix[:])
+ suffixStr := strconv.FormatUint(uint64(binary.LittleEndian.Uint16(suffix[:])), 10)
+ name := pattern + suffixStr[:min(len(suffixStr), suffixLen)]
+ name16, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Create user.
+ userInfo := windows.UserInfo1{
+ Name: name16,
+ Password: password16,
+ Priv: windows.USER_PRIV_USER,
+ }
+ err = windows.NetUserAdd(nil, 1, (*byte)(unsafe.Pointer(&userInfo)), nil)
+ if errors.Is(err, syscall.ERROR_ACCESS_DENIED) {
+ t.Skip("skipping test; don't have permission to create user")
+ }
+ // If the user already exists, try again with a different name.
+ if errors.Is(err, windows.NERR_UserExists) {
+ if try++; try < 1000 {
+ t.Log("user already exists, trying again with a different name")
+ continue
+ }
+ }
+ if err != nil {
+ t.Fatalf("NetUserAdd failed: %v", err)
+ }
+ // Delete the user when the test is done.
+ t.Cleanup(func() {
+ if err := windows.NetUserDel(nil, name16); err != nil {
+ if !errors.Is(err, windows.NERR_UserNotFound) {
+ t.Fatal(err)
+ }
+ }
+ })
+ return name, password
+ }
+}
+
// windowsTestAccount creates a test user and returns a token for that user.
// If the user already exists, it will be deleted and recreated.
// The caller is responsible for closing the token.
@@ -32,47 +115,15 @@ func windowsTestAccount(t *testing.T) (syscall.Token, *User) {
// See https://dev.go/issue/70396.
t.Skip("skipping non-hermetic test outside of Go builders")
}
- const testUserName = "GoStdTestUser01"
- var password [33]byte
- rand.Read(password[:])
- // Add special chars to ensure it satisfies password requirements.
- pwd := base64.StdEncoding.EncodeToString(password[:]) + "_-As@!%*(1)4#2"
- name, err := syscall.UTF16PtrFromString(testUserName)
+ name, password := addUserAccount(t)
+ name16, err := syscall.UTF16PtrFromString(name)
if err != nil {
t.Fatal(err)
}
- pwd16, err := syscall.UTF16PtrFromString(pwd)
+ pwd16, err := syscall.UTF16PtrFromString(password)
if err != nil {
t.Fatal(err)
}
- userInfo := windows.UserInfo1{
- Name: name,
- Password: pwd16,
- Priv: windows.USER_PRIV_USER,
- }
- // Create user.
- err = windows.NetUserAdd(nil, 1, (*byte)(unsafe.Pointer(&userInfo)), nil)
- if errors.Is(err, syscall.ERROR_ACCESS_DENIED) {
- t.Skip("skipping test; don't have permission to create user")
- }
- if errors.Is(err, windows.NERR_UserExists) {
- // User already exists, delete and recreate.
- if err = windows.NetUserDel(nil, name); err != nil {
- t.Fatal(err)
- }
- if err = windows.NetUserAdd(nil, 1, (*byte)(unsafe.Pointer(&userInfo)), nil); err != nil {
- t.Fatal(err)
- }
- } else if err != nil {
- t.Fatal(err)
- }
- t.Cleanup(func() {
- if err = windows.NetUserDel(nil, name); err != nil {
- if !errors.Is(err, windows.NERR_UserNotFound) {
- t.Fatal(err)
- }
- }
- })
domain, err := syscall.UTF16PtrFromString(".")
if err != nil {
t.Fatal(err)
@@ -80,13 +131,13 @@ func windowsTestAccount(t *testing.T) (syscall.Token, *User) {
const LOGON32_PROVIDER_DEFAULT = 0
const LOGON32_LOGON_INTERACTIVE = 2
var token syscall.Token
- if err = windows.LogonUser(name, domain, pwd16, LOGON32_LOGON_INTERACTIVE, LOGON32_PROVIDER_DEFAULT, &token); err != nil {
+ if err = windows.LogonUser(name16, domain, pwd16, LOGON32_LOGON_INTERACTIVE, LOGON32_PROVIDER_DEFAULT, &token); err != nil {
t.Fatal(err)
}
t.Cleanup(func() {
token.Close()
})
- usr, err := Lookup(testUserName)
+ usr, err := Lookup(name)
if err != nil {
t.Fatal(err)
}
@@ -211,9 +262,21 @@ func TestGroupIdsTestUser(t *testing.T) {
}
}
+var isSystemDefaultLCIDEnglish = sync.OnceValue(func() bool {
+ // GetSystemDefaultLCID()
+ // https://learn.microsoft.com/en-us/windows/win32/api/winnls/nf-winnls-getsystemdefaultlcid
+ r, _, _ := syscall.MustLoadDLL("kernel32.dll").MustFindProc("GetSystemDefaultLCID").Call()
+ lcid := uint32(r)
+
+ lcidLow := lcid & 0xFF
+ // 0x0409 is en-US
+ // 0x1000 is "Locale without assigned LCID"
+ return lcidLow == 0x00 || lcidLow == 0x09
+})
+
var serviceAccounts = []struct {
sid string
- name string
+ name string // name on english Windows
}{
{"S-1-5-18", "NT AUTHORITY\\SYSTEM"},
{"S-1-5-19", "NT AUTHORITY\\LOCAL SERVICE"},
@@ -223,14 +286,21 @@ var serviceAccounts = []struct {
func TestLookupServiceAccount(t *testing.T) {
t.Parallel()
for _, tt := range serviceAccounts {
- u, err := Lookup(tt.name)
- if err != nil {
- t.Errorf("Lookup(%q): %v", tt.name, err)
- continue
- }
- if u.Uid != tt.sid {
- t.Errorf("unexpected uid for %q; got %q, want %q", u.Name, u.Uid, tt.sid)
- }
+ t.Run(tt.name, func(t *testing.T) {
+ u, err := Lookup(tt.name)
+ if err != nil {
+ t.Logf("Lookup(%q): %v", tt.name, err)
+ if !isSystemDefaultLCIDEnglish() {
+ t.Skipf("test not supported on non-English Windows")
+ }
+ t.Fail()
+ return
+ }
+ if u.Uid != tt.sid {
+ t.Errorf("unexpected uid for %q; got %q, want %q", u.Name, u.Uid, tt.sid)
+ }
+ t.Logf("Lookup(%q): %q", tt.name, u.Username)
+ })
}
}
@@ -246,7 +316,11 @@ func TestLookupIdServiceAccount(t *testing.T) {
t.Errorf("unexpected gid for %q; got %q, want %q", u.Name, u.Gid, tt.sid)
}
if u.Username != tt.name {
- t.Errorf("unexpected user name for %q; got %q, want %q", u.Gid, u.Username, tt.name)
+ if isSystemDefaultLCIDEnglish() {
+ t.Errorf("unexpected user name for %q; got %q, want %q", u.Gid, u.Username, tt.name)
+ } else {
+ t.Logf("user name for %q: %q", u.Gid, u.Username)
+ }
}
}
}
@@ -254,14 +328,20 @@ func TestLookupIdServiceAccount(t *testing.T) {
func TestLookupGroupServiceAccount(t *testing.T) {
t.Parallel()
for _, tt := range serviceAccounts {
- u, err := LookupGroup(tt.name)
- if err != nil {
- t.Errorf("LookupGroup(%q): %v", tt.name, err)
- continue
- }
- if u.Gid != tt.sid {
- t.Errorf("unexpected gid for %q; got %q, want %q", u.Name, u.Gid, tt.sid)
- }
+ t.Run(tt.name, func(t *testing.T) {
+ g, err := LookupGroup(tt.name)
+ if err != nil {
+ t.Logf("LookupGroup(%q): %v", tt.name, err)
+ if !isSystemDefaultLCIDEnglish() {
+ t.Skipf("test not supported on non-English Windows")
+ }
+ t.Fail()
+ return
+ }
+ if g.Gid != tt.sid {
+ t.Errorf("unexpected gid for %q; got %q, want %q", g.Name, g.Gid, tt.sid)
+ }
+ })
}
}
diff --git a/src/reflect/abi.go b/src/reflect/abi.go
index b67d821743..bcd0a95f1b 100644
--- a/src/reflect/abi.go
+++ b/src/reflect/abi.go
@@ -166,7 +166,7 @@ func (a *abiSeq) addRcvr(rcvr *abi.Type) (*abiStep, bool) {
// The receiver is always one word.
a.valueStart = append(a.valueStart, len(a.steps))
var ok, ptr bool
- if rcvr.IfaceIndir() || rcvr.Pointers() {
+ if !rcvr.IsDirectIface() || rcvr.Pointers() {
ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1)
ptr = true
} else {
diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go
index cd3e306a57..da7b2d7764 100644
--- a/src/reflect/all_test.go
+++ b/src/reflect/all_test.go
@@ -12,7 +12,6 @@ import (
"go/token"
"internal/asan"
"internal/goarch"
- "internal/goexperiment"
"internal/msan"
"internal/race"
"internal/testenv"
@@ -1277,10 +1276,6 @@ var deepEqualPerfTests = []struct {
}
func TestDeepEqualAllocs(t *testing.T) {
- // TODO(prattmic): maps on stack
- if goexperiment.SwissMap {
- t.Skipf("Maps on stack not yet implemented")
- }
if asan.Enabled {
t.Skip("test allocates more with -asan; see #70079")
}
@@ -7343,7 +7338,8 @@ func TestGCBits(t *testing.T) {
verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1))
verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1))
- testGCBitsMap(t)
+ // For maps, we don't manually construct GC data, instead using the
+ // public reflect API in groupAndSlotOf.
}
func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) }
diff --git a/src/reflect/badlinkname.go b/src/reflect/badlinkname.go
index eb701bff03..8a9bea6721 100644
--- a/src/reflect/badlinkname.go
+++ b/src/reflect/badlinkname.go
@@ -27,7 +27,7 @@ import (
//
//go:linkname unusedIfaceIndir reflect.ifaceIndir
func unusedIfaceIndir(t *abi.Type) bool {
- return t.Kind_&abi.KindDirectIface == 0
+ return !t.IsDirectIface()
}
//go:linkname valueInterface
diff --git a/src/reflect/export_noswiss_test.go b/src/reflect/export_noswiss_test.go
deleted file mode 100644
index 34e5e92055..0000000000
--- a/src/reflect/export_noswiss_test.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2024 Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package reflect
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-func MapBucketOf(x, y Type) Type {
- return toType(bucketOf(x.common(), y.common()))
-}
-
-func CachedBucketOf(m Type) Type {
- t := m.(*rtype)
- if Kind(t.t.Kind_&abi.KindMask) != Map {
- panic("not map")
- }
- tt := (*mapType)(unsafe.Pointer(t))
- return toType(tt.Bucket)
-}
diff --git a/src/reflect/export_swiss_test.go b/src/reflect/export_swiss_test.go
deleted file mode 100644
index ac3cd0adf7..0000000000
--- a/src/reflect/export_swiss_test.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2024 Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package reflect
-
-func MapGroupOf(x, y Type) Type {
- grp, _ := groupAndSlotOf(x, y)
- return grp
-}
diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go
index eedd063fcb..fc209fdfba 100644
--- a/src/reflect/export_test.go
+++ b/src/reflect/export_test.go
@@ -152,3 +152,8 @@ var MethodValueCallCodePtr = methodValueCallCodePtr
var InternalIsZero = isZero
var IsRegularMemory = isRegularMemory
+
+func MapGroupOf(x, y Type) Type {
+ grp, _ := groupAndSlotOf(x, y)
+ return grp
+}
diff --git a/src/reflect/makefunc.go b/src/reflect/makefunc.go
index 5da6cd2ec7..d35c92a14c 100644
--- a/src/reflect/makefunc.go
+++ b/src/reflect/makefunc.go
@@ -8,6 +8,7 @@ package reflect
import (
"internal/abi"
+ "internal/goarch"
"unsafe"
)
@@ -164,13 +165,18 @@ func moveMakeFuncArgPtrs(ctxt *makeFuncCtxt, args *abi.RegArgs) {
for i, arg := range args.Ints {
// Avoid write barriers! Because our write barrier enqueues what
// was there before, we might enqueue garbage.
+ // Also avoid bounds checks, we don't have the stack space for it.
+ // (Normally the prove pass removes them, but for -N builds we
+ // use too much stack.)
+ // ptr := &args.Ptrs[i] (but cast from *unsafe.Pointer to *uintptr)
+ ptr := (*uintptr)(add(unsafe.Pointer(unsafe.SliceData(args.Ptrs[:])), uintptr(i)*goarch.PtrSize, "always in [0:IntArgRegs]"))
if ctxt.regPtrs.Get(i) {
- *(*uintptr)(unsafe.Pointer(&args.Ptrs[i])) = arg
+ *ptr = arg
} else {
// We *must* zero this space ourselves because it's defined in
// assembly code and the GC will scan these pointers. Otherwise,
// there will be garbage here.
- *(*uintptr)(unsafe.Pointer(&args.Ptrs[i])) = 0
+ *ptr = 0
}
}
}
diff --git a/src/reflect/map_swiss.go b/src/reflect/map.go
index 2eac51e57d..9d25b1818c 100644
--- a/src/reflect/map_swiss.go
+++ b/src/reflect/map.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.swissmap
-
package reflect
import (
@@ -14,16 +12,11 @@ import (
"unsafe"
)
-// mapType represents a map type.
-//
-// TODO(prattmic): Only used within this file, could be cleaned up.
-type mapType = abi.SwissMapType
-
func (t *rtype) Key() Type {
if t.Kind() != Map {
panic("reflect: Key of non-map type " + t.String())
}
- tt := (*mapType)(unsafe.Pointer(t))
+ tt := (*abi.MapType)(unsafe.Pointer(t))
return toType(tt.Key)
}
@@ -50,7 +43,7 @@ func MapOf(key, elem Type) Type {
// Look in known types.
s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
for _, tt := range typesByString(s) {
- mt := (*mapType)(unsafe.Pointer(tt))
+ mt := (*abi.MapType)(unsafe.Pointer(tt))
if mt.Key == ktyp && mt.Elem == etyp {
ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
return ti.(Type)
@@ -63,9 +56,9 @@ func MapOf(key, elem Type) Type {
// Note: flag values must match those used in the TMAP case
// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
- mt := **(**mapType)(unsafe.Pointer(&imap))
+ mt := **(**abi.MapType)(unsafe.Pointer(&imap))
mt.Str = resolveReflectName(newName(s, "", false, false))
- mt.TFlag = 0
+ mt.TFlag = abi.TFlagDirectIface
mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
mt.Key = ktyp
mt.Elem = etyp
@@ -78,16 +71,16 @@ func MapOf(key, elem Type) Type {
mt.ElemOff = slot.Field(1).Offset
mt.Flags = 0
if needKeyUpdate(ktyp) {
- mt.Flags |= abi.SwissMapNeedKeyUpdate
+ mt.Flags |= abi.MapNeedKeyUpdate
}
if hashMightPanic(ktyp) {
- mt.Flags |= abi.SwissMapHashMightPanic
+ mt.Flags |= abi.MapHashMightPanic
}
- if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
- mt.Flags |= abi.SwissMapIndirectKey
+ if ktyp.Size_ > abi.MapMaxKeyBytes {
+ mt.Flags |= abi.MapIndirectKey
}
- if etyp.Size_ > abi.SwissMapMaxKeyBytes {
- mt.Flags |= abi.SwissMapIndirectElem
+ if etyp.Size_ > abi.MapMaxKeyBytes {
+ mt.Flags |= abi.MapIndirectElem
}
mt.PtrToThis = 0
@@ -98,16 +91,16 @@ func MapOf(key, elem Type) Type {
func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
// type group struct {
// ctrl uint64
- // slots [abi.SwissMapGroupSlots]struct {
+ // slots [abi.MapGroupSlots]struct {
// key keyType
// elem elemType
// }
// }
- if ktyp.Size() > abi.SwissMapMaxKeyBytes {
+ if ktyp.Size() > abi.MapMaxKeyBytes {
ktyp = PointerTo(ktyp)
}
- if etyp.Size() > abi.SwissMapMaxElemBytes {
+ if etyp.Size() > abi.MapMaxElemBytes {
etyp = PointerTo(etyp)
}
@@ -130,7 +123,7 @@ func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
},
{
Name: "Slots",
- Type: ArrayOf(abi.SwissMapGroupSlots, slot),
+ Type: ArrayOf(abi.MapGroupSlots, slot),
},
}
group := StructOf(fields)
@@ -145,7 +138,7 @@ var stringType = rtypeOf("")
// As in Go, the key's value must be assignable to the map's key type.
func (v Value) MapIndex(key Value) Value {
v.mustBe(Map)
- tt := (*mapType)(unsafe.Pointer(v.typ()))
+ tt := (*abi.MapType)(unsafe.Pointer(v.typ()))
// Do not require key to be exported, so that DeepEqual
// and other programs can use all the keys returned by
@@ -156,7 +149,7 @@ func (v Value) MapIndex(key Value) Value {
// of unexported fields.
var e unsafe.Pointer
- if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
+ if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
k := *(*string)(key.ptr)
e = mapaccess_faststr(v.typ(), v.pointer(), k)
} else {
@@ -181,7 +174,7 @@ func (v Value) MapIndex(key Value) Value {
// Equivalent to runtime.mapIterStart.
//
//go:noinline
-func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) {
+func mapIterStart(t *abi.MapType, m *maps.Map, it *maps.Iter) {
if race.Enabled && m != nil {
callerpc := sys.GetCallerPC()
race.ReadPC(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapIterStart))
@@ -209,7 +202,7 @@ func mapIterNext(it *maps.Iter) {
// It returns an empty slice if v represents a nil map.
func (v Value) MapKeys() []Value {
v.mustBe(Map)
- tt := (*mapType)(unsafe.Pointer(v.typ()))
+ tt := (*abi.MapType)(unsafe.Pointer(v.typ()))
keyType := tt.Key
fl := v.flag.ro() | flag(keyType.Kind())
@@ -248,10 +241,6 @@ type MapIter struct {
hiter maps.Iter
}
-// TODO(prattmic): only for sharing the linkname declarations with old maps.
-// Remove with old maps.
-type hiter = maps.Iter
-
// Key returns the key of iter's current map entry.
func (iter *MapIter) Key() Value {
if !iter.hiter.Initialized() {
@@ -262,7 +251,7 @@ func (iter *MapIter) Key() Value {
panic("MapIter.Key called on exhausted iterator")
}
- t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+ t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
ktype := t.Key
return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
}
@@ -287,7 +276,7 @@ func (v Value) SetIterKey(iter *MapIter) {
target = v.ptr
}
- t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+ t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
ktype := t.Key
iter.m.mustBeExported() // do not let unexported m leak
@@ -306,7 +295,7 @@ func (iter *MapIter) Value() Value {
panic("MapIter.Value called on exhausted iterator")
}
- t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+ t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
vtype := t.Elem
return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
}
@@ -331,7 +320,7 @@ func (v Value) SetIterValue(iter *MapIter) {
target = v.ptr
}
- t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+ t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
vtype := t.Elem
iter.m.mustBeExported() // do not let unexported m leak
@@ -348,7 +337,7 @@ func (iter *MapIter) Next() bool {
panic("MapIter.Next called on an iterator that does not have an associated map Value")
}
if !iter.hiter.Initialized() {
- t := (*mapType)(unsafe.Pointer(iter.m.typ()))
+ t := (*abi.MapType)(unsafe.Pointer(iter.m.typ()))
m := (*maps.Map)(iter.m.pointer())
mapIterStart(t, m, &iter.hiter)
} else {
@@ -408,9 +397,9 @@ func (v Value) SetMapIndex(key, elem Value) {
v.mustBe(Map)
v.mustBeExported()
key.mustBeExported()
- tt := (*mapType)(unsafe.Pointer(v.typ()))
+ tt := (*abi.MapType)(unsafe.Pointer(v.typ()))
- if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes {
+ if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes {
k := *(*string)(key.ptr)
if elem.typ() == nil {
mapdelete_faststr(v.typ(), v.pointer(), k)
diff --git a/src/reflect/map_noswiss.go b/src/reflect/map_noswiss.go
deleted file mode 100644
index 19696a4f4b..0000000000
--- a/src/reflect/map_noswiss.go
+++ /dev/null
@@ -1,484 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package reflect
-
-import (
- "internal/abi"
- "internal/goarch"
- "unsafe"
-)
-
-// mapType represents a map type.
-type mapType struct {
- abi.OldMapType
-}
-
-// Pushed from runtime.
-
-//go:noescape
-func mapiterinit(t *abi.Type, m unsafe.Pointer, it *hiter)
-
-//go:noescape
-func mapiternext(it *hiter)
-
-func (t *rtype) Key() Type {
- if t.Kind() != Map {
- panic("reflect: Key of non-map type " + t.String())
- }
- tt := (*mapType)(unsafe.Pointer(t))
- return toType(tt.Key)
-}
-
-// MapOf returns the map type with the given key and element types.
-// For example, if k represents int and e represents string,
-// MapOf(k, e) represents map[int]string.
-//
-// If the key type is not a valid map key type (that is, if it does
-// not implement Go's == operator), MapOf panics.
-func MapOf(key, elem Type) Type {
- ktyp := key.common()
- etyp := elem.common()
-
- if ktyp.Equal == nil {
- panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
- }
-
- // Look in cache.
- ckey := cacheKey{Map, ktyp, etyp, 0}
- if mt, ok := lookupCache.Load(ckey); ok {
- return mt.(Type)
- }
-
- // Look in known types.
- s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
- for _, tt := range typesByString(s) {
- mt := (*mapType)(unsafe.Pointer(tt))
- if mt.Key == ktyp && mt.Elem == etyp {
- ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
- return ti.(Type)
- }
- }
-
- // Make a map type.
- // Note: flag values must match those used in the TMAP case
- // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
- var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
- mt := **(**mapType)(unsafe.Pointer(&imap))
- mt.Str = resolveReflectName(newName(s, "", false, false))
- mt.TFlag = 0
- mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
- mt.Key = ktyp
- mt.Elem = etyp
- mt.Bucket = bucketOf(ktyp, etyp)
- mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
- return typehash(ktyp, p, seed)
- }
- mt.Flags = 0
- if ktyp.Size_ > abi.OldMapMaxKeyBytes {
- mt.KeySize = uint8(goarch.PtrSize)
- mt.Flags |= 1 // indirect key
- } else {
- mt.KeySize = uint8(ktyp.Size_)
- }
- if etyp.Size_ > abi.OldMapMaxElemBytes {
- mt.ValueSize = uint8(goarch.PtrSize)
- mt.Flags |= 2 // indirect value
- } else {
- mt.ValueSize = uint8(etyp.Size_)
- }
- mt.BucketSize = uint16(mt.Bucket.Size_)
- if isReflexive(ktyp) {
- mt.Flags |= 4
- }
- if needKeyUpdate(ktyp) {
- mt.Flags |= 8
- }
- if hashMightPanic(ktyp) {
- mt.Flags |= 16
- }
- mt.PtrToThis = 0
-
- ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
- return ti.(Type)
-}
-
-func bucketOf(ktyp, etyp *abi.Type) *abi.Type {
- if ktyp.Size_ > abi.OldMapMaxKeyBytes {
- ktyp = ptrTo(ktyp)
- }
- if etyp.Size_ > abi.OldMapMaxElemBytes {
- etyp = ptrTo(etyp)
- }
-
- // Prepare GC data if any.
- // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes,
- // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap.
- // Note that since the key and value are known to be <= 128 bytes,
- // they're guaranteed to have bitmaps instead of GC programs.
- var gcdata *byte
- var ptrdata uintptr
-
- size := abi.OldMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
- if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 {
- panic("reflect: bad size computation in MapOf")
- }
-
- if ktyp.Pointers() || etyp.Pointers() {
- nptr := (abi.OldMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
- n := (nptr + 7) / 8
-
- // Runtime needs pointer masks to be a multiple of uintptr in size.
- n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
- mask := make([]byte, n)
- base := uintptr(abi.OldMapBucketCount / goarch.PtrSize)
-
- if ktyp.Pointers() {
- emitGCMask(mask, base, ktyp, abi.OldMapBucketCount)
- }
- base += abi.OldMapBucketCount * ktyp.Size_ / goarch.PtrSize
-
- if etyp.Pointers() {
- emitGCMask(mask, base, etyp, abi.OldMapBucketCount)
- }
- base += abi.OldMapBucketCount * etyp.Size_ / goarch.PtrSize
-
- word := base
- mask[word/8] |= 1 << (word % 8)
- gcdata = &mask[0]
- ptrdata = (word + 1) * goarch.PtrSize
-
- // overflow word must be last
- if ptrdata != size {
- panic("reflect: bad layout computation in MapOf")
- }
- }
-
- b := &abi.Type{
- Align_: goarch.PtrSize,
- Size_: size,
- Kind_: abi.Struct,
- PtrBytes: ptrdata,
- GCData: gcdata,
- }
- s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")"
- b.Str = resolveReflectName(newName(s, "", false, false))
- return b
-}
-
-var stringType = rtypeOf("")
-
-// MapIndex returns the value associated with key in the map v.
-// It panics if v's Kind is not [Map].
-// It returns the zero Value if key is not found in the map or if v represents a nil map.
-// As in Go, the key's value must be assignable to the map's key type.
-func (v Value) MapIndex(key Value) Value {
- v.mustBe(Map)
- tt := (*mapType)(unsafe.Pointer(v.typ()))
-
- // Do not require key to be exported, so that DeepEqual
- // and other programs can use all the keys returned by
- // MapKeys as arguments to MapIndex. If either the map
- // or the key is unexported, though, the result will be
- // considered unexported. This is consistent with the
- // behavior for structs, which allow read but not write
- // of unexported fields.
-
- var e unsafe.Pointer
- if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.OldMapMaxElemBytes {
- k := *(*string)(key.ptr)
- e = mapaccess_faststr(v.typ(), v.pointer(), k)
- } else {
- key = key.assignTo("reflect.Value.MapIndex", tt.Key, nil)
- var k unsafe.Pointer
- if key.flag&flagIndir != 0 {
- k = key.ptr
- } else {
- k = unsafe.Pointer(&key.ptr)
- }
- e = mapaccess(v.typ(), v.pointer(), k)
- }
- if e == nil {
- return Value{}
- }
- typ := tt.Elem
- fl := (v.flag | key.flag).ro()
- fl |= flag(typ.Kind())
- return copyVal(typ, fl, e)
-}
-
-// MapKeys returns a slice containing all the keys present in the map,
-// in unspecified order.
-// It panics if v's Kind is not [Map].
-// It returns an empty slice if v represents a nil map.
-func (v Value) MapKeys() []Value {
- v.mustBe(Map)
- tt := (*mapType)(unsafe.Pointer(v.typ()))
- keyType := tt.Key
-
- fl := v.flag.ro() | flag(keyType.Kind())
-
- m := v.pointer()
- mlen := int(0)
- if m != nil {
- mlen = maplen(m)
- }
- var it hiter
- mapiterinit(v.typ(), m, &it)
- a := make([]Value, mlen)
- var i int
- for i = 0; i < len(a); i++ {
- key := it.key
- if key == nil {
- // Someone deleted an entry from the map since we
- // called maplen above. It's a data race, but nothing
- // we can do about it.
- break
- }
- a[i] = copyVal(keyType, fl, key)
- mapiternext(&it)
- }
- return a[:i]
-}
-
-// hiter's structure matches runtime.hiter's structure.
-// Having a clone here allows us to embed a map iterator
-// inside type MapIter so that MapIters can be re-used
-// without doing any allocations.
-type hiter struct {
- key unsafe.Pointer
- elem unsafe.Pointer
- t unsafe.Pointer
- h unsafe.Pointer
- buckets unsafe.Pointer
- bptr unsafe.Pointer
- overflow *[]unsafe.Pointer
- oldoverflow *[]unsafe.Pointer
- startBucket uintptr
- offset uint8
- wrapped bool
- B uint8
- i uint8
- bucket uintptr
- checkBucket uintptr
- clearSeq uint64
-}
-
-func (h *hiter) initialized() bool {
- return h.t != nil
-}
-
-// A MapIter is an iterator for ranging over a map.
-// See [Value.MapRange].
-type MapIter struct {
- m Value
- hiter hiter
-}
-
-// Key returns the key of iter's current map entry.
-func (iter *MapIter) Key() Value {
- if !iter.hiter.initialized() {
- panic("MapIter.Key called before Next")
- }
- iterkey := iter.hiter.key
- if iterkey == nil {
- panic("MapIter.Key called on exhausted iterator")
- }
-
- t := (*mapType)(unsafe.Pointer(iter.m.typ()))
- ktype := t.Key
- return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey)
-}
-
-// SetIterKey assigns to v the key of iter's current map entry.
-// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value.
-// As in Go, the key must be assignable to v's type and
-// must not be derived from an unexported field.
-// It panics if [Value.CanSet] returns false.
-func (v Value) SetIterKey(iter *MapIter) {
- if !iter.hiter.initialized() {
- panic("reflect: Value.SetIterKey called before Next")
- }
- iterkey := iter.hiter.key
- if iterkey == nil {
- panic("reflect: Value.SetIterKey called on exhausted iterator")
- }
-
- v.mustBeAssignable()
- var target unsafe.Pointer
- if v.kind() == Interface {
- target = v.ptr
- }
-
- t := (*mapType)(unsafe.Pointer(iter.m.typ()))
- ktype := t.Key
-
- iter.m.mustBeExported() // do not let unexported m leak
- key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir}
- key = key.assignTo("reflect.MapIter.SetKey", v.typ(), target)
- typedmemmove(v.typ(), v.ptr, key.ptr)
-}
-
-// Value returns the value of iter's current map entry.
-func (iter *MapIter) Value() Value {
- if !iter.hiter.initialized() {
- panic("MapIter.Value called before Next")
- }
- iterelem := iter.hiter.elem
- if iterelem == nil {
- panic("MapIter.Value called on exhausted iterator")
- }
-
- t := (*mapType)(unsafe.Pointer(iter.m.typ()))
- vtype := t.Elem
- return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem)
-}
-
-// SetIterValue assigns to v the value of iter's current map entry.
-// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value.
-// As in Go, the value must be assignable to v's type and
-// must not be derived from an unexported field.
-// It panics if [Value.CanSet] returns false.
-func (v Value) SetIterValue(iter *MapIter) {
- if !iter.hiter.initialized() {
- panic("reflect: Value.SetIterValue called before Next")
- }
- iterelem := iter.hiter.elem
- if iterelem == nil {
- panic("reflect: Value.SetIterValue called on exhausted iterator")
- }
-
- v.mustBeAssignable()
- var target unsafe.Pointer
- if v.kind() == Interface {
- target = v.ptr
- }
-
- t := (*mapType)(unsafe.Pointer(iter.m.typ()))
- vtype := t.Elem
-
- iter.m.mustBeExported() // do not let unexported m leak
- elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir}
- elem = elem.assignTo("reflect.MapIter.SetValue", v.typ(), target)
- typedmemmove(v.typ(), v.ptr, elem.ptr)
-}
-
-// Next advances the map iterator and reports whether there is another
-// entry. It returns false when iter is exhausted; subsequent
-// calls to [MapIter.Key], [MapIter.Value], or [MapIter.Next] will panic.
-func (iter *MapIter) Next() bool {
- if !iter.m.IsValid() {
- panic("MapIter.Next called on an iterator that does not have an associated map Value")
- }
- if !iter.hiter.initialized() {
- mapiterinit(iter.m.typ(), iter.m.pointer(), &iter.hiter)
- } else {
- if iter.hiter.key == nil {
- panic("MapIter.Next called on exhausted iterator")
- }
- mapiternext(&iter.hiter)
- }
- return iter.hiter.key != nil
-}
-
-// Reset modifies iter to iterate over v.
-// It panics if v's Kind is not [Map] and v is not the zero Value.
-// Reset(Value{}) causes iter to not to refer to any map,
-// which may allow the previously iterated-over map to be garbage collected.
-func (iter *MapIter) Reset(v Value) {
- if v.IsValid() {
- v.mustBe(Map)
- }
- iter.m = v
- iter.hiter = hiter{}
-}
-
-// MapRange returns a range iterator for a map.
-// It panics if v's Kind is not [Map].
-//
-// Call [MapIter.Next] to advance the iterator, and [MapIter.Key]/[MapIter.Value] to access each entry.
-// [MapIter.Next] returns false when the iterator is exhausted.
-// MapRange follows the same iteration semantics as a range statement.
-//
-// Example:
-//
-// iter := reflect.ValueOf(m).MapRange()
-// for iter.Next() {
-// k := iter.Key()
-// v := iter.Value()
-// ...
-// }
-func (v Value) MapRange() *MapIter {
- // This is inlinable to take advantage of "function outlining".
- // The allocation of MapIter can be stack allocated if the caller
- // does not allow it to escape.
- // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/
- if v.kind() != Map {
- v.panicNotMap()
- }
- return &MapIter{m: v}
-}
-
-// SetMapIndex sets the element associated with key in the map v to elem.
-// It panics if v's Kind is not [Map].
-// If elem is the zero Value, SetMapIndex deletes the key from the map.
-// Otherwise if v holds a nil map, SetMapIndex will panic.
-// As in Go, key's elem must be assignable to the map's key type,
-// and elem's value must be assignable to the map's elem type.
-func (v Value) SetMapIndex(key, elem Value) {
- v.mustBe(Map)
- v.mustBeExported()
- key.mustBeExported()
- tt := (*mapType)(unsafe.Pointer(v.typ()))
-
- if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.OldMapMaxElemBytes {
- k := *(*string)(key.ptr)
- if elem.typ() == nil {
- mapdelete_faststr(v.typ(), v.pointer(), k)
- return
- }
- elem.mustBeExported()
- elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
- var e unsafe.Pointer
- if elem.flag&flagIndir != 0 {
- e = elem.ptr
- } else {
- e = unsafe.Pointer(&elem.ptr)
- }
- mapassign_faststr(v.typ(), v.pointer(), k, e)
- return
- }
-
- key = key.assignTo("reflect.Value.SetMapIndex", tt.Key, nil)
- var k unsafe.Pointer
- if key.flag&flagIndir != 0 {
- k = key.ptr
- } else {
- k = unsafe.Pointer(&key.ptr)
- }
- if elem.typ() == nil {
- mapdelete(v.typ(), v.pointer(), k)
- return
- }
- elem.mustBeExported()
- elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil)
- var e unsafe.Pointer
- if elem.flag&flagIndir != 0 {
- e = elem.ptr
- } else {
- e = unsafe.Pointer(&elem.ptr)
- }
- mapassign(v.typ(), v.pointer(), k, e)
-}
-
-// Force slow panicking path not inlined, so it won't add to the
-// inlining budget of the caller.
-// TODO: undo when the inliner is no longer bottom-up only.
-//
-//go:noinline
-func (f flag) panicNotMap() {
- f.mustBe(Map)
-}
diff --git a/src/reflect/map_noswiss_test.go b/src/reflect/map_noswiss_test.go
deleted file mode 100644
index 52fcf89535..0000000000
--- a/src/reflect/map_noswiss_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package reflect_test
-
-import (
- "internal/abi"
- "internal/goarch"
- . "reflect"
- "testing"
-)
-
-func testGCBitsMap(t *testing.T) {
- const bucketCount = abi.OldMapBucketCount
-
- hdr := make([]byte, bucketCount/goarch.PtrSize)
-
- verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) {
- verifyGCBits(t, MapBucketOf(k, e), want)
- verifyGCBits(t, CachedBucketOf(TypeOf(m)), want)
- }
- verifyMapBucket(t,
- Tscalar, Tptr,
- map[Xscalar]Xptr(nil),
- join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t,
- Tscalarptr, Tptr,
- map[Xscalarptr]Xptr(nil),
- join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t, Tint64, Tptr,
- map[int64]Xptr(nil),
- join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t,
- Tscalar, Tscalar,
- map[Xscalar]Xscalar(nil),
- empty)
- verifyMapBucket(t,
- ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar),
- map[[2]Xscalarptr][3]Xptrscalar(nil),
- join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1)))
- verifyMapBucket(t,
- ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
- map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
- join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
- verifyMapBucket(t,
- ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar),
- map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil),
- join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1)))
- verifyMapBucket(t,
- ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
- map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
- join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1)))
- verifyMapBucket(t,
- ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar),
- map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil),
- join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1)))
-}
diff --git a/src/reflect/map_swiss_test.go b/src/reflect/map_test.go
index 621140aa60..621b5fdd73 100644
--- a/src/reflect/map_swiss_test.go
+++ b/src/reflect/map_test.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.swissmap
-
package reflect_test
import (
@@ -11,11 +9,6 @@ import (
"testing"
)
-func testGCBitsMap(t *testing.T) {
- // Unlike old maps, we don't manually construct GC data for swiss maps,
- // instead using the public reflect API in groupAndSlotOf.
-}
-
// See also runtime_test.TestGroupSizeZero.
func TestGroupSizeZero(t *testing.T) {
st := reflect.TypeFor[struct{}]()
diff --git a/src/reflect/type.go b/src/reflect/type.go
index 0004cab985..cec8662c01 100644
--- a/src/reflect/type.go
+++ b/src/reflect/type.go
@@ -1813,7 +1813,7 @@ func ChanOf(dir ChanDir, t Type) Type {
var ichan any = (chan unsafe.Pointer)(nil)
prototype := *(**chanType)(unsafe.Pointer(&ichan))
ch := *prototype
- ch.TFlag = abi.TFlagRegularMemory
+ ch.TFlag = abi.TFlagRegularMemory | abi.TFlagDirectIface
ch.Dir = abi.ChanDir(dir)
ch.Str = resolveReflectName(newName(s, "", false, false))
ch.Hash = fnv1(typ.Hash, 'c', byte(dir))
@@ -1894,7 +1894,7 @@ func FuncOf(in, out []Type, variadic bool) Type {
hash = fnv1(hash, byte(t.t.Hash>>24), byte(t.t.Hash>>16), byte(t.t.Hash>>8), byte(t.t.Hash))
}
- ft.TFlag = 0
+ ft.TFlag = abi.TFlagDirectIface
ft.Hash = hash
ft.InCount = uint16(len(in))
ft.OutCount = uint16(len(out))
@@ -2313,7 +2313,7 @@ func StructOf(fields []StructField) Type {
// Issue 15924.
panic("reflect: embedded type with methods not implemented if type is not first field")
}
- if len(fields) > 1 && ft.Kind_&abi.KindDirectIface != 0 {
+ if len(fields) > 1 && ft.IsDirectIface() {
panic("reflect: embedded type with methods not implemented for non-pointer type")
}
for _, m := range unt.Methods() {
@@ -2524,11 +2524,11 @@ func StructOf(fields []StructField) Type {
}
switch {
- case len(fs) == 1 && !fs[0].Typ.IfaceIndir():
+ case len(fs) == 1 && fs[0].Typ.IsDirectIface():
// structs of 1 direct iface type can be direct
- typ.Kind_ |= abi.KindDirectIface
+ typ.TFlag |= abi.TFlagDirectIface
default:
- typ.Kind_ &^= abi.KindDirectIface
+ typ.TFlag &^= abi.TFlagDirectIface
}
return addToCache(toType(&typ.Type))
@@ -2694,11 +2694,11 @@ func ArrayOf(length int, elem Type) Type {
}
switch {
- case length == 1 && !typ.IfaceIndir():
+ case length == 1 && typ.IsDirectIface():
// array of 1 direct iface type can be direct
- array.Kind_ |= abi.KindDirectIface
+ array.TFlag |= abi.TFlagDirectIface
default:
- array.Kind_ &^= abi.KindDirectIface
+ array.TFlag &^= abi.TFlagDirectIface
}
ti, _ := lookupCache.LoadOrStore(ckey, toRType(&array.Type))
@@ -2834,7 +2834,7 @@ func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) {
return
}
- switch Kind(t.Kind_ & abi.KindMask) {
+ switch Kind(t.Kind()) {
case Chan, Func, Map, Pointer, Slice, String, UnsafePointer:
// 1 pointer at start of representation
for bv.n < uint32(offset/goarch.PtrSize) {
diff --git a/src/reflect/value.go b/src/reflect/value.go
index ffdf789648..6f65ef81dc 100644
--- a/src/reflect/value.go
+++ b/src/reflect/value.go
@@ -55,7 +55,7 @@ type Value struct {
// - flagIndir: val holds a pointer to the data
// - flagAddr: v.CanAddr is true (implies flagIndir and ptr is non-nil)
// - flagMethod: v is a method value.
- // If ifaceIndir(typ), code can assume that flagIndir is set.
+ // If !typ.IsDirectIface(), code can assume that flagIndir is set.
//
// The remaining 22+ bits give a method number for method values.
// If flag.kind() != Func, code can assume that flagMethod is unset.
@@ -125,7 +125,7 @@ func packEface(v Value) any {
e := abi.EmptyInterface{}
// First, fill in the data portion of the interface.
switch {
- case t.IfaceIndir():
+ case !t.IsDirectIface():
if v.flag&flagIndir == 0 {
panic("bad indir")
}
@@ -159,7 +159,7 @@ func unpackEface(i any) Value {
return Value{}
}
f := flag(t.Kind())
- if t.IfaceIndir() {
+ if !t.IsDirectIface() {
f |= flagIndir
}
return Value{t, e.Data, f}
@@ -624,7 +624,7 @@ func (v Value) call(op string, in []Value) []Value {
}
// Handle pointers passed in registers.
- if !tv.IfaceIndir() {
+ if tv.IsDirectIface() {
// Pointer-valued data gets put directly
// into v.ptr.
if steps[0].kind != abiStepPointer {
@@ -714,7 +714,7 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs
v := Value{typ, nil, flag(typ.Kind())}
steps := abid.call.stepsForValue(i)
if st := steps[0]; st.kind == abiStepStack {
- if typ.IfaceIndir() {
+ if !typ.IsDirectIface() {
// value cannot be inlined in interface data.
// Must make a copy, because f might keep a reference to it,
// and we cannot let f keep a reference to the stack frame
@@ -728,7 +728,7 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs
v.ptr = *(*unsafe.Pointer)(add(ptr, st.stkOff, "1-ptr"))
}
} else {
- if typ.IfaceIndir() {
+ if !typ.IsDirectIface() {
// All that's left is values passed in registers that we need to
// create space for the values.
v.flag |= flagIndir
@@ -914,7 +914,7 @@ func storeRcvr(v Value, p unsafe.Pointer) {
// the interface data word becomes the receiver word
iface := (*nonEmptyInterface)(v.ptr)
*(*unsafe.Pointer)(p) = iface.word
- } else if v.flag&flagIndir != 0 && !t.IfaceIndir() {
+ } else if v.flag&flagIndir != 0 && t.IsDirectIface() {
*(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr)
} else {
*(*unsafe.Pointer)(p) = v.ptr
@@ -1224,7 +1224,7 @@ func (v Value) Elem() Value {
case Pointer:
ptr := v.ptr
if v.flag&flagIndir != 0 {
- if v.typ().IfaceIndir() {
+ if !v.typ().IsDirectIface() {
// This is a pointer to a not-in-heap object. ptr points to a uintptr
// in the heap. That uintptr is the address of a not-in-heap object.
// In general, pointers to not-in-heap objects can be total junk.
@@ -1852,7 +1852,7 @@ func (v Value) lenNonSlice() int {
// copyVal returns a Value containing the map key or value at ptr,
// allocating a new variable as needed.
func copyVal(typ *abi.Type, fl flag, ptr unsafe.Pointer) Value {
- if typ.IfaceIndir() {
+ if !typ.IsDirectIface() {
// Copy result so future changes to the map
// won't change the underlying value.
c := unsafe_New(typ)
@@ -2076,7 +2076,7 @@ func (v Value) recv(nb bool) (val Value, ok bool) {
t := tt.Elem
val = Value{t, nil, flag(t.Kind())}
var p unsafe.Pointer
- if t.IfaceIndir() {
+ if !t.IsDirectIface() {
p = unsafe_New(t)
val.ptr = p
val.flag |= flagIndir
@@ -2952,7 +2952,7 @@ func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) {
t := tt.Elem
p := runcases[chosen].val
fl := flag(t.Kind())
- if t.IfaceIndir() {
+ if !t.IsDirectIface() {
recv = Value{t, p, fl | flagIndir}
} else {
recv = Value{t, *(*unsafe.Pointer)(p), fl}
@@ -3065,7 +3065,7 @@ func Zero(typ Type) Value {
}
t := &typ.(*rtype).t
fl := flag(t.Kind())
- if t.IfaceIndir() {
+ if !t.IsDirectIface() {
var p unsafe.Pointer
if t.Size() <= abi.ZeroValSize {
p = unsafe.Pointer(&zeroVal[0])
@@ -3088,7 +3088,7 @@ func New(typ Type) Value {
}
t := &typ.(*rtype).t
pt := ptrTo(t)
- if pt.IfaceIndir() {
+ if !pt.IsDirectIface() {
// This is a pointer to a not-in-heap type.
panic("reflect: New of type that may not be allocated in heap (possibly undefined cgo C type)")
}
diff --git a/src/regexp/find_test.go b/src/regexp/find_test.go
index 2edbe9b86e..49e9619cef 100644
--- a/src/regexp/find_test.go
+++ b/src/regexp/find_test.go
@@ -98,6 +98,8 @@ var findTests = []FindTest{
{`\B`, "x y", nil},
{`\B`, "xx yy", build(2, 1, 1, 4, 4)},
{`(|a)*`, "aa", build(3, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2)},
+ {`0A|0[aA]`, "0a", build(1, 0, 2)},
+ {`0[aA]|0A`, "0a", build(1, 0, 2)},
// RE2 tests
{`[^\S\s]`, "abcd", nil},
diff --git a/src/regexp/syntax/regexp.go b/src/regexp/syntax/regexp.go
index f15d205123..499492884e 100644
--- a/src/regexp/syntax/regexp.go
+++ b/src/regexp/syntax/regexp.go
@@ -76,7 +76,7 @@ func (x *Regexp) Equal(y *Regexp) bool {
}
case OpLiteral, OpCharClass:
- return slices.Equal(x.Rune, y.Rune)
+ return x.Flags&FoldCase == y.Flags&FoldCase && slices.Equal(x.Rune, y.Rune)
case OpAlternate, OpConcat:
return slices.EqualFunc(x.Sub, y.Sub, (*Regexp).Equal)
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index df32bc7941..b956f9d05a 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -144,7 +144,7 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr {
// we want to report the struct, not the slice).
panic(errorString("hash of unhashable type " + toRType(t).string()))
}
- if isDirectIface(t) {
+ if t.IsDirectIface() {
return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
return c1 * typehash(t, a.data, h^c0)
@@ -171,7 +171,7 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
// See comment in interhash above.
panic(errorString("hash of unhashable type " + toRType(t).string()))
}
- if isDirectIface(t) {
+ if t.IsDirectIface() {
return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
return c1 * typehash(t, a.data, h^c0)
@@ -211,7 +211,7 @@ func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, t.Size_)
}
}
- switch t.Kind_ & abi.KindMask {
+ switch t.Kind() {
case abi.Float32:
return f32hash(p, h)
case abi.Float64:
@@ -306,7 +306,7 @@ func efaceeq(t *_type, x, y unsafe.Pointer) bool {
if eq == nil {
panic(errorString("comparing uncomparable type " + toRType(t).string()))
}
- if isDirectIface(t) {
+ if t.IsDirectIface() {
// Direct interface types are ptr, chan, map, func, and single-element structs/arrays thereof.
// Maps and funcs are not comparable, so they can't reach here.
// Ptrs, chans, and single-element items can be compared directly using ==.
@@ -323,7 +323,7 @@ func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
if eq == nil {
panic(errorString("comparing uncomparable type " + toRType(t).string()))
}
- if isDirectIface(t) {
+ if t.IsDirectIface() {
// See comment in efaceeq.
return x == y
}
diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index e807995810..52a2a99d6c 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -111,7 +111,7 @@ func arena_newArena() unsafe.Pointer {
//go:linkname arena_arena_New arena.runtime_arena_arena_New
func arena_arena_New(arena unsafe.Pointer, typ any) any {
t := (*_type)(efaceOf(&typ).data)
- if t.Kind_&abi.KindMask != abi.Pointer {
+ if t.Kind() != abi.Pointer {
throw("arena_New: non-pointer type")
}
te := (*ptrtype)(unsafe.Pointer(t)).Elem
@@ -145,7 +145,7 @@ func arena_heapify(s any) any {
var v unsafe.Pointer
e := efaceOf(&s)
t := e._type
- switch t.Kind_ & abi.KindMask {
+ switch t.Kind() {
case abi.String:
v = stringStructOf((*string)(e.data)).str
case abi.Slice:
@@ -162,7 +162,7 @@ func arena_heapify(s any) any {
}
// Heap-allocate storage for a copy.
var x any
- switch t.Kind_ & abi.KindMask {
+ switch t.Kind() {
case abi.String:
s1 := s.(string)
s2, b := rawstring(len(s1))
@@ -293,11 +293,11 @@ func (a *userArena) slice(sl any, cap int) {
}
i := efaceOf(&sl)
typ := i._type
- if typ.Kind_&abi.KindMask != abi.Pointer {
+ if typ.Kind() != abi.Pointer {
panic("slice result of non-ptr type")
}
typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
- if typ.Kind_&abi.KindMask != abi.Slice {
+ if typ.Kind() != abi.Slice {
panic("slice of non-ptr-to-slice type")
}
typ = (*slicetype)(unsafe.Pointer(typ)).Elem
@@ -745,7 +745,9 @@ func newUserArenaChunk() (unsafe.Pointer, *mspan) {
// does represent additional work for the GC, but we also have no idea
// what that looks like until we actually allocate things into the
// arena).
- deductAssistCredit(userArenaChunkBytes)
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(userArenaChunkBytes)
+ }
// Set mp.mallocing to keep from being preempted by GC.
mp := acquirem()
diff --git a/src/runtime/asan_riscv64.s b/src/runtime/asan_riscv64.s
index eb76e61ffb..5a333361dd 100644
--- a/src/runtime/asan_riscv64.s
+++ b/src/runtime/asan_riscv64.s
@@ -81,13 +81,13 @@ TEXT asancall<>(SB), NOSPLIT, $0-0
MOV g_m(g), X21
// Switch to g0 stack if we aren't already on g0 or gsignal.
- MOV m_gsignal(X21), X21
- BEQ X21, g, call
+ MOV m_gsignal(X21), X22
+ BEQ X22, g, call
- MOV m_g0(X21), X21
- BEQ X21, g, call
+ MOV m_g0(X21), X22
+ BEQ X22, g, call
- MOV (g_sched+gobuf_sp)(X21), X2
+ MOV (g_sched+gobuf_sp)(X22), X2
call:
JALR RA, X14
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index 62ab83985f..df32e90fda 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -1509,161 +1509,47 @@ TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
MOVL $32, DI
JMP gcWriteBarrier<>(SB)
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-8
- MOVL AX, x+0(FP)
- MOVL CX, y+4(FP)
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-8
- MOVL AX, x+0(FP)
- MOVL CX, y+4(FP)
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-8
- MOVL CX, x+0(FP)
- MOVL DX, y+4(FP)
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-8
- MOVL CX, x+0(FP)
- MOVL DX, y+4(FP)
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-8
- MOVL CX, x+0(FP)
- MOVL DX, y+4(FP)
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-8
- MOVL CX, x+0(FP)
- MOVL DX, y+4(FP)
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-8
- MOVL AX, x+0(FP)
- MOVL CX, y+4(FP)
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-8
- MOVL AX, x+0(FP)
- MOVL CX, y+4(FP)
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-8
- MOVL DX, x+0(FP)
- MOVL BX, y+4(FP)
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-8
- MOVL DX, x+0(FP)
- MOVL BX, y+4(FP)
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-8
- MOVL DX, x+0(FP)
- MOVL BX, y+4(FP)
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-8
- MOVL DX, x+0(FP)
- MOVL BX, y+4(FP)
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-8
- MOVL CX, x+0(FP)
- MOVL DX, y+4(FP)
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-8
- MOVL CX, x+0(FP)
- MOVL DX, y+4(FP)
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-8
- MOVL AX, x+0(FP)
- MOVL CX, y+4(FP)
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-8
- MOVL AX, x+0(FP)
- MOVL CX, y+4(FP)
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-8
- MOVL DX, x+0(FP)
- MOVL BX, y+4(FP)
- JMP runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$40-0
+ NO_LOCAL_POINTERS
+ // Save all int registers that could have an index in them.
+ // They may be pointers, but if they are they are dead.
+ MOVL AX, 8(SP)
+ MOVL CX, 12(SP)
+ MOVL DX, 16(SP)
+ MOVL BX, 20(SP)
+ // skip SP @ 24(SP)
+ MOVL BP, 28(SP)
+ MOVL SI, 32(SP)
+ MOVL DI, 36(SP)
+
+ MOVL SP, AX // hide SP read from vet
+ MOVL 40(AX), AX // PC immediately after call to panicBounds
+ MOVL AX, 0(SP)
+ LEAL 8(SP), AX
+ MOVL AX, 4(SP)
+ CALL runtime·panicBounds32<ABIInternal>(SB)
+ RET
-// Extended versions for 64-bit indexes.
-TEXT runtime·panicExtendIndex(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL AX, lo+4(FP)
- MOVL CX, y+8(FP)
- JMP runtime·goPanicExtendIndex(SB)
-TEXT runtime·panicExtendIndexU(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL AX, lo+4(FP)
- MOVL CX, y+8(FP)
- JMP runtime·goPanicExtendIndexU(SB)
-TEXT runtime·panicExtendSliceAlen(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL CX, lo+4(FP)
- MOVL DX, y+8(FP)
- JMP runtime·goPanicExtendSliceAlen(SB)
-TEXT runtime·panicExtendSliceAlenU(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL CX, lo+4(FP)
- MOVL DX, y+8(FP)
- JMP runtime·goPanicExtendSliceAlenU(SB)
-TEXT runtime·panicExtendSliceAcap(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL CX, lo+4(FP)
- MOVL DX, y+8(FP)
- JMP runtime·goPanicExtendSliceAcap(SB)
-TEXT runtime·panicExtendSliceAcapU(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL CX, lo+4(FP)
- MOVL DX, y+8(FP)
- JMP runtime·goPanicExtendSliceAcapU(SB)
-TEXT runtime·panicExtendSliceB(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL AX, lo+4(FP)
- MOVL CX, y+8(FP)
- JMP runtime·goPanicExtendSliceB(SB)
-TEXT runtime·panicExtendSliceBU(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL AX, lo+4(FP)
- MOVL CX, y+8(FP)
- JMP runtime·goPanicExtendSliceBU(SB)
-TEXT runtime·panicExtendSlice3Alen(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL DX, lo+4(FP)
- MOVL BX, y+8(FP)
- JMP runtime·goPanicExtendSlice3Alen(SB)
-TEXT runtime·panicExtendSlice3AlenU(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL DX, lo+4(FP)
- MOVL BX, y+8(FP)
- JMP runtime·goPanicExtendSlice3AlenU(SB)
-TEXT runtime·panicExtendSlice3Acap(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL DX, lo+4(FP)
- MOVL BX, y+8(FP)
- JMP runtime·goPanicExtendSlice3Acap(SB)
-TEXT runtime·panicExtendSlice3AcapU(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL DX, lo+4(FP)
- MOVL BX, y+8(FP)
- JMP runtime·goPanicExtendSlice3AcapU(SB)
-TEXT runtime·panicExtendSlice3B(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL CX, lo+4(FP)
- MOVL DX, y+8(FP)
- JMP runtime·goPanicExtendSlice3B(SB)
-TEXT runtime·panicExtendSlice3BU(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL CX, lo+4(FP)
- MOVL DX, y+8(FP)
- JMP runtime·goPanicExtendSlice3BU(SB)
-TEXT runtime·panicExtendSlice3C(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL AX, lo+4(FP)
- MOVL CX, y+8(FP)
- JMP runtime·goPanicExtendSlice3C(SB)
-TEXT runtime·panicExtendSlice3CU(SB),NOSPLIT,$0-12
- MOVL SI, hi+0(FP)
- MOVL AX, lo+4(FP)
- MOVL CX, y+8(FP)
- JMP runtime·goPanicExtendSlice3CU(SB)
+TEXT runtime·panicExtend<ABIInternal>(SB),NOSPLIT,$40-0
+ NO_LOCAL_POINTERS
+ // Save all int registers that could have an index in them.
+ // They may be pointers, but if they are they are dead.
+ MOVL AX, 8(SP)
+ MOVL CX, 12(SP)
+ MOVL DX, 16(SP)
+ MOVL BX, 20(SP)
+ // skip SP @ 24(SP)
+ MOVL BP, 28(SP)
+ MOVL SI, 32(SP)
+ MOVL DI, 36(SP)
+
+ MOVL SP, AX // hide SP read from vet
+ MOVL 40(AX), AX // PC immediately after call to panicExtend
+ MOVL AX, 0(SP)
+ LEAL 8(SP), AX
+ MOVL AX, 4(SP)
+ CALL runtime·panicBounds32X<ABIInternal>(SB)
+ RET
#ifdef GOOS_android
// Use the free TLS_SLOT_APP slot #2 on Android Q.
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 8983eeafcb..cf1d49a4ad 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -2024,69 +2024,32 @@ TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
BYTE $0xcc
RET
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-// Defined as ABIInternal since they do not use the stack-based Go ABI.
-TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, BX
- JMP runtime·goPanicIndex<ABIInternal>(SB)
-TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, BX
- JMP runtime·goPanicIndexU<ABIInternal>(SB)
-TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, AX
- MOVQ DX, BX
- JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
-TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, AX
- MOVQ DX, BX
- JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
-TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, AX
- MOVQ DX, BX
- JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
-TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, AX
- MOVQ DX, BX
- JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
-TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, BX
- JMP runtime·goPanicSliceB<ABIInternal>(SB)
-TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, BX
- JMP runtime·goPanicSliceBU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ DX, AX
- JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
-TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ DX, AX
- JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ DX, AX
- JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
-TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ DX, AX
- JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
-TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, AX
- MOVQ DX, BX
- JMP runtime·goPanicSlice3B<ABIInternal>(SB)
-TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, AX
- MOVQ DX, BX
- JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
-TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, BX
- JMP runtime·goPanicSlice3C<ABIInternal>(SB)
-TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ CX, BX
- JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
-TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
- MOVQ DX, AX
- JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$144-0
+ NO_LOCAL_POINTERS
+ // Save all 14 int registers that could have an index in them.
+ // They may be pointers, but if they are they are dead.
+ MOVQ AX, 16(SP)
+ MOVQ CX, 24(SP)
+ MOVQ DX, 32(SP)
+ MOVQ BX, 40(SP)
+ // skip SP @ 48(SP)
+ MOVQ BP, 56(SP)
+ MOVQ SI, 64(SP)
+ MOVQ DI, 72(SP)
+ MOVQ R8, 80(SP)
+ MOVQ R9, 88(SP)
+ MOVQ R10, 96(SP)
+ MOVQ R11, 104(SP)
+ MOVQ R12, 112(SP)
+ MOVQ R13, 120(SP)
+ // skip R14 @ 128(SP) (aka G)
+ MOVQ R15, 136(SP)
+
+ MOVQ SP, AX // hide SP read from vet
+ MOVQ 152(AX), AX // PC immediately after call to panicBounds
+ LEAQ 16(SP), BX
+ CALL runtime·panicBounds64<ABIInternal>(SB)
+ RET
#ifdef GOOS_android
// Use the free TLS_SLOT_APP slot #2 on Android Q.
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index ca9f0ced03..742b97f888 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -991,158 +991,56 @@ TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
MOVW $32, R8
JMP gcWriteBarrier<>(SB)
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-8
- MOVW R0, x+0(FP)
- MOVW R1, y+4(FP)
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-8
- MOVW R0, x+0(FP)
- MOVW R1, y+4(FP)
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-8
- MOVW R1, x+0(FP)
- MOVW R2, y+4(FP)
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-8
- MOVW R1, x+0(FP)
- MOVW R2, y+4(FP)
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-8
- MOVW R1, x+0(FP)
- MOVW R2, y+4(FP)
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-8
- MOVW R1, x+0(FP)
- MOVW R2, y+4(FP)
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-8
- MOVW R0, x+0(FP)
- MOVW R1, y+4(FP)
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-8
- MOVW R0, x+0(FP)
- MOVW R1, y+4(FP)
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-8
- MOVW R2, x+0(FP)
- MOVW R3, y+4(FP)
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-8
- MOVW R2, x+0(FP)
- MOVW R3, y+4(FP)
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-8
- MOVW R2, x+0(FP)
- MOVW R3, y+4(FP)
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-8
- MOVW R2, x+0(FP)
- MOVW R3, y+4(FP)
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-8
- MOVW R1, x+0(FP)
- MOVW R2, y+4(FP)
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-8
- MOVW R1, x+0(FP)
- MOVW R2, y+4(FP)
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-8
- MOVW R0, x+0(FP)
- MOVW R1, y+4(FP)
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-8
- MOVW R0, x+0(FP)
- MOVW R1, y+4(FP)
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-8
- MOVW R2, x+0(FP)
- MOVW R3, y+4(FP)
- JMP runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$72-0
+ NO_LOCAL_POINTERS
+ // Save all int registers that could have an index in them.
+ // They may be pointers, but if they are they are dead.
+ MOVW R0, 12(R13)
+ MOVW R1, 16(R13)
+ MOVW R2, 20(R13)
+ MOVW R3, 24(R13)
+ MOVW R4, 28(R13)
+ MOVW R5, 32(R13)
+ MOVW R6, 36(R13)
+ MOVW R7, 40(R13)
+ MOVW R8, 44(R13)
+ MOVW R9, 48(R13)
+ // skip R10 aka G @ 52(R13)
+ // skip R11 aka tmp @ 56(R13)
+ MOVW R12, 60(R13)
+ // skip R13 aka SP @ 64(R13)
+ MOVW R14, 68(R13)
+ // skip R15 aka PC @ 72(R13)
+
+ MOVW R14, 4(R13) // PC immediately after call to panicBounds
+ ADD $12, R13, R0 // pointer to save area
+ MOVW R0, 8(R13)
+ CALL runtime·panicBounds32<ABIInternal>(SB)
+ RET
-// Extended versions for 64-bit indexes.
-TEXT runtime·panicExtendIndex(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R0, lo+4(FP)
- MOVW R1, y+8(FP)
- JMP runtime·goPanicExtendIndex(SB)
-TEXT runtime·panicExtendIndexU(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R0, lo+4(FP)
- MOVW R1, y+8(FP)
- JMP runtime·goPanicExtendIndexU(SB)
-TEXT runtime·panicExtendSliceAlen(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R1, lo+4(FP)
- MOVW R2, y+8(FP)
- JMP runtime·goPanicExtendSliceAlen(SB)
-TEXT runtime·panicExtendSliceAlenU(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R1, lo+4(FP)
- MOVW R2, y+8(FP)
- JMP runtime·goPanicExtendSliceAlenU(SB)
-TEXT runtime·panicExtendSliceAcap(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R1, lo+4(FP)
- MOVW R2, y+8(FP)
- JMP runtime·goPanicExtendSliceAcap(SB)
-TEXT runtime·panicExtendSliceAcapU(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R1, lo+4(FP)
- MOVW R2, y+8(FP)
- JMP runtime·goPanicExtendSliceAcapU(SB)
-TEXT runtime·panicExtendSliceB(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R0, lo+4(FP)
- MOVW R1, y+8(FP)
- JMP runtime·goPanicExtendSliceB(SB)
-TEXT runtime·panicExtendSliceBU(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R0, lo+4(FP)
- MOVW R1, y+8(FP)
- JMP runtime·goPanicExtendSliceBU(SB)
-TEXT runtime·panicExtendSlice3Alen(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R2, lo+4(FP)
- MOVW R3, y+8(FP)
- JMP runtime·goPanicExtendSlice3Alen(SB)
-TEXT runtime·panicExtendSlice3AlenU(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R2, lo+4(FP)
- MOVW R3, y+8(FP)
- JMP runtime·goPanicExtendSlice3AlenU(SB)
-TEXT runtime·panicExtendSlice3Acap(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R2, lo+4(FP)
- MOVW R3, y+8(FP)
- JMP runtime·goPanicExtendSlice3Acap(SB)
-TEXT runtime·panicExtendSlice3AcapU(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R2, lo+4(FP)
- MOVW R3, y+8(FP)
- JMP runtime·goPanicExtendSlice3AcapU(SB)
-TEXT runtime·panicExtendSlice3B(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R1, lo+4(FP)
- MOVW R2, y+8(FP)
- JMP runtime·goPanicExtendSlice3B(SB)
-TEXT runtime·panicExtendSlice3BU(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R1, lo+4(FP)
- MOVW R2, y+8(FP)
- JMP runtime·goPanicExtendSlice3BU(SB)
-TEXT runtime·panicExtendSlice3C(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R0, lo+4(FP)
- MOVW R1, y+8(FP)
- JMP runtime·goPanicExtendSlice3C(SB)
-TEXT runtime·panicExtendSlice3CU(SB),NOSPLIT,$0-12
- MOVW R4, hi+0(FP)
- MOVW R0, lo+4(FP)
- MOVW R1, y+8(FP)
- JMP runtime·goPanicExtendSlice3CU(SB)
+TEXT runtime·panicExtend<ABIInternal>(SB),NOSPLIT,$72-0
+ NO_LOCAL_POINTERS
+ // Save all int registers that could have an index in them.
+ // They may be pointers, but if they are they are dead.
+ MOVW R0, 12(R13)
+ MOVW R1, 16(R13)
+ MOVW R2, 20(R13)
+ MOVW R3, 24(R13)
+ MOVW R4, 28(R13)
+ MOVW R5, 32(R13)
+ MOVW R6, 36(R13)
+ MOVW R7, 40(R13)
+ MOVW R8, 44(R13)
+ MOVW R9, 48(R13)
+ // skip R10 aka G @ 52(R13)
+ // skip R11 aka tmp @ 56(R13)
+ MOVW R12, 60(R13)
+ // skip R13 aka SP @ 64(R13)
+ // skip R14 aka LR @ 68(R13)
+ // skip R15 aka PC @ 72(R13)
+
+ MOVW R14, 4(R13) // PC immediately after call to panicExtend
+ ADD $12, R13, R0 // pointer to save area
+ MOVW R0, 8(R13)
+ CALL runtime·panicBounds32X<ABIInternal>(SB)
+ RET
diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index d2261c5160..a0072a3931 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -1574,70 +1574,22 @@ TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
BREAK
RET
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-//
-// Defined as ABIInternal since the compiler generates ABIInternal
-// calls to it directly and it does not use the stack-based Go ABI.
-TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
- JMP runtime·goPanicIndex<ABIInternal>(SB)
-TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
- JMP runtime·goPanicIndexU<ABIInternal>(SB)
-TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R1, R0
- MOVD R2, R1
- JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
-TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R1, R0
- MOVD R2, R1
- JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
-TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R1, R0
- MOVD R2, R1
- JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
-TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R1, R0
- MOVD R2, R1
- JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
-TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSliceB<ABIInternal>(SB)
-TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSliceBU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R2, R0
- MOVD R3, R1
- JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
-TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R2, R0
- MOVD R3, R1
- JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R2, R0
- MOVD R3, R1
- JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
-TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R2, R0
- MOVD R3, R1
- JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
-TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R1, R0
- MOVD R2, R1
- JMP runtime·goPanicSlice3B<ABIInternal>(SB)
-TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R1, R0
- MOVD R2, R1
- JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
-TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSlice3C<ABIInternal>(SB)
-TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
-TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R2, R0
- MOVD R3, R1
- JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$144-0
+ NO_LOCAL_POINTERS
+ // Save all 16 int registers that could have an index in them.
+ // They may be pointers, but if they are they are dead.
+ STP (R0, R1), 24(RSP)
+ STP (R2, R3), 40(RSP)
+ STP (R4, R5), 56(RSP)
+ STP (R6, R7), 72(RSP)
+ STP (R8, R9), 88(RSP)
+ STP (R10, R11), 104(RSP)
+ STP (R12, R13), 120(RSP)
+ STP (R14, R15), 136(RSP)
+ MOVD LR, R0 // PC immediately after call to panicBounds
+ ADD $24, RSP, R1 // pointer to save area
+ CALL runtime·panicBounds64<ABIInternal>(SB)
+ RET
TEXT ·getfp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
MOVD R29, R0
diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s
index e3b593961a..ee7f825e1f 100644
--- a/src/runtime/asm_loong64.s
+++ b/src/runtime/asm_loong64.s
@@ -70,8 +70,9 @@ nocgo:
// start this M
JAL runtime·mstart(SB)
- // Prevent dead-code elimination of debugCallV2, which is
+ // Prevent dead-code elimination of debugCallV2 and debugPinnerV1, which are
// intended to be called by debuggers.
+ MOVV $runtime·debugPinnerV1<ABIInternal>(SB), R0
MOVV $runtime·debugCallV2<ABIInternal>(SB), R0
MOVV R0, 1(R0)
@@ -1135,76 +1136,29 @@ TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
BREAK
RET
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R20, R4
- MOVV R21, R5
- JMP runtime·goPanicIndex<ABIInternal>(SB)
-TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R20, R4
- MOVV R21, R5
- JMP runtime·goPanicIndexU<ABIInternal>(SB)
-TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R21, R4
- MOVV R23, R5
- JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
-TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R21, R4
- MOVV R23, R5
- JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
-TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R21, R4
- MOVV R23, R5
- JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
-TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R21, R4
- MOVV R23, R5
- JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
-TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R20, R4
- MOVV R21, R5
- JMP runtime·goPanicSliceB<ABIInternal>(SB)
-TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R20, R4
- MOVV R21, R5
- JMP runtime·goPanicSliceBU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R23, R4
- MOVV R24, R5
- JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
-TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R23, R4
- MOVV R24, R5
- JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R23, R4
- MOVV R24, R5
- JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
-TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R23, R4
- MOVV R24, R5
- JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
-TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R21, R4
- MOVV R23, R5
- JMP runtime·goPanicSlice3B<ABIInternal>(SB)
-TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R21, R4
- MOVV R23, R5
- JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
-TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R20, R4
- MOVV R21, R5
- JMP runtime·goPanicSlice3C<ABIInternal>(SB)
-TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R20, R4
- MOVV R21, R5
- JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
-TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
- MOVV R23, R4
- MOVV R24, R5
- JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$144-0
+ NO_LOCAL_POINTERS
+ // Save all 16 int registers that could have an index in them.
+ // They may be pointers, but if they are they are dead.
+ // Skip R0 aka ZERO, R1 aka LR, R2 aka thread pointer, R3 aka SP.
+ MOVV R4, 24(R3)
+ MOVV R5, 32(R3)
+ MOVV R6, 40(R3)
+ MOVV R7, 48(R3)
+ MOVV R8, 56(R3)
+ MOVV R9, 64(R3)
+ MOVV R10, 72(R3)
+ MOVV R11, 80(R3)
+ MOVV R12, 88(R3)
+ MOVV R13, 96(R3)
+ MOVV R14, 104(R3)
+ MOVV R15, 112(R3)
+ MOVV R16, 120(R3)
+ MOVV R17, 128(R3)
+ MOVV R18, 136(R3)
+ MOVV R19, 144(R3)
+
+ MOVV R1, R4 // PC immediately after call to panicBounds
+ ADDV $24, R3, R5 // pointer to save area
+ CALL runtime·panicBounds64<ABIInternal>(SB)
+ RET
diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s
index 9509d5ba77..d4523b4a74 100644
--- a/src/runtime/asm_mips64x.s
+++ b/src/runtime/asm_mips64x.s
@@ -791,76 +791,30 @@ TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
MOVV $64, R25
JMP gcWriteBarrier<>(SB)
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
- MOVV R1, x+0(FP)
- MOVV R2, y+8(FP)
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
- MOVV R1, x+0(FP)
- MOVV R2, y+8(FP)
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
- MOVV R2, x+0(FP)
- MOVV R3, y+8(FP)
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
- MOVV R2, x+0(FP)
- MOVV R3, y+8(FP)
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
- MOVV R2, x+0(FP)
- MOVV R3, y+8(FP)
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
- MOVV R2, x+0(FP)
- MOVV R3, y+8(FP)
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
- MOVV R1, x+0(FP)
- MOVV R2, y+8(FP)
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
- MOVV R1, x+0(FP)
- MOVV R2, y+8(FP)
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
- MOVV R3, x+0(FP)
- MOVV R4, y+8(FP)
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
- MOVV R3, x+0(FP)
- MOVV R4, y+8(FP)
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
- MOVV R3, x+0(FP)
- MOVV R4, y+8(FP)
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
- MOVV R3, x+0(FP)
- MOVV R4, y+8(FP)
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
- MOVV R2, x+0(FP)
- MOVV R3, y+8(FP)
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
- MOVV R2, x+0(FP)
- MOVV R3, y+8(FP)
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
- MOVV R1, x+0(FP)
- MOVV R2, y+8(FP)
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
- MOVV R1, x+0(FP)
- MOVV R2, y+8(FP)
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
- MOVV R3, x+0(FP)
- MOVV R4, y+8(FP)
- JMP runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$144-0
+ NO_LOCAL_POINTERS
+ // Save all 16 int registers that could have an index in them.
+ // They may be pointers, but if they are they are dead.
+ // Skip R0 aka ZERO.
+ MOVV R1, 24(R29)
+ MOVV R2, 32(R29)
+ MOVV R3, 40(R29)
+ MOVV R4, 48(R29)
+ MOVV R5, 56(R29)
+ MOVV R6, 64(R29)
+ MOVV R7, 72(R29)
+ MOVV R8, 80(R29)
+ MOVV R9, 88(R29)
+ MOVV R10, 96(R29)
+ MOVV R11, 104(R29)
+ MOVV R12, 112(R29)
+ MOVV R13, 120(R29)
+ MOVV R14, 128(R29)
+ MOVV R15, 136(R29)
+ MOVV R16, 144(R29)
+
+ MOVV R31, 8(R29) // PC immediately after call to panicBounds
+ ADDV $24, R29, R1 // pointer to save area
+ MOVV R1, 16(R29)
+ CALL runtime·panicBounds64<ABIInternal>(SB)
+ RET
diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s
index 7245e8ac49..ec352f5828 100644
--- a/src/runtime/asm_mipsx.s
+++ b/src/runtime/asm_mipsx.s
@@ -787,158 +787,58 @@ TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
MOVW $32, R25
JMP gcWriteBarrier<>(SB)
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-8
- MOVW R1, x+0(FP)
- MOVW R2, y+4(FP)
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-8
- MOVW R1, x+0(FP)
- MOVW R2, y+4(FP)
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-8
- MOVW R2, x+0(FP)
- MOVW R3, y+4(FP)
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-8
- MOVW R2, x+0(FP)
- MOVW R3, y+4(FP)
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-8
- MOVW R2, x+0(FP)
- MOVW R3, y+4(FP)
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-8
- MOVW R2, x+0(FP)
- MOVW R3, y+4(FP)
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-8
- MOVW R1, x+0(FP)
- MOVW R2, y+4(FP)
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-8
- MOVW R1, x+0(FP)
- MOVW R2, y+4(FP)
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-8
- MOVW R3, x+0(FP)
- MOVW R4, y+4(FP)
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-8
- MOVW R3, x+0(FP)
- MOVW R4, y+4(FP)
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-8
- MOVW R3, x+0(FP)
- MOVW R4, y+4(FP)
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-8
- MOVW R3, x+0(FP)
- MOVW R4, y+4(FP)
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-8
- MOVW R2, x+0(FP)
- MOVW R3, y+4(FP)
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-8
- MOVW R2, x+0(FP)
- MOVW R3, y+4(FP)
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-8
- MOVW R1, x+0(FP)
- MOVW R2, y+4(FP)
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-8
- MOVW R1, x+0(FP)
- MOVW R2, y+4(FP)
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-8
- MOVW R3, x+0(FP)
- MOVW R4, y+4(FP)
- JMP runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$72-0
+ NO_LOCAL_POINTERS
+ // Save all 16 int registers that could have an index in them.
+ // They may be pointers, but if they are they are dead.
+ // Skip R0 aka ZERO.
+ MOVW R1, 12(R29)
+ MOVW R2, 16(R29)
+ MOVW R3, 20(R29)
+ MOVW R4, 24(R29)
+ MOVW R5, 28(R29)
+ MOVW R6, 32(R29)
+ MOVW R7, 36(R29)
+ MOVW R8, 40(R29)
+ MOVW R9, 44(R29)
+ MOVW R10, 48(R29)
+ MOVW R11, 52(R29)
+ MOVW R12, 56(R29)
+ MOVW R13, 60(R29)
+ MOVW R14, 64(R29)
+ MOVW R15, 68(R29)
+ MOVW R16, 72(R29)
+
+ MOVW R31, 4(R29) // PC immediately after call to panicBounds
+ ADD $12, R29, R1 // pointer to save area
+ MOVW R1, 8(R29)
+ CALL runtime·panicBounds32<ABIInternal>(SB)
+ RET
-// Extended versions for 64-bit indexes.
-TEXT runtime·panicExtendIndex(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R1, lo+4(FP)
- MOVW R2, y+8(FP)
- JMP runtime·goPanicExtendIndex(SB)
-TEXT runtime·panicExtendIndexU(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R1, lo+4(FP)
- MOVW R2, y+8(FP)
- JMP runtime·goPanicExtendIndexU(SB)
-TEXT runtime·panicExtendSliceAlen(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R2, lo+4(FP)
- MOVW R3, y+8(FP)
- JMP runtime·goPanicExtendSliceAlen(SB)
-TEXT runtime·panicExtendSliceAlenU(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R2, lo+4(FP)
- MOVW R3, y+8(FP)
- JMP runtime·goPanicExtendSliceAlenU(SB)
-TEXT runtime·panicExtendSliceAcap(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R2, lo+4(FP)
- MOVW R3, y+8(FP)
- JMP runtime·goPanicExtendSliceAcap(SB)
-TEXT runtime·panicExtendSliceAcapU(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R2, lo+4(FP)
- MOVW R3, y+8(FP)
- JMP runtime·goPanicExtendSliceAcapU(SB)
-TEXT runtime·panicExtendSliceB(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R1, lo+4(FP)
- MOVW R2, y+8(FP)
- JMP runtime·goPanicExtendSliceB(SB)
-TEXT runtime·panicExtendSliceBU(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R1, lo+4(FP)
- MOVW R2, y+8(FP)
- JMP runtime·goPanicExtendSliceBU(SB)
-TEXT runtime·panicExtendSlice3Alen(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R3, lo+4(FP)
- MOVW R4, y+8(FP)
- JMP runtime·goPanicExtendSlice3Alen(SB)
-TEXT runtime·panicExtendSlice3AlenU(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R3, lo+4(FP)
- MOVW R4, y+8(FP)
- JMP runtime·goPanicExtendSlice3AlenU(SB)
-TEXT runtime·panicExtendSlice3Acap(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R3, lo+4(FP)
- MOVW R4, y+8(FP)
- JMP runtime·goPanicExtendSlice3Acap(SB)
-TEXT runtime·panicExtendSlice3AcapU(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R3, lo+4(FP)
- MOVW R4, y+8(FP)
- JMP runtime·goPanicExtendSlice3AcapU(SB)
-TEXT runtime·panicExtendSlice3B(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R2, lo+4(FP)
- MOVW R3, y+8(FP)
- JMP runtime·goPanicExtendSlice3B(SB)
-TEXT runtime·panicExtendSlice3BU(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R2, lo+4(FP)
- MOVW R3, y+8(FP)
- JMP runtime·goPanicExtendSlice3BU(SB)
-TEXT runtime·panicExtendSlice3C(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R1, lo+4(FP)
- MOVW R2, y+8(FP)
- JMP runtime·goPanicExtendSlice3C(SB)
-TEXT runtime·panicExtendSlice3CU(SB),NOSPLIT,$0-12
- MOVW R5, hi+0(FP)
- MOVW R1, lo+4(FP)
- MOVW R2, y+8(FP)
- JMP runtime·goPanicExtendSlice3CU(SB)
+TEXT runtime·panicExtend<ABIInternal>(SB),NOSPLIT,$72-0
+ NO_LOCAL_POINTERS
+ // Save all 16 int registers that could have an index in them.
+ // They may be pointers, but if they are they are dead.
+ // Skip R0 aka ZERO.
+ MOVW R1, 12(R29)
+ MOVW R2, 16(R29)
+ MOVW R3, 20(R29)
+ MOVW R4, 24(R29)
+ MOVW R5, 28(R29)
+ MOVW R6, 32(R29)
+ MOVW R7, 36(R29)
+ MOVW R8, 40(R29)
+ MOVW R9, 44(R29)
+ MOVW R10, 48(R29)
+ MOVW R11, 52(R29)
+ MOVW R12, 56(R29)
+ MOVW R13, 60(R29)
+ MOVW R14, 64(R29)
+ MOVW R15, 68(R29)
+ MOVW R16, 72(R29)
+
+ MOVW R31, 4(R29) // PC immediately after call to panicBounds
+ ADD $12, R29, R1 // pointer to save area
+ MOVW R1, 8(R29)
+ CALL runtime·panicBounds32X<ABIInternal>(SB)
+ RET
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
index 4031cdde9e..6b16d03c9a 100644
--- a/src/runtime/asm_riscv64.s
+++ b/src/runtime/asm_riscv64.s
@@ -884,80 +884,32 @@ TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
MOV $64, X24
JMP gcWriteBarrier<>(SB)
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers (ssa/gen/RISCV64Ops.go), but the space for those
-// arguments are allocated in the caller's stack frame.
-// These stubs write the args into that stack space and then tail call to the
-// corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicIndex<ABIInternal>(SB)
-TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicIndexU<ABIInternal>(SB)
-TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
-TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
-TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
-TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
-TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicSliceB<ABIInternal>(SB)
-TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicSliceBU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
-TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
-TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
-TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSlice3B<ABIInternal>(SB)
-TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T1, X10
- MOV T2, X11
- JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
-TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicSlice3C<ABIInternal>(SB)
-TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T0, X10
- MOV T1, X11
- JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
-TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
- MOV T2, X10
- MOV T3, X11
- JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$144-0
+ NO_LOCAL_POINTERS
+ // Save all 16 int registers that could have an index in them.
+ // They may be pointers, but if they are they are dead.
+ // Skip X0 aka ZERO, X1 aka LR, X2 aka SP, X3 aka GP, X4 aka TP.
+ MOV X5, 24(X2)
+ MOV X6, 32(X2)
+ MOV X7, 40(X2)
+ MOV X8, 48(X2)
+ MOV X9, 56(X2)
+ MOV X10, 64(X2)
+ MOV X11, 72(X2)
+ MOV X12, 80(X2)
+ MOV X13, 88(X2)
+ MOV X14, 96(X2)
+ MOV X15, 104(X2)
+ MOV X16, 112(X2)
+ MOV X17, 120(X2)
+ MOV X18, 128(X2)
+ MOV X19, 136(X2)
+ MOV X20, 144(X2)
+
+ MOV X1, X10 // PC immediately after call to panicBounds
+ ADD $24, X2, X11 // pointer to save area
+ CALL runtime·panicBounds64<ABIInternal>(SB)
+ RET
DATA runtime·mainPC+0(SB)/8,$runtime·main<ABIInternal>(SB)
GLOBL runtime·mainPC(SB),RODATA,$8
diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s
index 7fc88009e8..4cc1c0eb10 100644
--- a/src/runtime/asm_s390x.s
+++ b/src/runtime/asm_s390x.s
@@ -892,76 +892,18 @@ TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
MOVD $64, R9
JMP gcWriteBarrier<>(SB)
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
- MOVD R1, x+0(FP)
- MOVD R2, y+8(FP)
- JMP runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
- MOVD R0, x+0(FP)
- MOVD R1, y+8(FP)
- JMP runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
- MOVD R2, x+0(FP)
- MOVD R3, y+8(FP)
- JMP runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$144-0
+ NO_LOCAL_POINTERS
+ // Save all 16 int registers that could have an index in them.
+ // They may be pointers, but if they are they are dead.
+ STMG R0, R12, 24(R15)
+ // Note that R10 @ 104 is not needed, it is an assembler temp
+ // skip R13 aka G @ 128
+ // skip R14 aka LR @ 136
+ // skip R15 aka SP @ 144
+
+ MOVD R14, 8(R15) // PC immediately after call to panicBounds
+ ADD $24, R15, R0 // pointer to save area
+ MOVD R0, 16(R15)
+ CALL runtime·panicBounds64<ABIInternal>(SB)
+ RET
diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s
index 751bf0aec7..85aa52e0f7 100644
--- a/src/runtime/asm_wasm.s
+++ b/src/runtime/asm_wasm.s
@@ -532,7 +532,7 @@ TEXT wasm_pc_f_loop(SB),NOSPLIT,$0
Get SP
I32Const $8
I32Sub
- I32Load16U $2 // PC_F
+ I32Load $2 // PC_F
CallIndirect $0
Drop
@@ -568,7 +568,7 @@ outer:
Get SP
I32Const $8
I32Sub
- I32Load16U $2 // PC_F
+ I32Load $2 // PC_F
Tee R2
Get R0
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index b046ab960f..18e1dc8baf 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -191,8 +191,8 @@ func cgocall(fn, arg unsafe.Pointer) int32 {
osPreemptExtExit(mp)
- // Save current syscall parameters, so m.winsyscall can be
- // used again if callback decide to make syscall.
+ // After exitsyscall we can be rescheduled on a different M,
+ // so we need to restore the original M's winsyscall.
winsyscall := mp.winsyscall
exitsyscall()
@@ -543,18 +543,18 @@ func cgoCheckPointer(ptr any, arg any) {
t := ep._type
top := true
- if arg != nil && (t.Kind_&abi.KindMask == abi.Pointer || t.Kind_&abi.KindMask == abi.UnsafePointer) {
+ if arg != nil && (t.Kind() == abi.Pointer || t.Kind() == abi.UnsafePointer) {
p := ep.data
- if t.Kind_&abi.KindDirectIface == 0 {
+ if !t.IsDirectIface() {
p = *(*unsafe.Pointer)(p)
}
if p == nil || !cgoIsGoPointer(p) {
return
}
aep := efaceOf(&arg)
- switch aep._type.Kind_ & abi.KindMask {
+ switch aep._type.Kind() {
case abi.Bool:
- if t.Kind_&abi.KindMask == abi.UnsafePointer {
+ if t.Kind() == abi.UnsafePointer {
// We don't know the type of the element.
break
}
@@ -578,7 +578,7 @@ func cgoCheckPointer(ptr any, arg any) {
// Check the array rather than the pointer.
pt := (*abi.PtrType)(unsafe.Pointer(aep._type))
t = pt.Elem
- if t.Kind_&abi.KindMask != abi.Array {
+ if t.Kind() != abi.Array {
throw("can't happen")
}
ep = aep
@@ -588,7 +588,7 @@ func cgoCheckPointer(ptr any, arg any) {
}
}
- cgoCheckArg(t, ep.data, t.Kind_&abi.KindDirectIface == 0, top, cgoCheckPointerFail)
+ cgoCheckArg(t, ep.data, !t.IsDirectIface(), top, cgoCheckPointerFail)
}
const cgoCheckPointerFail = "cgo argument has Go pointer to unpinned Go pointer"
@@ -605,7 +605,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
return
}
- switch t.Kind_ & abi.KindMask {
+ switch t.Kind() {
default:
throw("can't happen")
case abi.Array:
@@ -614,7 +614,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
if at.Len != 1 {
throw("can't happen")
}
- cgoCheckArg(at.Elem, p, at.Elem.Kind_&abi.KindDirectIface == 0, top, msg)
+ cgoCheckArg(at.Elem, p, !at.Elem.IsDirectIface(), top, msg)
return
}
for i := uintptr(0); i < at.Len; i++ {
@@ -652,7 +652,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
if !top && !isPinned(p) {
panic(errorString(msg))
}
- cgoCheckArg(it, p, it.Kind_&abi.KindDirectIface == 0, false, msg)
+ cgoCheckArg(it, p, !it.IsDirectIface(), false, msg)
case abi.Slice:
st := (*slicetype)(unsafe.Pointer(t))
s := (*slice)(p)
@@ -684,7 +684,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
if len(st.Fields) != 1 {
throw("can't happen")
}
- cgoCheckArg(st.Fields[0].Typ, p, st.Fields[0].Typ.Kind_&abi.KindDirectIface == 0, top, msg)
+ cgoCheckArg(st.Fields[0].Typ, p, !st.Fields[0].Typ.IsDirectIface(), top, msg)
return
}
for _, f := range st.Fields {
@@ -792,5 +792,5 @@ func cgoCheckResult(val any) {
ep := efaceOf(&val)
t := ep._type
- cgoCheckArg(t, ep.data, t.Kind_&abi.KindDirectIface == 0, false, cgoResultFail)
+ cgoCheckArg(t, ep.data, !t.IsDirectIface(), false, cgoResultFail)
}
diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go
index 8696672065..2db86e0562 100644
--- a/src/runtime/crash_test.go
+++ b/src/runtime/crash_test.go
@@ -186,21 +186,6 @@ func buildTestProg(t *testing.T, binary string, flags ...string) (string, error)
t.Logf("running %v", cmd)
cmd.Dir = "testdata/" + binary
cmd = testenv.CleanCmdEnv(cmd)
-
- // Add the rangefunc GOEXPERIMENT unconditionally since some tests depend on it.
- // TODO(61405): Remove this once it's enabled by default.
- edited := false
- for i := range cmd.Env {
- e := cmd.Env[i]
- if _, vars, ok := strings.Cut(e, "GOEXPERIMENT="); ok {
- cmd.Env[i] = "GOEXPERIMENT=" + vars + ",rangefunc"
- edited = true
- }
- }
- if !edited {
- cmd.Env = append(cmd.Env, "GOEXPERIMENT=rangefunc")
- }
-
out, err := cmd.CombinedOutput()
if err != nil {
target.err = fmt.Errorf("building %s %v: %v\n%s", binary, flags, err, out)
diff --git a/src/runtime/crash_unix_test.go b/src/runtime/crash_unix_test.go
index 101107d2f7..f9c12d5404 100644
--- a/src/runtime/crash_unix_test.go
+++ b/src/runtime/crash_unix_test.go
@@ -65,7 +65,7 @@ func TestCrashDumpsAllThreads(t *testing.T) {
t.Skipf("skipping; not supported on %v", runtime.GOOS)
}
- if runtime.GOOS == "openbsd" && (runtime.GOARCH == "arm" || runtime.GOARCH == "mips64" || runtime.GOARCH == "ppc64") {
+ if runtime.GOOS == "openbsd" && (runtime.GOARCH == "arm" || runtime.GOARCH == "ppc64") {
// This may be ncpu < 2 related...
t.Skipf("skipping; test fails on %s/%s - see issue #42464", runtime.GOOS, runtime.GOARCH)
}
diff --git a/src/runtime/debug.go b/src/runtime/debug.go
index c7592d3329..dacadd2721 100644
--- a/src/runtime/debug.go
+++ b/src/runtime/debug.go
@@ -13,13 +13,23 @@ import (
// simultaneously and returns the previous setting. If n < 1, it does not change
// the current setting.
//
+// # Default
+//
// If the GOMAXPROCS environment variable is set to a positive whole number,
// GOMAXPROCS defaults to that value.
//
-// Otherwise, the Go runtime selects an appropriate default value based on the
-// number of logical CPUs on the machine, the process’s CPU affinity mask, and,
-// on Linux, the process’s average CPU throughput limit based on cgroup CPU
-// quota, if any.
+// Otherwise, the Go runtime selects an appropriate default value from a combination of
+// - the number of logical CPUs on the machine,
+// - the process’s CPU affinity mask,
+// - and, on Linux, the process’s average CPU throughput limit based on cgroup CPU
+// quota, if any.
+//
+// If GODEBUG=containermaxprocs=0 is set and GOMAXPROCS is not set by the
+// environment variable, then GOMAXPROCS instead defaults to the value of
+// [runtime.NumCPU]. Note that GODEBUG=containermaxprocs=0 is [default] for
+// language version 1.24 and below.
+//
+// # Updates
//
// The Go runtime periodically updates the default value based on changes to
// the total logical CPU count, the CPU affinity mask, or cgroup quota. Setting
@@ -27,11 +37,36 @@ import (
// GOMAXPROCS disables automatic updates. The default value and automatic
// updates can be restored by calling [SetDefaultGOMAXPROCS].
//
-// If GODEBUG=containermaxprocs=0 is set, GOMAXPROCS defaults to the value of
-// [runtime.NumCPU]. If GODEBUG=updatemaxprocs=0 is set, the Go runtime does
-// not perform automatic GOMAXPROCS updating.
+// If GODEBUG=updatemaxprocs=0 is set, the Go runtime does not perform
+// automatic GOMAXPROCS updating. Note that GODEBUG=updatemaxprocs=0 is
+// [default] for language version 1.24 and below.
+//
+// # Compatibility
+//
+// Note that the default GOMAXPROCS behavior may change as the scheduler
+// improves, especially the implementation detail below.
+//
+// # Implementation details
+//
+// When computing default GOMAXPROCS via cgroups, the Go runtime computes the
+// "average CPU throughput limit" as the cgroup CPU quota / period. In cgroup
+// v2, these values come from the cpu.max file. In cgroup v1, they come from
+// cpu.cfs_quota_us and cpu.cfs_period_us, respectively. In container runtimes
+// that allow configuring CPU limits, this value usually corresponds to the
+// "CPU limit" option, not "CPU request".
+//
+// The Go runtime typically selects the default GOMAXPROCS as the minimum of
+// the logical CPU count, the CPU affinity mask count, or the cgroup CPU
+// throughput limit. However, it will never set GOMAXPROCS less than 2 unless
+// the logical CPU count or CPU affinity mask count are below 2.
+//
+// If the cgroup CPU throughput limit is not a whole number, the Go runtime
+// rounds up to the next whole number.
+//
+// GOMAXPROCS updates are performed up to once per second, or less if the
+// application is idle.
//
-// The default GOMAXPROCS behavior may change as the scheduler improves.
+// [default]: https://go.dev/doc/godebug#default
func GOMAXPROCS(n int) int {
if GOARCH == "wasm" && n > 1 {
n = 1 // WebAssembly has no threads yet, so only one CPU is possible.
diff --git a/src/runtime/debuglog.go b/src/runtime/debuglog.go
index 50fba3568d..e993e396c1 100644
--- a/src/runtime/debuglog.go
+++ b/src/runtime/debuglog.go
@@ -327,7 +327,7 @@ func (l *dloggerImpl) p(x any) *dloggerImpl {
l.w.uvarint(0)
} else {
v := efaceOf(&x)
- switch v._type.Kind_ & abi.KindMask {
+ switch v._type.Kind() {
case abi.Chan, abi.Func, abi.Map, abi.Pointer, abi.UnsafePointer:
l.w.uvarint(uint64(uintptr(v.data)))
default:
diff --git a/src/runtime/defs_openbsd.go b/src/runtime/defs_openbsd.go
index d93c087a81..9564a3354c 100644
--- a/src/runtime/defs_openbsd.go
+++ b/src/runtime/defs_openbsd.go
@@ -11,7 +11,6 @@ GOARCH=amd64 go tool cgo -godefs defs_openbsd.go
GOARCH=386 go tool cgo -godefs defs_openbsd.go
GOARCH=arm go tool cgo -godefs defs_openbsd.go
GOARCH=arm64 go tool cgo -godefs defs_openbsd.go
-GOARCH=mips64 go tool cgo -godefs defs_openbsd.go
*/
package runtime
diff --git a/src/runtime/defs_openbsd_mips64.go b/src/runtime/defs_openbsd_mips64.go
deleted file mode 100644
index 7958044d04..0000000000
--- a/src/runtime/defs_openbsd_mips64.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Generated from:
-//
-// GOARCH=mips64 go tool cgo -godefs defs_openbsd.go
-//
-// Then converted to the form used by the runtime.
-
-package runtime
-
-import "unsafe"
-
-const (
- _EINTR = 0x4
- _EFAULT = 0xe
- _EAGAIN = 0x23
- _ETIMEDOUT = 0x3c
-
- _O_WRONLY = 0x1
- _O_NONBLOCK = 0x4
- _O_CREAT = 0x200
- _O_TRUNC = 0x400
- _O_CLOEXEC = 0x10000
-
- _PROT_NONE = 0x0
- _PROT_READ = 0x1
- _PROT_WRITE = 0x2
- _PROT_EXEC = 0x4
-
- _MAP_ANON = 0x1000
- _MAP_PRIVATE = 0x2
- _MAP_FIXED = 0x10
- _MAP_STACK = 0x4000
-
- _MADV_DONTNEED = 0x4
- _MADV_FREE = 0x6
-
- _SA_SIGINFO = 0x40
- _SA_RESTART = 0x2
- _SA_ONSTACK = 0x1
-
- _SIGHUP = 0x1
- _SIGINT = 0x2
- _SIGQUIT = 0x3
- _SIGILL = 0x4
- _SIGTRAP = 0x5
- _SIGABRT = 0x6
- _SIGEMT = 0x7
- _SIGFPE = 0x8
- _SIGKILL = 0x9
- _SIGBUS = 0xa
- _SIGSEGV = 0xb
- _SIGSYS = 0xc
- _SIGPIPE = 0xd
- _SIGALRM = 0xe
- _SIGTERM = 0xf
- _SIGURG = 0x10
- _SIGSTOP = 0x11
- _SIGTSTP = 0x12
- _SIGCONT = 0x13
- _SIGCHLD = 0x14
- _SIGTTIN = 0x15
- _SIGTTOU = 0x16
- _SIGIO = 0x17
- _SIGXCPU = 0x18
- _SIGXFSZ = 0x19
- _SIGVTALRM = 0x1a
- _SIGPROF = 0x1b
- _SIGWINCH = 0x1c
- _SIGINFO = 0x1d
- _SIGUSR1 = 0x1e
- _SIGUSR2 = 0x1f
-
- _FPE_INTDIV = 0x1
- _FPE_INTOVF = 0x2
- _FPE_FLTDIV = 0x3
- _FPE_FLTOVF = 0x4
- _FPE_FLTUND = 0x5
- _FPE_FLTRES = 0x6
- _FPE_FLTINV = 0x7
- _FPE_FLTSUB = 0x8
-
- _BUS_ADRALN = 0x1
- _BUS_ADRERR = 0x2
- _BUS_OBJERR = 0x3
-
- _SEGV_MAPERR = 0x1
- _SEGV_ACCERR = 0x2
-
- _ITIMER_REAL = 0x0
- _ITIMER_VIRTUAL = 0x1
- _ITIMER_PROF = 0x2
-
- _EV_ADD = 0x1
- _EV_DELETE = 0x2
- _EV_CLEAR = 0x20
- _EV_ERROR = 0x4000
- _EV_EOF = 0x8000
- _EVFILT_READ = -0x1
- _EVFILT_WRITE = -0x2
-)
-
-type tforkt struct {
- tf_tcb unsafe.Pointer
- tf_tid *int32
- tf_stack uintptr
-}
-
-type sigcontext struct {
- sc_cookie uint64
- sc_mask uint64
- sc_pc uint64
- sc_regs [32]uint64
- mullo uint64
- mulhi uint64
- sc_fpregs [33]uint64
- sc_fpused uint64
- sc_fpc_eir uint64
- _xxx [8]int64
-}
-
-type siginfo struct {
- si_signo int32
- si_code int32
- si_errno int32
- pad_cgo_0 [4]byte
- _data [120]byte
-}
-
-type stackt struct {
- ss_sp uintptr
- ss_size uintptr
- ss_flags int32
- pad_cgo_0 [4]byte
-}
-
-type timespec struct {
- tv_sec int64
- tv_nsec int64
-}
-
-//go:nosplit
-func (ts *timespec) setNsec(ns int64) {
- ts.tv_sec = ns / 1e9
- ts.tv_nsec = ns % 1e9
-}
-
-type timeval struct {
- tv_sec int64
- tv_usec int64
-}
-
-func (tv *timeval) set_usec(x int32) {
- tv.tv_usec = int64(x)
-}
-
-type itimerval struct {
- it_interval timeval
- it_value timeval
-}
-
-type keventt struct {
- ident uint64
- filter int16
- flags uint16
- fflags uint32
- data int64
- udata *byte
-}
diff --git a/src/runtime/ehooks_test.go b/src/runtime/ehooks_test.go
index c7f51740fb..380d709876 100644
--- a/src/runtime/ehooks_test.go
+++ b/src/runtime/ehooks_test.go
@@ -63,12 +63,12 @@ func TestExitHooks(t *testing.T) {
outs = strings.TrimSpace(outs)
if s.expected != "" && s.expected != outs {
t.Fatalf("failed %s: wanted %q\noutput:\n%s",
- s.mode, s.expected, outs)
+ s.mode, s.expected, outs)
}
for _, need := range s.musthave {
if !strings.Contains(outs, need) {
t.Fatalf("failed mode %s: output does not contain %q\noutput:\n%s",
- s.mode, need, outs)
+ s.mode, need, outs)
}
}
if s.expected == "" && s.musthave == nil && outs != "" {
diff --git a/src/runtime/error.go b/src/runtime/error.go
index 8e50c0fea4..f95b14d780 100644
--- a/src/runtime/error.go
+++ b/src/runtime/error.go
@@ -132,52 +132,34 @@ type boundsError struct {
// Instead, we keep track of whether x should be interpreted as signed or unsigned.
// y is known to be nonnegative and to fit in an int.
signed bool
- code boundsErrorCode
+ code abi.BoundsErrorCode
}
-type boundsErrorCode uint8
-
-const (
- boundsIndex boundsErrorCode = iota // s[x], 0 <= x < len(s) failed
-
- boundsSliceAlen // s[?:x], 0 <= x <= len(s) failed
- boundsSliceAcap // s[?:x], 0 <= x <= cap(s) failed
- boundsSliceB // s[x:y], 0 <= x <= y failed (but boundsSliceA didn't happen)
-
- boundsSlice3Alen // s[?:?:x], 0 <= x <= len(s) failed
- boundsSlice3Acap // s[?:?:x], 0 <= x <= cap(s) failed
- boundsSlice3B // s[?:x:y], 0 <= x <= y failed (but boundsSlice3A didn't happen)
- boundsSlice3C // s[x:y:?], 0 <= x <= y failed (but boundsSlice3A/B didn't happen)
-
- boundsConvert // (*[x]T)(s), 0 <= x <= len(s) failed
- // Note: in the above, len(s) and cap(s) are stored in y
-)
-
// boundsErrorFmts provide error text for various out-of-bounds panics.
// Note: if you change these strings, you should adjust the size of the buffer
// in boundsError.Error below as well.
var boundsErrorFmts = [...]string{
- boundsIndex: "index out of range [%x] with length %y",
- boundsSliceAlen: "slice bounds out of range [:%x] with length %y",
- boundsSliceAcap: "slice bounds out of range [:%x] with capacity %y",
- boundsSliceB: "slice bounds out of range [%x:%y]",
- boundsSlice3Alen: "slice bounds out of range [::%x] with length %y",
- boundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y",
- boundsSlice3B: "slice bounds out of range [:%x:%y]",
- boundsSlice3C: "slice bounds out of range [%x:%y:]",
- boundsConvert: "cannot convert slice with length %y to array or pointer to array with length %x",
+ abi.BoundsIndex: "index out of range [%x] with length %y",
+ abi.BoundsSliceAlen: "slice bounds out of range [:%x] with length %y",
+ abi.BoundsSliceAcap: "slice bounds out of range [:%x] with capacity %y",
+ abi.BoundsSliceB: "slice bounds out of range [%x:%y]",
+ abi.BoundsSlice3Alen: "slice bounds out of range [::%x] with length %y",
+ abi.BoundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y",
+ abi.BoundsSlice3B: "slice bounds out of range [:%x:%y]",
+ abi.BoundsSlice3C: "slice bounds out of range [%x:%y:]",
+ abi.BoundsConvert: "cannot convert slice with length %y to array or pointer to array with length %x",
}
// boundsNegErrorFmts are overriding formats if x is negative. In this case there's no need to report y.
var boundsNegErrorFmts = [...]string{
- boundsIndex: "index out of range [%x]",
- boundsSliceAlen: "slice bounds out of range [:%x]",
- boundsSliceAcap: "slice bounds out of range [:%x]",
- boundsSliceB: "slice bounds out of range [%x:]",
- boundsSlice3Alen: "slice bounds out of range [::%x]",
- boundsSlice3Acap: "slice bounds out of range [::%x]",
- boundsSlice3B: "slice bounds out of range [:%x:]",
- boundsSlice3C: "slice bounds out of range [%x::]",
+ abi.BoundsIndex: "index out of range [%x]",
+ abi.BoundsSliceAlen: "slice bounds out of range [:%x]",
+ abi.BoundsSliceAcap: "slice bounds out of range [:%x]",
+ abi.BoundsSliceB: "slice bounds out of range [%x:]",
+ abi.BoundsSlice3Alen: "slice bounds out of range [::%x]",
+ abi.BoundsSlice3Acap: "slice bounds out of range [::%x]",
+ abi.BoundsSlice3B: "slice bounds out of range [:%x:]",
+ abi.BoundsSlice3C: "slice bounds out of range [%x::]",
}
func (e boundsError) RuntimeError() {}
@@ -278,7 +260,7 @@ func printanycustomtype(i any) {
eface := efaceOf(&i)
typestring := toRType(eface._type).string()
- switch eface._type.Kind_ {
+ switch eface._type.Kind() {
case abi.String:
print(typestring, `("`)
printindented(*(*string)(eface.data))
diff --git a/src/runtime/export_debug_test.go b/src/runtime/export_debug_test.go
index 96f6fd9eea..94dc974804 100644
--- a/src/runtime/export_debug_test.go
+++ b/src/runtime/export_debug_test.go
@@ -33,13 +33,13 @@ func InjectDebugCall(gp *g, fn any, regArgs *abi.RegArgs, stackArgs any, tkill f
}
f := efaceOf(&fn)
- if f._type == nil || f._type.Kind_&abi.KindMask != abi.Func {
+ if f._type == nil || f._type.Kind() != abi.Func {
return nil, plainError("fn must be a function")
}
fv := (*funcval)(f.data)
a := efaceOf(&stackArgs)
- if a._type != nil && a._type.Kind_&abi.KindMask != abi.Pointer {
+ if a._type != nil && a._type.Kind() != abi.Pointer {
return nil, plainError("args must be a pointer or nil")
}
argp := a.data
diff --git a/src/runtime/export_map_noswiss_test.go b/src/runtime/export_map_noswiss_test.go
deleted file mode 100644
index 4638afa6b8..0000000000
--- a/src/runtime/export_map_noswiss_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-const RuntimeHmapSize = unsafe.Sizeof(hmap{})
-
-func OverLoadFactor(count int, B uint8) bool {
- return overLoadFactor(count, B)
-}
-
-func MapBucketsCount(m map[int]int) int {
- h := *(**hmap)(unsafe.Pointer(&m))
- return 1 << h.B
-}
-
-func MapBucketsPointerIsNil(m map[int]int) bool {
- h := *(**hmap)(unsafe.Pointer(&m))
- return h.buckets == nil
-}
-
-func MapTombstoneCheck(m map[int]int) {
- // Make sure emptyOne and emptyRest are distributed correctly.
- // We should have a series of filled and emptyOne cells, followed by
- // a series of emptyRest cells.
- h := *(**hmap)(unsafe.Pointer(&m))
- i := any(m)
- t := *(**maptype)(unsafe.Pointer(&i))
-
- for x := 0; x < 1<<h.B; x++ {
- b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
- n := 0
- for b := b0; b != nil; b = b.overflow(t) {
- for i := 0; i < abi.OldMapBucketCount; i++ {
- if b.tophash[i] != emptyRest {
- n++
- }
- }
- }
- k := 0
- for b := b0; b != nil; b = b.overflow(t) {
- for i := 0; i < abi.OldMapBucketCount; i++ {
- if k < n && b.tophash[i] == emptyRest {
- panic("early emptyRest")
- }
- if k >= n && b.tophash[i] != emptyRest {
- panic("late non-emptyRest")
- }
- if k == n-1 && b.tophash[i] == emptyOne {
- panic("last non-emptyRest entry is emptyOne")
- }
- k++
- }
- }
- }
-}
diff --git a/src/runtime/export_map_swiss_test.go b/src/runtime/export_map_swiss_test.go
deleted file mode 100644
index 55a7d6ff04..0000000000
--- a/src/runtime/export_map_swiss_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package runtime
-
-func MapTombstoneCheck(m map[int]int) {
- // TODO
-}
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 81542deb59..1f55717f0a 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -9,6 +9,7 @@ package runtime
import (
"internal/abi"
"internal/goarch"
+ "internal/goexperiment"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/gc"
@@ -58,9 +59,6 @@ const CrashStackImplemented = crashStackImplemented
const TracebackInnerFrames = tracebackInnerFrames
const TracebackOuterFrames = tracebackOuterFrames
-var MapKeys = keys
-var MapValues = values
-
var LockPartialOrder = lockPartialOrder
type TimeTimer = timeTimer
@@ -417,7 +415,8 @@ func ReadMemStatsSlow() (base, slow MemStats) {
slow.HeapReleased += uint64(pg) * pageSize
}
for _, p := range allp {
- pg := sys.OnesCount64(p.pcache.scav)
+ // Only count scav bits for pages in the cache
+ pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav)
slow.HeapReleased += uint64(pg) * pageSize
}
@@ -1122,12 +1121,16 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
// Lock so that we can safely access the bitmap.
lock(&mheap_.lock)
+
+ heapBase := mheap_.pages.inUse.ranges[0].base.addr()
+ secondArenaBase := arenaBase(arenaIndex(heapBase) + 1)
chunkLoop:
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
chunk := mheap_.pages.tryChunkOf(i)
if chunk == nil {
continue
}
+ cb := chunkBase(i)
for j := 0; j < pallocChunkPages/64; j++ {
// Run over each 64-bit bitmap section and ensure
// scavenged is being cleared properly on allocation.
@@ -1137,12 +1140,20 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
want := chunk.scavenged[j] &^ chunk.pallocBits[j]
got := chunk.scavenged[j]
if want != got {
+ // When goexperiment.RandomizedHeapBase64 is set we use a
+ // series of padding pages to generate randomized heap base
+ // address which have both the alloc and scav bits set. If
+ // we see this for a chunk between the address of the heap
+ // base, and the address of the second arena continue.
+ if goexperiment.RandomizedHeapBase64 && (cb >= heapBase && cb < secondArenaBase) {
+ continue
+ }
ok = false
if n >= len(mismatches) {
break chunkLoop
}
mismatches[n] = BitsMismatch{
- Base: chunkBase(i) + uintptr(j)*64*pageSize,
+ Base: cb + uintptr(j)*64*pageSize,
Got: got,
Want: want,
}
@@ -1761,7 +1772,7 @@ func NewUserArena() *UserArena {
func (a *UserArena) New(out *any) {
i := efaceOf(out)
typ := i._type
- if typ.Kind_&abi.KindMask != abi.Pointer {
+ if typ.Kind() != abi.Pointer {
panic("new result of non-ptr type")
}
typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
diff --git a/src/runtime/export_windows_test.go b/src/runtime/export_windows_test.go
index 13d30d4bc4..caaf2dae51 100644
--- a/src/runtime/export_windows_test.go
+++ b/src/runtime/export_windows_test.go
@@ -11,8 +11,6 @@ import (
"unsafe"
)
-const MaxArgs = maxArgs
-
var (
OsYield = osyield
TimeBeginPeriodRetValue = &timeBeginPeriodRetValue
@@ -20,7 +18,7 @@ var (
func NumberOfProcessors() int32 {
var info systeminfo
- stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
+ stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
return int32(info.dwnumberofprocessors)
}
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 5476035b2e..d9474034c2 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -206,7 +206,7 @@ func dumptype(t *_type) {
dwritebyte('.')
dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name)))
}
- dumpbool(t.Kind_&abi.KindDirectIface == 0 || t.Pointers())
+ dumpbool(!t.IsDirectIface() || t.Pointers())
}
// dump an object.
@@ -460,7 +460,7 @@ func dumproots() {
continue
}
spf := (*specialfinalizer)(unsafe.Pointer(sp))
- p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
+ p := unsafe.Pointer(s.base() + spf.special.offset)
dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
}
}
@@ -659,7 +659,7 @@ func dumpmemprof() {
continue
}
spp := (*specialprofile)(unsafe.Pointer(sp))
- p := s.base() + uintptr(spp.special.offset)
+ p := s.base() + spp.special.offset
dumpint(tagAllocSample)
dumpint(uint64(p))
dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
diff --git a/src/runtime/linkname_swiss.go b/src/runtime/linkname_shim.go
index 1be724477e..4ba3d1fb78 100644
--- a/src/runtime/linkname_swiss.go
+++ b/src/runtime/linkname_shim.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.swissmap
-
package runtime
import (
@@ -16,8 +14,7 @@ import (
// Legacy //go:linkname compatibility shims
//
// The functions below are unused by the toolchain, and exist only for
-// compatibility with existing //go:linkname use in the ecosystem (and in
-// map_noswiss.go for normal use via GOEXPERIMENT=noswissmap).
+// compatibility with existing //go:linkname use in the ecosystem.
// linknameIter is the it argument to mapiterinit and mapiternext.
//
@@ -27,7 +24,7 @@ import (
// type hiter struct {
// key unsafe.Pointer
// elem unsafe.Pointer
-// t *maptype
+// t *maptype // old map abi.Type
// h *hmap
// buckets unsafe.Pointer
// bptr *bmap
@@ -64,7 +61,7 @@ type linknameIter struct {
// Fields from hiter.
key unsafe.Pointer
elem unsafe.Pointer
- typ *abi.SwissMapType
+ typ *abi.MapType
// The real iterator.
it *maps.Iter
@@ -88,7 +85,7 @@ type linknameIter struct {
// See go.dev/issue/67401.
//
//go:linkname mapiterinit
-func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) {
+func mapiterinit(t *abi.MapType, m *maps.Map, it *linknameIter) {
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapiterinit))
@@ -120,7 +117,7 @@ func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) {
// See go.dev/issue/67401.
//
//go:linkname reflect_mapiterinit reflect.mapiterinit
-func reflect_mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) {
+func reflect_mapiterinit(t *abi.MapType, m *maps.Map, it *linknameIter) {
mapiterinit(t, m, it)
}
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index bc7dab9d20..d21b2c49b5 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -102,6 +102,7 @@ package runtime
import (
"internal/goarch"
+ "internal/goexperiment"
"internal/goos"
"internal/runtime/atomic"
"internal/runtime/gc"
@@ -345,6 +346,14 @@ const (
// metadata mappings back to the OS. That would be quite complex to do in general
// as the heap is likely fragmented after a reduction in heap size.
minHeapForMetadataHugePages = 1 << 30
+
+ // randomizeHeapBase indicates if the heap base address should be randomized.
+ // See comment in mallocinit for how the randomization is performed.
+ randomizeHeapBase = goexperiment.RandomizedHeapBase64 && goarch.PtrSize == 8 && !isSbrkPlatform
+
+ // randHeapBasePrefixMask is used to extract the top byte of the randomized
+ // heap base address.
+ randHeapBasePrefixMask = ^uintptr(0xff << (heapAddrBits - 8))
)
// physPageSize is the size in bytes of the OS's physical pages.
@@ -372,6 +381,24 @@ var (
physHugePageShift uint
)
+var (
+ // heapRandSeed is a random value that is populated in mallocinit if
+ // randomizeHeapBase is set. It is used in mallocinit, and mheap.grow, to
+ // randomize the base heap address.
+ heapRandSeed uintptr
+ heapRandSeedBitsRemaining int
+)
+
+func nextHeapRandBits(bits int) uintptr {
+ if bits > heapRandSeedBitsRemaining {
+ throw("not enough heapRandSeed bits remaining")
+ }
+ r := heapRandSeed >> (64 - bits)
+ heapRandSeed <<= bits
+ heapRandSeedBitsRemaining -= bits
+ return r
+}
+
func mallocinit() {
if gc.SizeClassToSize[tinySizeClass] != maxTinySize {
throw("bad TinySizeClass")
@@ -517,6 +544,42 @@ func mallocinit() {
//
// In race mode we have no choice but to just use the same hints because
// the race detector requires that the heap be mapped contiguously.
+ //
+ // If randomizeHeapBase is set, we attempt to randomize the base address
+ // as much as possible. We do this by generating a random uint64 via
+ // bootstrapRand and using its bits to randomize portions of the base
+ // address as follows:
+ // * We first generate a random heapArenaBytes aligned address that we use for
+ // generating the hints.
+ // * On the first call to mheap.grow, we then generate a random PallocChunkBytes
+ // aligned offset into the mmap'd heap region, which we use as the base for
+ // the heap region.
+ // * We then select a page offset in that PallocChunkBytes region to start the
+ // heap at, and mark all the pages up to that offset as allocated.
+ //
+ // Our final randomized "heap base address" becomes the first byte of
+ // the first available page returned by the page allocator. This results
+ // in an address with at least heapAddrBits-gc.PageShift-2-(1*goarch.IsAmd64)
+ // bits of entropy.
+
+ var randHeapBase uintptr
+ var randHeapBasePrefix byte
+ // heapAddrBits is 48 on most platforms, but we only use 47 of those
+ // bits in order to provide a good amount of room for the heap to grow
+ // contiguously. On amd64, there are 48 bits, but the top bit is sign
+ // extended, so we throw away another bit, just to be safe.
+ randHeapAddrBits := heapAddrBits - 1 - (goarch.IsAmd64 * 1)
+ if randomizeHeapBase {
+ // Generate a random value, and take the bottom heapAddrBits-logHeapArenaBytes
+ // bits, using them as the top bits for randHeapBase.
+ heapRandSeed, heapRandSeedBitsRemaining = uintptr(bootstrapRand()), 64
+
+ topBits := (randHeapAddrBits - logHeapArenaBytes)
+ randHeapBase = nextHeapRandBits(topBits) << (randHeapAddrBits - topBits)
+ randHeapBase = alignUp(randHeapBase, heapArenaBytes)
+ randHeapBasePrefix = byte(randHeapBase >> (randHeapAddrBits - 8))
+ }
+
for i := 0x7f; i >= 0; i-- {
var p uintptr
switch {
@@ -528,6 +591,9 @@ func mallocinit() {
if p >= uintptrMask&0x00e000000000 {
continue
}
+ case randomizeHeapBase:
+ prefix := uintptr(randHeapBasePrefix+byte(i)) << (randHeapAddrBits - 8)
+ p = prefix | (randHeapBase & randHeapBasePrefixMask)
case GOARCH == "arm64" && GOOS == "ios":
p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
case GOARCH == "arm64":
diff --git a/src/runtime/map_swiss.go b/src/runtime/map.go
index c2cf08fcaa..4a0713cfc4 100644
--- a/src/runtime/map_swiss.go
+++ b/src/runtime/map.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.swissmap
-
package runtime
import (
@@ -19,12 +17,10 @@ const (
loadFactorDen = 8
)
-type maptype = abi.SwissMapType
-
//go:linkname maps_errNilAssign internal/runtime/maps.errNilAssign
var maps_errNilAssign error = plainError("assignment to entry in nil map")
-func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map {
+func makemap64(t *abi.MapType, hint int64, m *maps.Map) *maps.Map {
if int64(int(hint)) != hint {
hint = 0
}
@@ -32,7 +28,7 @@ func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map {
}
// makemap_small implements Go map creation for make(map[k]v) and
-// make(map[k]v, hint) when hint is known to be at most abi.SwissMapGroupSlots
+// make(map[k]v, hint) when hint is known to be at most abi.MapGroupSlots
// at compile time and the map needs to be allocated on the heap.
//
// makemap_small should be an internal detail,
@@ -63,7 +59,7 @@ func makemap_small() *maps.Map {
// See go.dev/issue/67401.
//
//go:linkname makemap
-func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map {
+func makemap(t *abi.MapType, hint int, m *maps.Map) *maps.Map {
if hint < 0 {
hint = 0
}
@@ -81,7 +77,7 @@ func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map {
// we want to avoid one layer of call.
//
//go:linkname mapaccess1
-func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
+func mapaccess1(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
// mapaccess2 should be an internal detail,
// but widely used packages access it using linkname.
@@ -92,9 +88,9 @@ func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Poi
// See go.dev/issue/67401.
//
//go:linkname mapaccess2
-func mapaccess2(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool)
+func mapaccess2(t *abi.MapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool)
-func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer {
+func mapaccess1_fat(t *abi.MapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer {
e := mapaccess1(t, m, key)
if e == unsafe.Pointer(&zeroVal[0]) {
return zero
@@ -102,7 +98,7 @@ func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer)
return e
}
-func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
+func mapaccess2_fat(t *abi.MapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
e := mapaccess1(t, m, key)
if e == unsafe.Pointer(&zeroVal[0]) {
return zero, false
@@ -125,7 +121,7 @@ func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer)
// See go.dev/issue/67401.
//
//go:linkname mapassign
-func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
+func mapassign(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
// mapdelete should be an internal detail,
// but widely used packages access it using linkname.
@@ -136,7 +132,7 @@ func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Poin
// See go.dev/issue/67401.
//
//go:linkname mapdelete
-func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
+func mapdelete(t *abi.MapType, m *maps.Map, key unsafe.Pointer) {
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapdelete)
@@ -157,7 +153,7 @@ func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
// performs the first step of iteration. The Iter struct pointed to by 'it' is
// allocated on the stack by the compilers order pass or on the heap by
// reflect. Both need to have zeroed it since the struct contains pointers.
-func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) {
+func mapIterStart(t *abi.MapType, m *maps.Map, it *maps.Iter) {
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapIterStart))
@@ -179,7 +175,7 @@ func mapIterNext(it *maps.Iter) {
}
// mapclear deletes all keys from a map.
-func mapclear(t *abi.SwissMapType, m *maps.Map) {
+func mapclear(t *abi.MapType, m *maps.Map) {
if raceenabled && m != nil {
callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapclear)
@@ -205,7 +201,7 @@ func mapclear(t *abi.SwissMapType, m *maps.Map) {
// See go.dev/issue/67401.
//
//go:linkname reflect_makemap reflect.makemap
-func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map {
+func reflect_makemap(t *abi.MapType, cap int) *maps.Map {
// Check invariants and reflects math.
if t.Key.Equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
@@ -226,7 +222,7 @@ func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map {
// See go.dev/issue/67401.
//
//go:linkname reflect_mapaccess reflect.mapaccess
-func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
+func reflect_mapaccess(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
elem, ok := mapaccess2(t, m, key)
if !ok {
// reflect wants nil for a missing element
@@ -236,7 +232,7 @@ func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) uns
}
//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
-func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) unsafe.Pointer {
+func reflect_mapaccess_faststr(t *abi.MapType, m *maps.Map, key string) unsafe.Pointer {
elem, ok := mapaccess2_faststr(t, m, key)
if !ok {
// reflect wants nil for a missing element
@@ -254,24 +250,24 @@ func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) uns
// Do not remove or change the type signature.
//
//go:linkname reflect_mapassign reflect.mapassign0
-func reflect_mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) {
+func reflect_mapassign(t *abi.MapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) {
p := mapassign(t, m, key)
typedmemmove(t.Elem, p, elem)
}
//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
-func reflect_mapassign_faststr(t *abi.SwissMapType, m *maps.Map, key string, elem unsafe.Pointer) {
+func reflect_mapassign_faststr(t *abi.MapType, m *maps.Map, key string, elem unsafe.Pointer) {
p := mapassign_faststr(t, m, key)
typedmemmove(t.Elem, p, elem)
}
//go:linkname reflect_mapdelete reflect.mapdelete
-func reflect_mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
+func reflect_mapdelete(t *abi.MapType, m *maps.Map, key unsafe.Pointer) {
mapdelete(t, m, key)
}
//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
-func reflect_mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, key string) {
+func reflect_mapdelete_faststr(t *abi.MapType, m *maps.Map, key string) {
mapdelete_faststr(t, m, key)
}
@@ -297,7 +293,7 @@ func reflect_maplen(m *maps.Map) int {
}
//go:linkname reflect_mapclear reflect.mapclear
-func reflect_mapclear(t *abi.SwissMapType, m *maps.Map) {
+func reflect_mapclear(t *abi.MapType, m *maps.Map) {
mapclear(t, m)
}
@@ -325,25 +321,9 @@ func mapinitnoop()
//go:linkname mapclone maps.clone
func mapclone(m any) any {
e := efaceOf(&m)
- typ := (*abi.SwissMapType)(unsafe.Pointer(e._type))
+ typ := (*abi.MapType)(unsafe.Pointer(e._type))
map_ := (*maps.Map)(e.data)
map_ = map_.Clone(typ)
e.data = (unsafe.Pointer)(map_)
return m
}
-
-// keys for implementing maps.keys
-//
-//go:linkname keys maps.keys
-func keys(m any, p unsafe.Pointer) {
- // Currently unused in the maps package.
- panic("unimplemented")
-}
-
-// values for implementing maps.values
-//
-//go:linkname values maps.values
-func values(m any, p unsafe.Pointer) {
- // Currently unused in the maps package.
- panic("unimplemented")
-}
diff --git a/src/runtime/map_fast32_swiss.go b/src/runtime/map_fast32.go
index 0a241d3793..17b4c31d02 100644
--- a/src/runtime/map_fast32_swiss.go
+++ b/src/runtime/map_fast32.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.swissmap
-
package runtime
import (
@@ -15,7 +13,7 @@ import (
// Functions below pushed from internal/runtime/maps.
//go:linkname mapaccess1_fast32
-func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer
+func mapaccess1_fast32(t *abi.MapType, m *maps.Map, key uint32) unsafe.Pointer
// mapaccess2_fast32 should be an internal detail,
// but widely used packages access it using linkname.
@@ -26,7 +24,7 @@ func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Poin
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast32
-func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Pointer, bool)
+func mapaccess2_fast32(t *abi.MapType, m *maps.Map, key uint32) (unsafe.Pointer, bool)
// mapassign_fast32 should be an internal detail,
// but widely used packages access it using linkname.
@@ -38,7 +36,7 @@ func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Poi
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast32
-func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer
+func mapassign_fast32(t *abi.MapType, m *maps.Map, key uint32) unsafe.Pointer
// mapassign_fast32ptr should be an internal detail,
// but widely used packages access it using linkname.
@@ -49,7 +47,7 @@ func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Point
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast32ptr
-func mapassign_fast32ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
+func mapassign_fast32ptr(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
//go:linkname mapdelete_fast32
-func mapdelete_fast32(t *abi.SwissMapType, m *maps.Map, key uint32)
+func mapdelete_fast32(t *abi.MapType, m *maps.Map, key uint32)
diff --git a/src/runtime/map_fast32_noswiss.go b/src/runtime/map_fast32_noswiss.go
deleted file mode 100644
index 751717b6cd..0000000000
--- a/src/runtime/map_fast32_noswiss.go
+++ /dev/null
@@ -1,493 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-import (
- "internal/abi"
- "internal/goarch"
- "internal/runtime/sys"
- "unsafe"
-)
-
-func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0])
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
- if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize))
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0])
-}
-
-// mapaccess2_fast32 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapaccess2_fast32
-func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0]), false
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
- if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize)), true
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// mapassign_fast32 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/bytedance/sonic
-// - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_fast32
-func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := sys.GetCallerPC()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
- hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapassign.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast32(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-
- var insertb *bmap
- var inserti uintptr
- var insertk unsafe.Pointer
-
-bucketloop:
- for {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- if isEmpty(b.tophash[i]) {
- if insertb == nil {
- inserti = i
- insertb = b
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
- if k != key {
- continue
- }
- inserti = i
- insertb = b
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
- // store new key at insert position
- *(*uint32)(insertk) = key
-
- h.count++
-
-done:
- elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize))
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
- return elem
-}
-
-// mapassign_fast32ptr should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_fast32ptr
-func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := sys.GetCallerPC()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
- hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapassign.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast32(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-
- var insertb *bmap
- var inserti uintptr
- var insertk unsafe.Pointer
-
-bucketloop:
- for {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- if isEmpty(b.tophash[i]) {
- if insertb == nil {
- inserti = i
- insertb = b
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
- if k != key {
- continue
- }
- inserti = i
- insertb = b
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
- // store new key at insert position
- *(*unsafe.Pointer)(insertk) = key
-
- h.count++
-
-done:
- elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize))
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
- return elem
-}
-
-func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
- }
- if h == nil || h.count == 0 {
- return
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
-
- hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapdelete
- h.flags ^= hashWriting
-
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast32(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
- bOrig := b
-search:
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) {
- if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
- continue
- }
- // Only clear key if there are pointers in it.
- // This can only happen if pointers are 32 bit
- // wide as 64 bit pointers do not fit into a 32 bit key.
- if goarch.PtrSize == 4 && t.Key.Pointers() {
- // The key must be a pointer as we checked pointers are
- // 32 bits wide and the key is 32 bits wide also.
- *(*unsafe.Pointer)(k) = nil
- }
- e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize))
- if t.Elem.Pointers() {
- memclrHasPointers(e, t.Elem.Size_)
- } else {
- memclrNoHeapPointers(e, t.Elem.Size_)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- if i == abi.OldMapBucketCount-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
- }
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = abi.OldMapBucketCount - 1
- } else {
- i--
- }
- if b.tophash[i] != emptyOne {
- break
- }
- }
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = uint32(rand())
- }
- break search
- }
- }
-
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
-}
-
-func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate_fast32(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate_fast32(t, h, h.nevacuate)
- }
-}
-
-func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
-
- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, abi.OldMapBucketCount*4)
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, abi.OldMapBucketCount*4)
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, abi.OldMapBucketCount*4)
- for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- throw("bad map state")
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.Hasher(k, uintptr(h.hash0))
- if hash&newbit != 0 {
- useY = 1
- }
- }
-
- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
- dst := &xy[useY] // evacuation destination
-
- if dst.i == abi.OldMapBucketCount {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, abi.OldMapBucketCount*4)
- }
- dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
- // Copy key.
- if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled {
- // Write with a write barrier.
- *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
- } else {
- *(*uint32)(dst.k) = *(*uint32)(k)
- }
-
- typedmemmove(t.Elem, dst.e, e)
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, 4)
- dst.e = add(dst.e, uintptr(t.ValueSize))
- }
- }
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
- b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.BucketSize) - dataOffset
- memclrHasPointers(ptr, n)
- }
- }
-
- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
- }
-}
diff --git a/src/runtime/map_fast64_swiss.go b/src/runtime/map_fast64.go
index 8b7fcf88e8..8640acf6a6 100644
--- a/src/runtime/map_fast64_swiss.go
+++ b/src/runtime/map_fast64.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.swissmap
-
package runtime
import (
@@ -15,7 +13,7 @@ import (
// Functions below pushed from internal/runtime/maps.
//go:linkname mapaccess1_fast64
-func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer
+func mapaccess1_fast64(t *abi.MapType, m *maps.Map, key uint64) unsafe.Pointer
// mapaccess2_fast64 should be an internal detail,
// but widely used packages access it using linkname.
@@ -26,7 +24,7 @@ func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Poin
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_fast64
-func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Pointer, bool)
+func mapaccess2_fast64(t *abi.MapType, m *maps.Map, key uint64) (unsafe.Pointer, bool)
// mapassign_fast64 should be an internal detail,
// but widely used packages access it using linkname.
@@ -38,7 +36,7 @@ func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Poi
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64
-func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer
+func mapassign_fast64(t *abi.MapType, m *maps.Map, key uint64) unsafe.Pointer
// mapassign_fast64ptr should be an internal detail,
// but widely used packages access it using linkname.
@@ -50,7 +48,7 @@ func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Point
// See go.dev/issue/67401.
//
//go:linkname mapassign_fast64ptr
-func mapassign_fast64ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
+func mapassign_fast64ptr(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
//go:linkname mapdelete_fast64
-func mapdelete_fast64(t *abi.SwissMapType, m *maps.Map, key uint64)
+func mapdelete_fast64(t *abi.MapType, m *maps.Map, key uint64)
diff --git a/src/runtime/map_fast64_noswiss.go b/src/runtime/map_fast64_noswiss.go
deleted file mode 100644
index abb272d2b6..0000000000
--- a/src/runtime/map_fast64_noswiss.go
+++ /dev/null
@@ -1,502 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-import (
- "internal/abi"
- "internal/goarch"
- "internal/runtime/sys"
- "unsafe"
-)
-
-func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0])
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
- if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0])
-}
-
-// mapaccess2_fast64 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapaccess2_fast64
-func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0]), false
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
- if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
- return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize)), true
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// mapassign_fast64 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/bytedance/sonic
-// - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_fast64
-func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := sys.GetCallerPC()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
- hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapassign.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast64(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-
- var insertb *bmap
- var inserti uintptr
- var insertk unsafe.Pointer
-
-bucketloop:
- for {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- if isEmpty(b.tophash[i]) {
- if insertb == nil {
- insertb = b
- inserti = i
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
- if k != key {
- continue
- }
- insertb = b
- inserti = i
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
- // store new key at insert position
- *(*uint64)(insertk) = key
-
- h.count++
-
-done:
- elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
- return elem
-}
-
-// mapassign_fast64ptr should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/bytedance/sonic
-// - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_fast64ptr
-func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := sys.GetCallerPC()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
- hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapassign.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast64(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-
- var insertb *bmap
- var inserti uintptr
- var insertk unsafe.Pointer
-
-bucketloop:
- for {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- if isEmpty(b.tophash[i]) {
- if insertb == nil {
- insertb = b
- inserti = i
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
- if k != key {
- continue
- }
- insertb = b
- inserti = i
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
- // store new key at insert position
- *(*unsafe.Pointer)(insertk) = key
-
- h.count++
-
-done:
- elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
- return elem
-}
-
-func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
- }
- if h == nil || h.count == 0 {
- return
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
-
- hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapdelete
- h.flags ^= hashWriting
-
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_fast64(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
- bOrig := b
-search:
- for ; b != nil; b = b.overflow(t) {
- for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
- if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
- continue
- }
- // Only clear key if there are pointers in it.
- if t.Key.Pointers() {
- if goarch.PtrSize == 8 {
- *(*unsafe.Pointer)(k) = nil
- } else {
- // There are three ways to squeeze at one or more 32 bit pointers into 64 bits.
- // Just call memclrHasPointers instead of trying to handle all cases here.
- memclrHasPointers(k, 8)
- }
- }
- e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
- if t.Elem.Pointers() {
- memclrHasPointers(e, t.Elem.Size_)
- } else {
- memclrNoHeapPointers(e, t.Elem.Size_)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- if i == abi.OldMapBucketCount-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
- }
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = abi.OldMapBucketCount - 1
- } else {
- i--
- }
- if b.tophash[i] != emptyOne {
- break
- }
- }
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = uint32(rand())
- }
- break search
- }
- }
-
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
-}
-
-func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate_fast64(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate_fast64(t, h, h.nevacuate)
- }
-}
-
-func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
-
- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, abi.OldMapBucketCount*8)
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, abi.OldMapBucketCount*8)
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, abi.OldMapBucketCount*8)
- for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- throw("bad map state")
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.Hasher(k, uintptr(h.hash0))
- if hash&newbit != 0 {
- useY = 1
- }
- }
-
- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
- dst := &xy[useY] // evacuation destination
-
- if dst.i == abi.OldMapBucketCount {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, abi.OldMapBucketCount*8)
- }
- dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
- // Copy key.
- if t.Key.Pointers() && writeBarrier.enabled {
- if goarch.PtrSize == 8 {
- // Write with a write barrier.
- *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
- } else {
- // There are three ways to squeeze at least one 32 bit pointer into 64 bits.
- // Give up and call typedmemmove.
- typedmemmove(t.Key, dst.k, k)
- }
- } else {
- *(*uint64)(dst.k) = *(*uint64)(k)
- }
-
- typedmemmove(t.Elem, dst.e, e)
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, 8)
- dst.e = add(dst.e, uintptr(t.ValueSize))
- }
- }
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
- b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.BucketSize) - dataOffset
- memclrHasPointers(ptr, n)
- }
- }
-
- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
- }
-}
diff --git a/src/runtime/map_faststr_swiss.go b/src/runtime/map_faststr.go
index 23f6c1e810..5a7b52d037 100644
--- a/src/runtime/map_faststr_swiss.go
+++ b/src/runtime/map_faststr.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.swissmap
-
package runtime
import (
@@ -15,7 +13,7 @@ import (
// Functions below pushed from internal/runtime/maps.
//go:linkname mapaccess1_faststr
-func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Pointer
+func mapaccess1_faststr(t *abi.MapType, m *maps.Map, ky string) unsafe.Pointer
// mapaccess2_faststr should be an internal detail,
// but widely used packages access it using linkname.
@@ -26,7 +24,7 @@ func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Poin
// See go.dev/issue/67401.
//
//go:linkname mapaccess2_faststr
-func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Pointer, bool)
+func mapaccess2_faststr(t *abi.MapType, m *maps.Map, ky string) (unsafe.Pointer, bool)
// mapassign_faststr should be an internal detail,
// but widely used packages access it using linkname.
@@ -38,7 +36,7 @@ func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Poi
// See go.dev/issue/67401.
//
//go:linkname mapassign_faststr
-func mapassign_faststr(t *abi.SwissMapType, m *maps.Map, s string) unsafe.Pointer
+func mapassign_faststr(t *abi.MapType, m *maps.Map, s string) unsafe.Pointer
//go:linkname mapdelete_faststr
-func mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, ky string)
+func mapdelete_faststr(t *abi.MapType, m *maps.Map, ky string)
diff --git a/src/runtime/map_faststr_noswiss.go b/src/runtime/map_faststr_noswiss.go
deleted file mode 100644
index e8b6a3f1ae..0000000000
--- a/src/runtime/map_faststr_noswiss.go
+++ /dev/null
@@ -1,507 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-import (
- "internal/abi"
- "internal/goarch"
- "internal/runtime/sys"
- "unsafe"
-)
-
-func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0])
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
- key := stringStructOf(&ky)
- if h.B == 0 {
- // One-bucket table.
- b := (*bmap)(h.buckets)
- if key.len < 32 {
- // short key, doing lots of comparisons is ok
- for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || isEmpty(b.tophash[i]) {
- if b.tophash[i] == emptyRest {
- break
- }
- continue
- }
- if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
- }
- }
- return unsafe.Pointer(&zeroVal[0])
- }
- // long key, try not to do more comparisons than necessary
- keymaybe := uintptr(abi.OldMapBucketCount)
- for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || isEmpty(b.tophash[i]) {
- if b.tophash[i] == emptyRest {
- break
- }
- continue
- }
- if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
- }
- // check first 4 bytes
- if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
- continue
- }
- // check last 4 bytes
- if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
- continue
- }
- if keymaybe != abi.OldMapBucketCount {
- // Two keys are potential matches. Use hash to distinguish them.
- goto dohash
- }
- keymaybe = i
- }
- if keymaybe != abi.OldMapBucketCount {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
- if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
- }
- }
- return unsafe.Pointer(&zeroVal[0])
- }
-dohash:
- hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
- for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || b.tophash[i] != top {
- continue
- }
- if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0])
-}
-
-// mapaccess2_faststr should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapaccess2_faststr
-func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(&zeroVal[0]), false
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
- key := stringStructOf(&ky)
- if h.B == 0 {
- // One-bucket table.
- b := (*bmap)(h.buckets)
- if key.len < 32 {
- // short key, doing lots of comparisons is ok
- for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || isEmpty(b.tophash[i]) {
- if b.tophash[i] == emptyRest {
- break
- }
- continue
- }
- if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
- }
- // long key, try not to do more comparisons than necessary
- keymaybe := uintptr(abi.OldMapBucketCount)
- for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || isEmpty(b.tophash[i]) {
- if b.tophash[i] == emptyRest {
- break
- }
- continue
- }
- if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
- }
- // check first 4 bytes
- if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
- continue
- }
- // check last 4 bytes
- if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
- continue
- }
- if keymaybe != abi.OldMapBucketCount {
- // Two keys are potential matches. Use hash to distinguish them.
- goto dohash
- }
- keymaybe = i
- }
- if keymaybe != abi.OldMapBucketCount {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
- if memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
- }
-dohash:
- hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
- for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || b.tophash[i] != top {
- continue
- }
- if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// mapassign_faststr should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/bytedance/sonic
-// - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_faststr
-func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := sys.GetCallerPC()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
- key := stringStructOf(&s)
- hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapassign.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_faststr(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
- top := tophash(hash)
-
- var insertb *bmap
- var inserti uintptr
- var insertk unsafe.Pointer
-
-bucketloop:
- for {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- if b.tophash[i] != top {
- if isEmpty(b.tophash[i]) && insertb == nil {
- insertb = b
- inserti = i
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
- if k.len != key.len {
- continue
- }
- if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
- continue
- }
- // already have a mapping for key. Update it.
- inserti = i
- insertb = b
- // Overwrite existing key, so it can be garbage collected.
- // The size is already guaranteed to be set correctly.
- k.str = key.str
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if insertb == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- insertb = h.newoverflow(t, b)
- inserti = 0 // not necessary, but avoids needlessly spilling inserti
- }
- insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = top // mask inserti to avoid bounds checks
-
- insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
- // store new key at insert position
- *((*stringStruct)(insertk)) = *key
- h.count++
-
-done:
- elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
- return elem
-}
-
-func mapdelete_faststr(t *maptype, h *hmap, ky string) {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
- }
- if h == nil || h.count == 0 {
- return
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
-
- key := stringStructOf(&ky)
- hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher for consistency with mapdelete
- h.flags ^= hashWriting
-
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork_faststr(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
- bOrig := b
- top := tophash(hash)
-search:
- for ; b != nil; b = b.overflow(t) {
- for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
- k := (*stringStruct)(kptr)
- if k.len != key.len || b.tophash[i] != top {
- continue
- }
- if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
- continue
- }
- // Clear key's pointer.
- k.str = nil
- e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
- if t.Elem.Pointers() {
- memclrHasPointers(e, t.Elem.Size_)
- } else {
- memclrNoHeapPointers(e, t.Elem.Size_)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- if i == abi.OldMapBucketCount-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
- }
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = abi.OldMapBucketCount - 1
- } else {
- i--
- }
- if b.tophash[i] != emptyOne {
- break
- }
- }
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = uint32(rand())
- }
- break search
- }
- }
-
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
-}
-
-func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate_faststr(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate_faststr(t, h, h.nevacuate)
- }
-}
-
-func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
-
- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, abi.OldMapBucketCount*2*goarch.PtrSize)
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, abi.OldMapBucketCount*2*goarch.PtrSize)
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, abi.OldMapBucketCount*2*goarch.PtrSize)
- for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- throw("bad map state")
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.Hasher(k, uintptr(h.hash0))
- if hash&newbit != 0 {
- useY = 1
- }
- }
-
- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
- dst := &xy[useY] // evacuation destination
-
- if dst.i == abi.OldMapBucketCount {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, abi.OldMapBucketCount*2*goarch.PtrSize)
- }
- dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
- // Copy key.
- *(*string)(dst.k) = *(*string)(k)
-
- typedmemmove(t.Elem, dst.e, e)
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, 2*goarch.PtrSize)
- dst.e = add(dst.e, uintptr(t.ValueSize))
- }
- }
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
- b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.BucketSize) - dataOffset
- memclrHasPointers(ptr, n)
- }
- }
-
- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
- }
-}
diff --git a/src/runtime/map_noswiss.go b/src/runtime/map_noswiss.go
deleted file mode 100644
index 7b3c98eb88..0000000000
--- a/src/runtime/map_noswiss.go
+++ /dev/null
@@ -1,1891 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-// This file contains the implementation of Go's map type.
-//
-// A map is just a hash table. The data is arranged
-// into an array of buckets. Each bucket contains up to
-// 8 key/elem pairs. The low-order bits of the hash are
-// used to select a bucket. Each bucket contains a few
-// high-order bits of each hash to distinguish the entries
-// within a single bucket.
-//
-// If more than 8 keys hash to a bucket, we chain on
-// extra buckets.
-//
-// When the hashtable grows, we allocate a new array
-// of buckets twice as big. Buckets are incrementally
-// copied from the old bucket array to the new bucket array.
-//
-// Map iterators walk through the array of buckets and
-// return the keys in walk order (bucket #, then overflow
-// chain order, then bucket index). To maintain iteration
-// semantics, we never move keys within their bucket (if
-// we did, keys might be returned 0 or 2 times). When
-// growing the table, iterators remain iterating through the
-// old table and must check the new table if the bucket
-// they are iterating through has been moved ("evacuated")
-// to the new table.
-
-// Picking loadFactor: too large and we have lots of overflow
-// buckets, too small and we waste a lot of space. I wrote
-// a simple program to check some stats for different loads:
-// (64-bit, 8 byte keys and elems)
-// loadFactor %overflow bytes/entry hitprobe missprobe
-// 4.00 2.13 20.77 3.00 4.00
-// 4.50 4.05 17.30 3.25 4.50
-// 5.00 6.85 14.77 3.50 5.00
-// 5.50 10.55 12.94 3.75 5.50
-// 6.00 15.27 11.67 4.00 6.00
-// 6.50 20.90 10.79 4.25 6.50
-// 7.00 27.14 10.15 4.50 7.00
-// 7.50 34.03 9.73 4.75 7.50
-// 8.00 41.10 9.40 5.00 8.00
-//
-// %overflow = percentage of buckets which have an overflow bucket
-// bytes/entry = overhead bytes used per key/elem pair
-// hitprobe = # of entries to check when looking up a present key
-// missprobe = # of entries to check when looking up an absent key
-//
-// Keep in mind this data is for maximally loaded tables, i.e. just
-// before the table grows. Typical tables will be somewhat less loaded.
-
-import (
- "internal/abi"
- "internal/goarch"
- "internal/runtime/atomic"
- "internal/runtime/maps"
- "internal/runtime/math"
- "internal/runtime/sys"
- "unsafe"
-)
-
-type maptype = abi.OldMapType
-
-const (
- // Maximum number of key/elem pairs a bucket can hold.
- bucketCntBits = abi.OldMapBucketCountBits
-
- // Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
- // Because of minimum alignment rules, bucketCnt is known to be at least 8.
- // Represent as loadFactorNum/loadFactorDen, to allow integer math.
- loadFactorDen = 2
- loadFactorNum = loadFactorDen * abi.OldMapBucketCount * 13 / 16
-
- // data offset should be the size of the bmap struct, but needs to be
- // aligned correctly. For amd64p32 this means 64-bit alignment
- // even though pointers are 32 bit.
- dataOffset = unsafe.Offsetof(struct {
- b bmap
- v int64
- }{}.v)
-
- // Possible tophash values. We reserve a few possibilities for special marks.
- // Each bucket (including its overflow buckets, if any) will have either all or none of its
- // entries in the evacuated* states (except during the evacuate() method, which only happens
- // during map writes and thus no one else can observe the map during that time).
- emptyRest = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
- emptyOne = 1 // this cell is empty
- evacuatedX = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
- evacuatedY = 3 // same as above, but evacuated to second half of larger table.
- evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
- minTopHash = 5 // minimum tophash for a normal filled cell.
-
- // flags
- iterator = 1 // there may be an iterator using buckets
- oldIterator = 2 // there may be an iterator using oldbuckets
- hashWriting = 4 // a goroutine is writing to the map
- sameSizeGrow = 8 // the current map growth is to a new map of the same size
-
- // sentinel bucket ID for iterator checks
- noCheck = 1<<(8*goarch.PtrSize) - 1
-)
-
-// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
-func isEmpty(x uint8) bool {
- return x <= emptyOne
-}
-
-// A header for a Go map.
-type hmap struct {
- // Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
- // Make sure this stays in sync with the compiler's definition.
- count int // # live cells == size of map. Must be first (used by len() builtin)
- flags uint8
- B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
- noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
- hash0 uint32 // hash seed
-
- buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
- oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
- nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
- clearSeq uint64
-
- extra *mapextra // optional fields
-}
-
-// mapextra holds fields that are not present on all maps.
-type mapextra struct {
- // If both key and elem do not contain pointers and are inline, then we mark bucket
- // type as containing no pointers. This avoids scanning such maps.
- // However, bmap.overflow is a pointer. In order to keep overflow buckets
- // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
- // overflow and oldoverflow are only used if key and elem do not contain pointers.
- // overflow contains overflow buckets for hmap.buckets.
- // oldoverflow contains overflow buckets for hmap.oldbuckets.
- // The indirection allows to store a pointer to the slice in hiter.
- overflow *[]*bmap
- oldoverflow *[]*bmap
-
- // nextOverflow holds a pointer to a free overflow bucket.
- nextOverflow *bmap
-}
-
-// A bucket for a Go map.
-type bmap struct {
- // tophash generally contains the top byte of the hash value
- // for each key in this bucket. If tophash[0] < minTopHash,
- // tophash[0] is a bucket evacuation state instead.
- tophash [abi.OldMapBucketCount]uint8
- // Followed by bucketCnt keys and then bucketCnt elems.
- // NOTE: packing all the keys together and then all the elems together makes the
- // code a bit more complicated than alternating key/elem/key/elem/... but it allows
- // us to eliminate padding which would be needed for, e.g., map[int64]int8.
- // Followed by an overflow pointer.
-}
-
-// A hash iteration structure.
-// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
-// and reflect/value.go to match the layout of this structure.
-type hiter struct {
- key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
- elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
- t *maptype
- h *hmap
- buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
- bptr *bmap // current bucket
- overflow *[]*bmap // keeps overflow buckets of hmap.buckets alive
- oldoverflow *[]*bmap // keeps overflow buckets of hmap.oldbuckets alive
- startBucket uintptr // bucket iteration started at
- offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
- wrapped bool // already wrapped around from end of bucket array to beginning
- B uint8
- i uint8
- bucket uintptr
- checkBucket uintptr
- clearSeq uint64
-}
-
-// bucketShift returns 1<<b, optimized for code generation.
-func bucketShift(b uint8) uintptr {
- // Masking the shift amount allows overflow checks to be elided.
- return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
-}
-
-// bucketMask returns 1<<b - 1, optimized for code generation.
-func bucketMask(b uint8) uintptr {
- return bucketShift(b) - 1
-}
-
-// tophash calculates the tophash value for hash.
-func tophash(hash uintptr) uint8 {
- top := uint8(hash >> (goarch.PtrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- return top
-}
-
-func evacuated(b *bmap) bool {
- h := b.tophash[0]
- return h > emptyOne && h < minTopHash
-}
-
-func (b *bmap) overflow(t *maptype) *bmap {
- return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
-}
-
-func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
-}
-
-func (b *bmap) keys() unsafe.Pointer {
- return add(unsafe.Pointer(b), dataOffset)
-}
-
-// incrnoverflow increments h.noverflow.
-// noverflow counts the number of overflow buckets.
-// This is used to trigger same-size map growth.
-// See also tooManyOverflowBuckets.
-// To keep hmap small, noverflow is a uint16.
-// When there are few buckets, noverflow is an exact count.
-// When there are many buckets, noverflow is an approximate count.
-func (h *hmap) incrnoverflow() {
- // We trigger same-size map growth if there are
- // as many overflow buckets as buckets.
- // We need to be able to count to 1<<h.B.
- if h.B < 16 {
- h.noverflow++
- return
- }
- // Increment with probability 1/(1<<(h.B-15)).
- // When we reach 1<<15 - 1, we will have approximately
- // as many overflow buckets as buckets.
- mask := uint32(1)<<(h.B-15) - 1
- // Example: if h.B == 18, then mask == 7,
- // and rand() & 7 == 0 with probability 1/8.
- if uint32(rand())&mask == 0 {
- h.noverflow++
- }
-}
-
-func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
- var ovf *bmap
- if h.extra != nil && h.extra.nextOverflow != nil {
- // We have preallocated overflow buckets available.
- // See makeBucketArray for more details.
- ovf = h.extra.nextOverflow
- if ovf.overflow(t) == nil {
- // We're not at the end of the preallocated overflow buckets. Bump the pointer.
- h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
- } else {
- // This is the last preallocated overflow bucket.
- // Reset the overflow pointer on this bucket,
- // which was set to a non-nil sentinel value.
- ovf.setoverflow(t, nil)
- h.extra.nextOverflow = nil
- }
- } else {
- ovf = (*bmap)(newobject(t.Bucket))
- }
- h.incrnoverflow()
- if !t.Bucket.Pointers() {
- h.createOverflow()
- *h.extra.overflow = append(*h.extra.overflow, ovf)
- }
- b.setoverflow(t, ovf)
- return ovf
-}
-
-func (h *hmap) createOverflow() {
- if h.extra == nil {
- h.extra = new(mapextra)
- }
- if h.extra.overflow == nil {
- h.extra.overflow = new([]*bmap)
- }
-}
-
-func makemap64(t *maptype, hint int64, h *hmap) *hmap {
- if int64(int(hint)) != hint {
- hint = 0
- }
- return makemap(t, int(hint), h)
-}
-
-// makemap_small implements Go map creation for make(map[k]v) and
-// make(map[k]v, hint) when hint is known to be at most bucketCnt
-// at compile time and the map needs to be allocated on the heap.
-//
-// makemap_small should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/bytedance/sonic
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname makemap_small
-func makemap_small() *hmap {
- h := new(hmap)
- h.hash0 = uint32(rand())
- return h
-}
-
-// makemap implements Go map creation for make(map[k]v, hint).
-// If the compiler has determined that the map or the first bucket
-// can be created on the stack, h and/or bucket may be non-nil.
-// If h != nil, the map can be created directly in h.
-// If h.buckets != nil, bucket pointed to can be used as the first bucket.
-//
-// makemap should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname makemap
-func makemap(t *maptype, hint int, h *hmap) *hmap {
- mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
- if overflow || mem > maxAlloc {
- hint = 0
- }
-
- // initialize Hmap
- if h == nil {
- h = new(hmap)
- }
- h.hash0 = uint32(rand())
-
- // Find the size parameter B which will hold the requested # of elements.
- // For hint < 0 overLoadFactor returns false since hint < bucketCnt.
- B := uint8(0)
- for overLoadFactor(hint, B) {
- B++
- }
- h.B = B
-
- // allocate initial hash table
- // if B == 0, the buckets field is allocated lazily later (in mapassign)
- // If hint is large zeroing this memory could take a while.
- if h.B != 0 {
- var nextOverflow *bmap
- h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
- if nextOverflow != nil {
- h.extra = new(mapextra)
- h.extra.nextOverflow = nextOverflow
- }
- }
-
- return h
-}
-
-// makeBucketArray initializes a backing array for map buckets.
-// 1<<b is the minimum number of buckets to allocate.
-// dirtyalloc should either be nil or a bucket array previously
-// allocated by makeBucketArray with the same t and b parameters.
-// If dirtyalloc is nil a new backing array will be alloced and
-// otherwise dirtyalloc will be cleared and reused as backing array.
-func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
- base := bucketShift(b)
- nbuckets := base
- // For small b, overflow buckets are unlikely.
- // Avoid the overhead of the calculation.
- if b >= 4 {
- // Add on the estimated number of overflow buckets
- // required to insert the median number of elements
- // used with this value of b.
- nbuckets += bucketShift(b - 4)
- sz := t.Bucket.Size_ * nbuckets
- up := roundupsize(sz, !t.Bucket.Pointers())
- if up != sz {
- nbuckets = up / t.Bucket.Size_
- }
- }
-
- if dirtyalloc == nil {
- buckets = newarray(t.Bucket, int(nbuckets))
- } else {
- // dirtyalloc was previously generated by
- // the above newarray(t.Bucket, int(nbuckets))
- // but may not be empty.
- buckets = dirtyalloc
- size := t.Bucket.Size_ * nbuckets
- if t.Bucket.Pointers() {
- memclrHasPointers(buckets, size)
- } else {
- memclrNoHeapPointers(buckets, size)
- }
- }
-
- if base != nbuckets {
- // We preallocated some overflow buckets.
- // To keep the overhead of tracking these overflow buckets to a minimum,
- // we use the convention that if a preallocated overflow bucket's overflow
- // pointer is nil, then there are more available by bumping the pointer.
- // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
- nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
- last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
- last.setoverflow(t, (*bmap)(buckets))
- }
- return buckets, nextOverflow
-}
-
-// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
-// it will return a reference to the zero object for the elem type if
-// the key is not in the map.
-// NOTE: The returned pointer may keep the whole map live, so don't
-// hold onto it for very long.
-func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- pc := abi.FuncPCABIInternal(mapaccess1)
- racereadpc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.Key, key, callerpc, pc)
- }
- if msanenabled && h != nil {
- msanread(key, t.Key.Size_)
- }
- if asanenabled && h != nil {
- asanread(key, t.Key.Size_)
- }
- if h == nil || h.count == 0 {
- if err := maps.OldMapKeyError(t, key); err != nil {
- panic(err) // see issue 23734
- }
- return unsafe.Pointer(&zeroVal[0])
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
- hash := t.Hasher(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
-bucketloop:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- if t.IndirectKey() {
- k = *((*unsafe.Pointer)(k))
- }
- if t.Key.Equal(key, k) {
- e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- if t.IndirectElem() {
- e = *((*unsafe.Pointer)(e))
- }
- return e
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0])
-}
-
-// mapaccess2 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapaccess2
-func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- pc := abi.FuncPCABIInternal(mapaccess2)
- racereadpc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.Key, key, callerpc, pc)
- }
- if msanenabled && h != nil {
- msanread(key, t.Key.Size_)
- }
- if asanenabled && h != nil {
- asanread(key, t.Key.Size_)
- }
- if h == nil || h.count == 0 {
- if err := maps.OldMapKeyError(t, key); err != nil {
- panic(err) // see issue 23734
- }
- return unsafe.Pointer(&zeroVal[0]), false
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
- hash := t.Hasher(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
-bucketloop:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- if t.IndirectKey() {
- k = *((*unsafe.Pointer)(k))
- }
- if t.Key.Equal(key, k) {
- e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- if t.IndirectElem() {
- e = *((*unsafe.Pointer)(e))
- }
- return e, true
- }
- }
- }
- return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// returns both key and elem. Used by map iterator.
-func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
- if h == nil || h.count == 0 {
- return nil, nil
- }
- hash := t.Hasher(key, uintptr(h.hash0))
- m := bucketMask(h.B)
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
- if c := h.oldbuckets; c != nil {
- if !h.sameSizeGrow() {
- // There used to be half as many buckets; mask down one more power of two.
- m >>= 1
- }
- oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := tophash(hash)
-bucketloop:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- if t.IndirectKey() {
- k = *((*unsafe.Pointer)(k))
- }
- if t.Key.Equal(key, k) {
- e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- if t.IndirectElem() {
- e = *((*unsafe.Pointer)(e))
- }
- return k, e
- }
- }
- }
- return nil, nil
-}
-
-func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
- e := mapaccess1(t, h, key)
- if e == unsafe.Pointer(&zeroVal[0]) {
- return zero
- }
- return e
-}
-
-func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
- e := mapaccess1(t, h, key)
- if e == unsafe.Pointer(&zeroVal[0]) {
- return zero, false
- }
- return e, true
-}
-
-// Like mapaccess, but allocates a slot for the key if it is not present in the map.
-//
-// mapassign should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/bytedance/sonic
-// - github.com/RomiChan/protobuf
-// - github.com/segmentio/encoding
-// - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign
-func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if h == nil {
- panic(plainError("assignment to entry in nil map"))
- }
- if raceenabled {
- callerpc := sys.GetCallerPC()
- pc := abi.FuncPCABIInternal(mapassign)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.Key, key, callerpc, pc)
- }
- if msanenabled {
- msanread(key, t.Key.Size_)
- }
- if asanenabled {
- asanread(key, t.Key.Size_)
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
- hash := t.Hasher(key, uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher, since t.hasher may panic,
- // in which case we have not actually done a write.
- h.flags ^= hashWriting
-
- if h.buckets == nil {
- h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
- }
-
-again:
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
- top := tophash(hash)
-
- var inserti *uint8
- var insertk unsafe.Pointer
- var elem unsafe.Pointer
-bucketloop:
- for {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- if b.tophash[i] != top {
- if isEmpty(b.tophash[i]) && inserti == nil {
- inserti = &b.tophash[i]
- insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- }
- if b.tophash[i] == emptyRest {
- break bucketloop
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- if t.IndirectKey() {
- k = *((*unsafe.Pointer)(k))
- }
- if !t.Key.Equal(key, k) {
- continue
- }
- // already have a mapping for key. Update it.
- if t.NeedKeyUpdate() {
- typedmemmove(t.Key, k, key)
- }
- elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- goto done
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // Did not find mapping for key. Allocate new cell & add entry.
-
- // If we hit the max load factor or we have too many overflow buckets,
- // and we're not already in the middle of growing, start growing.
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if inserti == nil {
- // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
- newb := h.newoverflow(t, b)
- inserti = &newb.tophash[0]
- insertk = add(unsafe.Pointer(newb), dataOffset)
- elem = add(insertk, abi.OldMapBucketCount*uintptr(t.KeySize))
- }
-
- // store new key/elem at insert position
- if t.IndirectKey() {
- kmem := newobject(t.Key)
- *(*unsafe.Pointer)(insertk) = kmem
- insertk = kmem
- }
- if t.IndirectElem() {
- vmem := newobject(t.Elem)
- *(*unsafe.Pointer)(elem) = vmem
- }
- typedmemmove(t.Key, insertk, key)
- *inserti = top
- h.count++
-
-done:
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
- if t.IndirectElem() {
- elem = *((*unsafe.Pointer)(elem))
- }
- return elem
-}
-
-// mapdelete should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapdelete
-func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- pc := abi.FuncPCABIInternal(mapdelete)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.Key, key, callerpc, pc)
- }
- if msanenabled && h != nil {
- msanread(key, t.Key.Size_)
- }
- if asanenabled && h != nil {
- asanread(key, t.Key.Size_)
- }
- if h == nil || h.count == 0 {
- if err := maps.OldMapKeyError(t, key); err != nil {
- panic(err) // see issue 23734
- }
- return
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
-
- hash := t.Hasher(key, uintptr(h.hash0))
-
- // Set hashWriting after calling t.hasher, since t.hasher may panic,
- // in which case we have not actually done a write (delete).
- h.flags ^= hashWriting
-
- bucket := hash & bucketMask(h.B)
- if h.growing() {
- growWork(t, h, bucket)
- }
- b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
- bOrig := b
- top := tophash(hash)
-search:
- for ; b != nil; b = b.overflow(t) {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == emptyRest {
- break search
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
- k2 := k
- if t.IndirectKey() {
- k2 = *((*unsafe.Pointer)(k2))
- }
- if !t.Key.Equal(key, k2) {
- continue
- }
- // Only clear key if there are pointers in it.
- if t.IndirectKey() {
- *(*unsafe.Pointer)(k) = nil
- } else if t.Key.Pointers() {
- memclrHasPointers(k, t.Key.Size_)
- }
- e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- if t.IndirectElem() {
- *(*unsafe.Pointer)(e) = nil
- } else if t.Elem.Pointers() {
- memclrHasPointers(e, t.Elem.Size_)
- } else {
- memclrNoHeapPointers(e, t.Elem.Size_)
- }
- b.tophash[i] = emptyOne
- // If the bucket now ends in a bunch of emptyOne states,
- // change those to emptyRest states.
- // It would be nice to make this a separate function, but
- // for loops are not currently inlineable.
- if i == abi.OldMapBucketCount-1 {
- if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
- goto notLast
- }
- } else {
- if b.tophash[i+1] != emptyRest {
- goto notLast
- }
- }
- for {
- b.tophash[i] = emptyRest
- if i == 0 {
- if b == bOrig {
- break // beginning of initial bucket, we're done.
- }
- // Find previous bucket, continue at its last entry.
- c := b
- for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
- }
- i = abi.OldMapBucketCount - 1
- } else {
- i--
- }
- if b.tophash[i] != emptyOne {
- break
- }
- }
- notLast:
- h.count--
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- if h.count == 0 {
- h.hash0 = uint32(rand())
- }
- break search
- }
- }
-
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
-}
-
-// mapiterinit initializes the hiter struct used for ranging over maps.
-// The hiter struct pointed to by 'it' is allocated on the stack
-// by the compilers order pass or on the heap by reflect_mapiterinit.
-// Both need to have zeroed hiter since the struct contains pointers.
-//
-// mapiterinit should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/bytedance/sonic
-// - github.com/goccy/go-json
-// - github.com/RomiChan/protobuf
-// - github.com/segmentio/encoding
-// - github.com/ugorji/go/codec
-// - github.com/wI2L/jettison
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapiterinit
-func mapiterinit(t *maptype, h *hmap, it *hiter) {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
- }
-
- it.t = t
- if h == nil || h.count == 0 {
- return
- }
-
- if unsafe.Sizeof(hiter{}) != 8+12*goarch.PtrSize {
- throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
- }
- it.h = h
- it.clearSeq = h.clearSeq
-
- // grab snapshot of bucket state
- it.B = h.B
- it.buckets = h.buckets
- if !t.Bucket.Pointers() {
- // Allocate the current slice and remember pointers to both current and old.
- // This preserves all relevant overflow buckets alive even if
- // the table grows and/or overflow buckets are added to the table
- // while we are iterating.
- h.createOverflow()
- it.overflow = h.extra.overflow
- it.oldoverflow = h.extra.oldoverflow
- }
-
- // decide where to start
- r := uintptr(rand())
- it.startBucket = r & bucketMask(h.B)
- it.offset = uint8(r >> h.B & (abi.OldMapBucketCount - 1))
-
- // iterator state
- it.bucket = it.startBucket
-
- // Remember we have an iterator.
- // Can run concurrently with another mapiterinit().
- if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
- atomic.Or8(&h.flags, iterator|oldIterator)
- }
-
- mapiternext(it)
-}
-
-// mapiternext should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/bytedance/sonic
-// - github.com/RomiChan/protobuf
-// - github.com/segmentio/encoding
-// - github.com/ugorji/go/codec
-// - gonum.org/v1/gonum
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapiternext
-func mapiternext(it *hiter) {
- h := it.h
- if raceenabled {
- callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map iteration and map write")
- }
- t := it.t
- bucket := it.bucket
- b := it.bptr
- i := it.i
- checkBucket := it.checkBucket
-
-next:
- if b == nil {
- if bucket == it.startBucket && it.wrapped {
- // end of iteration
- it.key = nil
- it.elem = nil
- return
- }
- if h.growing() && it.B == h.B {
- // Iterator was started in the middle of a grow, and the grow isn't done yet.
- // If the bucket we're looking at hasn't been filled in yet (i.e. the old
- // bucket hasn't been evacuated) then we need to iterate through the old
- // bucket and only return the ones that will be migrated to this bucket.
- oldbucket := bucket & it.h.oldbucketmask()
- b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
- if !evacuated(b) {
- checkBucket = bucket
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
- checkBucket = noCheck
- }
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
- checkBucket = noCheck
- }
- bucket++
- if bucket == bucketShift(it.B) {
- bucket = 0
- it.wrapped = true
- }
- i = 0
- }
- for ; i < abi.OldMapBucketCount; i++ {
- offi := (i + it.offset) & (abi.OldMapBucketCount - 1)
- if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
- // TODO: emptyRest is hard to use here, as we start iterating
- // in the middle of a bucket. It's feasible, just tricky.
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
- if t.IndirectKey() {
- k = *((*unsafe.Pointer)(k))
- }
- e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
- if checkBucket != noCheck && !h.sameSizeGrow() {
- // Special case: iterator was started during a grow to a larger size
- // and the grow is not done yet. We're working on a bucket whose
- // oldbucket has not been evacuated yet. Or at least, it wasn't
- // evacuated when we started the bucket. So we're iterating
- // through the oldbucket, skipping any keys that will go
- // to the other new bucket (each oldbucket expands to two
- // buckets during a grow).
- if t.ReflexiveKey() || t.Key.Equal(k, k) {
- // If the item in the oldbucket is not destined for
- // the current new bucket in the iteration, skip it.
- hash := t.Hasher(k, uintptr(h.hash0))
- if hash&bucketMask(it.B) != checkBucket {
- continue
- }
- } else {
- // Hash isn't repeatable if k != k (NaNs). We need a
- // repeatable and randomish choice of which direction
- // to send NaNs during evacuation. We'll use the low
- // bit of tophash to decide which way NaNs go.
- // NOTE: this case is why we need two evacuate tophash
- // values, evacuatedX and evacuatedY, that differ in
- // their low bit.
- if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
- continue
- }
- }
- }
- if it.clearSeq == h.clearSeq &&
- ((b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
- !(t.ReflexiveKey() || t.Key.Equal(k, k))) {
- // This is the golden data, we can return it.
- // OR
- // key!=key, so the entry can't be deleted or updated, so we can just return it.
- // That's lucky for us because when key!=key we can't look it up successfully.
- it.key = k
- if t.IndirectElem() {
- e = *((*unsafe.Pointer)(e))
- }
- it.elem = e
- } else {
- // The hash table has grown since the iterator was started.
- // The golden data for this key is now somewhere else.
- // Check the current hash table for the data.
- // This code handles the case where the key
- // has been deleted, updated, or deleted and reinserted.
- // NOTE: we need to regrab the key as it has potentially been
- // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
- rk, re := mapaccessK(t, h, k)
- if rk == nil {
- continue // key has been deleted
- }
- it.key = rk
- it.elem = re
- }
- it.bucket = bucket
- if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
- it.bptr = b
- }
- it.i = i + 1
- it.checkBucket = checkBucket
- return
- }
- b = b.overflow(t)
- i = 0
- goto next
-}
-
-// mapclear deletes all keys from a map.
-// It is called by the compiler.
-func mapclear(t *maptype, h *hmap) {
- if raceenabled && h != nil {
- callerpc := sys.GetCallerPC()
- pc := abi.FuncPCABIInternal(mapclear)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
- }
-
- if h == nil || h.count == 0 {
- return
- }
-
- if h.flags&hashWriting != 0 {
- fatal("concurrent map writes")
- }
-
- h.flags ^= hashWriting
- h.flags &^= sameSizeGrow
- h.oldbuckets = nil
- h.nevacuate = 0
- h.noverflow = 0
- h.count = 0
- h.clearSeq++
-
- // Reset the hash seed to make it more difficult for attackers to
- // repeatedly trigger hash collisions. See issue 25237.
- h.hash0 = uint32(rand())
-
- // Keep the mapextra allocation but clear any extra information.
- if h.extra != nil {
- *h.extra = mapextra{}
- }
-
- // makeBucketArray clears the memory pointed to by h.buckets
- // and recovers any overflow buckets by generating them
- // as if h.buckets was newly alloced.
- _, nextOverflow := makeBucketArray(t, h.B, h.buckets)
- if nextOverflow != nil {
- // If overflow buckets are created then h.extra
- // will have been allocated during initial bucket creation.
- h.extra.nextOverflow = nextOverflow
- }
-
- if h.flags&hashWriting == 0 {
- fatal("concurrent map writes")
- }
- h.flags &^= hashWriting
-}
-
-func hashGrow(t *maptype, h *hmap) {
- // If we've hit the load factor, get bigger.
- // Otherwise, there are too many overflow buckets,
- // so keep the same number of buckets and "grow" laterally.
- bigger := uint8(1)
- if !overLoadFactor(h.count+1, h.B) {
- bigger = 0
- h.flags |= sameSizeGrow
- }
- oldbuckets := h.buckets
- newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
-
- flags := h.flags &^ (iterator | oldIterator)
- if h.flags&iterator != 0 {
- flags |= oldIterator
- }
- // commit the grow (atomic wrt gc)
- h.B += bigger
- h.flags = flags
- h.oldbuckets = oldbuckets
- h.buckets = newbuckets
- h.nevacuate = 0
- h.noverflow = 0
-
- if h.extra != nil && h.extra.overflow != nil {
- // Promote current overflow buckets to the old generation.
- if h.extra.oldoverflow != nil {
- throw("oldoverflow is not nil")
- }
- h.extra.oldoverflow = h.extra.overflow
- h.extra.overflow = nil
- }
- if nextOverflow != nil {
- if h.extra == nil {
- h.extra = new(mapextra)
- }
- h.extra.nextOverflow = nextOverflow
- }
-
- // the actual copying of the hash table data is done incrementally
- // by growWork() and evacuate().
-}
-
-// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
-func overLoadFactor(count int, B uint8) bool {
- return count > abi.OldMapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
-}
-
-// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
-// Note that most of these overflow buckets must be in sparse use;
-// if use was dense, then we'd have already triggered regular map growth.
-func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
- // If the threshold is too low, we do extraneous work.
- // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
- // "too many" means (approximately) as many overflow buckets as regular buckets.
- // See incrnoverflow for more details.
- if B > 15 {
- B = 15
- }
- // The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
- return noverflow >= uint16(1)<<(B&15)
-}
-
-// growing reports whether h is growing. The growth may be to the same size or bigger.
-func (h *hmap) growing() bool {
- return h.oldbuckets != nil
-}
-
-// sameSizeGrow reports whether the current growth is to a map of the same size.
-func (h *hmap) sameSizeGrow() bool {
- return h.flags&sameSizeGrow != 0
-}
-
-//go:linkname sameSizeGrowForIssue69110Test
-func sameSizeGrowForIssue69110Test(h *hmap) bool {
- return h.sameSizeGrow()
-}
-
-// noldbuckets calculates the number of buckets prior to the current map growth.
-func (h *hmap) noldbuckets() uintptr {
- oldB := h.B
- if !h.sameSizeGrow() {
- oldB--
- }
- return bucketShift(oldB)
-}
-
-// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
-func (h *hmap) oldbucketmask() uintptr {
- return h.noldbuckets() - 1
-}
-
-func growWork(t *maptype, h *hmap, bucket uintptr) {
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate(t, h, bucket&h.oldbucketmask())
-
- // evacuate one more oldbucket to make progress on growing
- if h.growing() {
- evacuate(t, h, h.nevacuate)
- }
-}
-
-func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
- b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
- return evacuated(b)
-}
-
-// evacDst is an evacuation destination.
-type evacDst struct {
- b *bmap // current destination bucket
- i int // key/elem index into b
- k unsafe.Pointer // pointer to current key storage
- e unsafe.Pointer // pointer to current elem storage
-}
-
-func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
- newbit := h.noldbuckets()
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
-
- // xy contains the x and y (low and high) evacuation destinations.
- var xy [2]evacDst
- x := &xy[0]
- x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
- x.k = add(unsafe.Pointer(x.b), dataOffset)
- x.e = add(x.k, abi.OldMapBucketCount*uintptr(t.KeySize))
-
- if !h.sameSizeGrow() {
- // Only calculate y pointers if we're growing bigger.
- // Otherwise GC can see bad pointers.
- y := &xy[1]
- y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
- y.k = add(unsafe.Pointer(y.b), dataOffset)
- y.e = add(y.k, abi.OldMapBucketCount*uintptr(t.KeySize))
- }
-
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- e := add(k, abi.OldMapBucketCount*uintptr(t.KeySize))
- for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
- top := b.tophash[i]
- if isEmpty(top) {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- throw("bad map state")
- }
- k2 := k
- if t.IndirectKey() {
- k2 = *((*unsafe.Pointer)(k2))
- }
- var useY uint8
- if !h.sameSizeGrow() {
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/elem to bucket x or bucket y).
- hash := t.Hasher(k2, uintptr(h.hash0))
- if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
- // If key != key (NaNs), then the hash could be (and probably
- // will be) entirely different from the old hash. Moreover,
- // it isn't reproducible. Reproducibility is required in the
- // presence of iterators, as our evacuation decision must
- // match whatever decision the iterator made.
- // Fortunately, we have the freedom to send these keys either
- // way. Also, tophash is meaningless for these kinds of keys.
- // We let the low bit of tophash drive the evacuation decision.
- // We recompute a new random tophash for the next level so
- // these keys will get evenly distributed across all buckets
- // after multiple grows.
- useY = top & 1
- top = tophash(hash)
- } else {
- if hash&newbit != 0 {
- useY = 1
- }
- }
- }
-
- if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
- throw("bad evacuatedN")
- }
-
- b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
- dst := &xy[useY] // evacuation destination
-
- if dst.i == abi.OldMapBucketCount {
- dst.b = h.newoverflow(t, dst.b)
- dst.i = 0
- dst.k = add(unsafe.Pointer(dst.b), dataOffset)
- dst.e = add(dst.k, abi.OldMapBucketCount*uintptr(t.KeySize))
- }
- dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
- if t.IndirectKey() {
- *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
- } else {
- typedmemmove(t.Key, dst.k, k) // copy elem
- }
- if t.IndirectElem() {
- *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
- } else {
- typedmemmove(t.Elem, dst.e, e)
- }
- dst.i++
- // These updates might push these pointers past the end of the
- // key or elem arrays. That's ok, as we have the overflow pointer
- // at the end of the bucket to protect against pointing past the
- // end of the bucket.
- dst.k = add(dst.k, uintptr(t.KeySize))
- dst.e = add(dst.e, uintptr(t.ValueSize))
- }
- }
- // Unlink the overflow buckets & clear key/elem to help GC.
- if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
- b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
- // Preserve b.tophash because the evacuation
- // state is maintained there.
- ptr := add(b, dataOffset)
- n := uintptr(t.BucketSize) - dataOffset
- memclrHasPointers(ptr, n)
- }
- }
-
- if oldbucket == h.nevacuate {
- advanceEvacuationMark(h, t, newbit)
- }
-}
-
-func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
- h.nevacuate++
- // Experiments suggest that 1024 is overkill by at least an order of magnitude.
- // Put it in there as a safeguard anyway, to ensure O(1) behavior.
- stop := h.nevacuate + 1024
- if stop > newbit {
- stop = newbit
- }
- for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
- h.nevacuate++
- }
- if h.nevacuate == newbit { // newbit == # of oldbuckets
- // Growing is all done. Free old main bucket array.
- h.oldbuckets = nil
- // Can discard old overflow buckets as well.
- // If they are still referenced by an iterator,
- // then the iterator holds a pointers to the slice.
- if h.extra != nil {
- h.extra.oldoverflow = nil
- }
- h.flags &^= sameSizeGrow
- }
-}
-
-// Reflect stubs. Called from ../reflect/asm_*.s
-
-// reflect_makemap is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - gitee.com/quant1x/gox
-// - github.com/modern-go/reflect2
-// - github.com/goccy/go-json
-// - github.com/RomiChan/protobuf
-// - github.com/segmentio/encoding
-// - github.com/v2pro/plz
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_makemap reflect.makemap
-func reflect_makemap(t *maptype, cap int) *hmap {
- // Check invariants and reflects math.
- if t.Key.Equal == nil {
- throw("runtime.reflect_makemap: unsupported map key type")
- }
- if t.Key.Size_ > abi.OldMapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
- t.Key.Size_ <= abi.OldMapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
- throw("key size wrong")
- }
- if t.Elem.Size_ > abi.OldMapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
- t.Elem.Size_ <= abi.OldMapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
- throw("elem size wrong")
- }
- if t.Key.Align_ > abi.OldMapBucketCount {
- throw("key align too big")
- }
- if t.Elem.Align_ > abi.OldMapBucketCount {
- throw("elem align too big")
- }
- if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
- throw("key size not a multiple of key align")
- }
- if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
- throw("elem size not a multiple of elem align")
- }
- if abi.OldMapBucketCount < 8 {
- throw("bucketsize too small for proper alignment")
- }
- if dataOffset%uintptr(t.Key.Align_) != 0 {
- throw("need padding in bucket (key)")
- }
- if dataOffset%uintptr(t.Elem.Align_) != 0 {
- throw("need padding in bucket (elem)")
- }
-
- return makemap(t, cap, nil)
-}
-
-// reflect_mapaccess is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - gitee.com/quant1x/gox
-// - github.com/modern-go/reflect2
-// - github.com/v2pro/plz
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapaccess reflect.mapaccess
-func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- elem, ok := mapaccess2(t, h, key)
- if !ok {
- // reflect wants nil for a missing element
- elem = nil
- }
- return elem
-}
-
-//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
-func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
- elem, ok := mapaccess2_faststr(t, h, key)
- if !ok {
- // reflect wants nil for a missing element
- elem = nil
- }
- return elem
-}
-
-// reflect_mapassign is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - gitee.com/quant1x/gox
-// - github.com/v2pro/plz
-//
-// Do not remove or change the type signature.
-//
-//go:linkname reflect_mapassign reflect.mapassign0
-func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
- p := mapassign(t, h, key)
- typedmemmove(t.Elem, p, elem)
-}
-
-//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
-func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
- p := mapassign_faststr(t, h, key)
- typedmemmove(t.Elem, p, elem)
-}
-
-//go:linkname reflect_mapdelete reflect.mapdelete
-func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
- mapdelete(t, h, key)
-}
-
-//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
-func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
- mapdelete_faststr(t, h, key)
-}
-
-// reflect_mapiterinit is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/modern-go/reflect2
-// - gitee.com/quant1x/gox
-// - github.com/v2pro/plz
-// - github.com/wI2L/jettison
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapiterinit reflect.mapiterinit
-func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
- mapiterinit(t, h, it)
-}
-
-// reflect_mapiternext is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - gitee.com/quant1x/gox
-// - github.com/modern-go/reflect2
-// - github.com/goccy/go-json
-// - github.com/v2pro/plz
-// - github.com/wI2L/jettison
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapiternext reflect.mapiternext
-func reflect_mapiternext(it *hiter) {
- mapiternext(it)
-}
-
-// reflect_mapiterkey was for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/goccy/go-json
-// - gonum.org/v1/gonum
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapiterkey reflect.mapiterkey
-func reflect_mapiterkey(it *hiter) unsafe.Pointer {
- return it.key
-}
-
-// reflect_mapiterelem was for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/goccy/go-json
-// - gonum.org/v1/gonum
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapiterelem reflect.mapiterelem
-func reflect_mapiterelem(it *hiter) unsafe.Pointer {
- return it.elem
-}
-
-// reflect_maplen is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-// - github.com/goccy/go-json
-// - github.com/wI2L/jettison
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_maplen reflect.maplen
-func reflect_maplen(h *hmap) int {
- if h == nil {
- return 0
- }
- if raceenabled {
- callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
- }
- return h.count
-}
-
-//go:linkname reflect_mapclear reflect.mapclear
-func reflect_mapclear(t *maptype, h *hmap) {
- mapclear(t, h)
-}
-
-//go:linkname reflectlite_maplen internal/reflectlite.maplen
-func reflectlite_maplen(h *hmap) int {
- if h == nil {
- return 0
- }
- if raceenabled {
- callerpc := sys.GetCallerPC()
- racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
- }
- return h.count
-}
-
-// mapinitnoop is a no-op function known the Go linker; if a given global
-// map (of the right size) is determined to be dead, the linker will
-// rewrite the relocation (from the package init func) from the outlined
-// map init function to this symbol. Defined in assembly so as to avoid
-// complications with instrumentation (coverage, etc).
-func mapinitnoop()
-
-// mapclone for implementing maps.Clone
-//
-//go:linkname mapclone maps.clone
-func mapclone(m any) any {
- e := efaceOf(&m)
- e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
- return m
-}
-
-// moveToBmap moves a bucket from src to dst. It returns the destination bucket or new destination bucket if it overflows
-// and the pos that the next key/value will be written, if pos == bucketCnt means needs to written in overflow bucket.
-func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
- for i := 0; i < abi.OldMapBucketCount; i++ {
- if isEmpty(src.tophash[i]) {
- continue
- }
-
- for ; pos < abi.OldMapBucketCount; pos++ {
- if isEmpty(dst.tophash[pos]) {
- break
- }
- }
-
- if pos == abi.OldMapBucketCount {
- dst = h.newoverflow(t, dst)
- pos = 0
- }
-
- srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
- srcEle := add(unsafe.Pointer(src), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
- dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
- dstEle := add(unsafe.Pointer(dst), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
-
- dst.tophash[pos] = src.tophash[i]
- if t.IndirectKey() {
- srcK = *(*unsafe.Pointer)(srcK)
- if t.NeedKeyUpdate() {
- kStore := newobject(t.Key)
- typedmemmove(t.Key, kStore, srcK)
- srcK = kStore
- }
- // Note: if NeedKeyUpdate is false, then the memory
- // used to store the key is immutable, so we can share
- // it between the original map and its clone.
- *(*unsafe.Pointer)(dstK) = srcK
- } else {
- typedmemmove(t.Key, dstK, srcK)
- }
- if t.IndirectElem() {
- srcEle = *(*unsafe.Pointer)(srcEle)
- eStore := newobject(t.Elem)
- typedmemmove(t.Elem, eStore, srcEle)
- *(*unsafe.Pointer)(dstEle) = eStore
- } else {
- typedmemmove(t.Elem, dstEle, srcEle)
- }
- pos++
- h.count++
- }
- return dst, pos
-}
-
-func mapclone2(t *maptype, src *hmap) *hmap {
- hint := src.count
- if overLoadFactor(hint, src.B) {
- // Note: in rare cases (e.g. during a same-sized grow) the map
- // can be overloaded. Make sure we don't allocate a destination
- // bucket array larger than the source bucket array.
- // This will cause the cloned map to be overloaded also,
- // but that's better than crashing. See issue 69110.
- hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen))
- }
- dst := makemap(t, hint, nil)
- dst.hash0 = src.hash0
- dst.nevacuate = 0
- // flags do not need to be copied here, just like a new map has no flags.
-
- if src.count == 0 {
- return dst
- }
-
- if src.flags&hashWriting != 0 {
- fatal("concurrent map clone and map write")
- }
-
- if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() {
- // Quick copy for small maps.
- dst.buckets = newobject(t.Bucket)
- dst.count = src.count
- typedmemmove(t.Bucket, dst.buckets, src.buckets)
- return dst
- }
-
- if dst.B == 0 {
- dst.buckets = newobject(t.Bucket)
- }
- dstArraySize := int(bucketShift(dst.B))
- srcArraySize := int(bucketShift(src.B))
- for i := 0; i < dstArraySize; i++ {
- dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
- pos := 0
- for j := 0; j < srcArraySize; j += dstArraySize {
- srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
- for srcBmap != nil {
- dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
- srcBmap = srcBmap.overflow(t)
- }
- }
- }
-
- if src.oldbuckets == nil {
- return dst
- }
-
- oldB := src.B
- srcOldbuckets := src.oldbuckets
- if !src.sameSizeGrow() {
- oldB--
- }
- oldSrcArraySize := int(bucketShift(oldB))
-
- for i := 0; i < oldSrcArraySize; i++ {
- srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
- if evacuated(srcBmap) {
- continue
- }
-
- if oldB >= dst.B { // main bucket bits in dst is less than oldB bits in src
- dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
- for dstBmap.overflow(t) != nil {
- dstBmap = dstBmap.overflow(t)
- }
- pos := 0
- for srcBmap != nil {
- dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
- srcBmap = srcBmap.overflow(t)
- }
- continue
- }
-
- // oldB < dst.B, so a single source bucket may go to multiple destination buckets.
- // Process entries one at a time.
- for srcBmap != nil {
- // move from oldBlucket to new bucket
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- if isEmpty(srcBmap.tophash[i]) {
- continue
- }
-
- if src.flags&hashWriting != 0 {
- fatal("concurrent map clone and map write")
- }
-
- srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
- if t.IndirectKey() {
- srcK = *((*unsafe.Pointer)(srcK))
- }
-
- srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
- if t.IndirectElem() {
- srcEle = *((*unsafe.Pointer)(srcEle))
- }
- dstEle := mapassign(t, dst, srcK)
- typedmemmove(t.Elem, dstEle, srcEle)
- }
- srcBmap = srcBmap.overflow(t)
- }
- }
- return dst
-}
-
-// keys for implementing maps.keys
-//
-//go:linkname keys maps.keys
-func keys(m any, p unsafe.Pointer) {
- e := efaceOf(&m)
- t := (*maptype)(unsafe.Pointer(e._type))
- h := (*hmap)(e.data)
-
- if h == nil || h.count == 0 {
- return
- }
- s := (*slice)(p)
- r := int(rand())
- offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1))
- if h.B == 0 {
- copyKeys(t, h, (*bmap)(h.buckets), s, offset)
- return
- }
- arraySize := int(bucketShift(h.B))
- buckets := h.buckets
- for i := 0; i < arraySize; i++ {
- bucket := (i + r) & (arraySize - 1)
- b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
- copyKeys(t, h, b, s, offset)
- }
-
- if h.growing() {
- oldArraySize := int(h.noldbuckets())
- for i := 0; i < oldArraySize; i++ {
- bucket := (i + r) & (oldArraySize - 1)
- b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
- if evacuated(b) {
- continue
- }
- copyKeys(t, h, b, s, offset)
- }
- }
- return
-}
-
-func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
- for b != nil {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1)
- if isEmpty(b.tophash[offi]) {
- continue
- }
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
- k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
- if t.IndirectKey() {
- k = *((*unsafe.Pointer)(k))
- }
- if s.len >= s.cap {
- fatal("concurrent map read and map write")
- }
- typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
- s.len++
- }
- b = b.overflow(t)
- }
-}
-
-// values for implementing maps.values
-//
-//go:linkname values maps.values
-func values(m any, p unsafe.Pointer) {
- e := efaceOf(&m)
- t := (*maptype)(unsafe.Pointer(e._type))
- h := (*hmap)(e.data)
- if h == nil || h.count == 0 {
- return
- }
- s := (*slice)(p)
- r := int(rand())
- offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1))
- if h.B == 0 {
- copyValues(t, h, (*bmap)(h.buckets), s, offset)
- return
- }
- arraySize := int(bucketShift(h.B))
- buckets := h.buckets
- for i := 0; i < arraySize; i++ {
- bucket := (i + r) & (arraySize - 1)
- b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
- copyValues(t, h, b, s, offset)
- }
-
- if h.growing() {
- oldArraySize := int(h.noldbuckets())
- for i := 0; i < oldArraySize; i++ {
- bucket := (i + r) & (oldArraySize - 1)
- b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
- if evacuated(b) {
- continue
- }
- copyValues(t, h, b, s, offset)
- }
- }
- return
-}
-
-func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
- for b != nil {
- for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
- offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1)
- if isEmpty(b.tophash[offi]) {
- continue
- }
-
- if h.flags&hashWriting != 0 {
- fatal("concurrent map read and map write")
- }
-
- ele := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
- if t.IndirectElem() {
- ele = *((*unsafe.Pointer)(ele))
- }
- if s.len >= s.cap {
- fatal("concurrent map read and map write")
- }
- typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)
- s.len++
- }
- b = b.overflow(t)
- }
-}
diff --git a/src/runtime/map_noswiss_test.go b/src/runtime/map_noswiss_test.go
deleted file mode 100644
index 5af7b7b8c8..0000000000
--- a/src/runtime/map_noswiss_test.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime_test
-
-import (
- "internal/abi"
- "internal/goarch"
- "runtime"
- "slices"
- "testing"
-)
-
-func TestHmapSize(t *testing.T) {
- // The structure of hmap is defined in runtime/map.go
- // and in cmd/compile/internal/reflectdata/map.go and must be in sync.
- // The size of hmap should be 56 bytes on 64 bit and 36 bytes on 32 bit platforms.
- var hmapSize = uintptr(2*8 + 5*goarch.PtrSize)
- if runtime.RuntimeHmapSize != hmapSize {
- t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
- }
-}
-
-func TestLoadFactor(t *testing.T) {
- for b := uint8(0); b < 20; b++ {
- count := 13 * (1 << b) / 2 // 6.5
- if b == 0 {
- count = 8
- }
- if runtime.OverLoadFactor(count, b) {
- t.Errorf("OverLoadFactor(%d,%d)=true, want false", count, b)
- }
- if !runtime.OverLoadFactor(count+1, b) {
- t.Errorf("OverLoadFactor(%d,%d)=false, want true", count+1, b)
- }
- }
-}
-
-func TestMapIterOrder(t *testing.T) {
- sizes := []int{3, 7, 9, 15}
- if abi.OldMapBucketCountBits >= 5 {
- // it gets flaky (often only one iteration order) at size 3 when abi.MapBucketCountBits >=5.
- t.Fatalf("This test becomes flaky if abi.MapBucketCountBits(=%d) is 5 or larger", abi.OldMapBucketCountBits)
- }
- for _, n := range sizes {
- for i := 0; i < 1000; i++ {
- // Make m be {0: true, 1: true, ..., n-1: true}.
- m := make(map[int]bool)
- for i := 0; i < n; i++ {
- m[i] = true
- }
- // Check that iterating over the map produces at least two different orderings.
- ord := func() []int {
- var s []int
- for key := range m {
- s = append(s, key)
- }
- return s
- }
- first := ord()
- ok := false
- for try := 0; try < 100; try++ {
- if !slices.Equal(first, ord()) {
- ok = true
- break
- }
- }
- if !ok {
- t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
- break
- }
- }
- }
-}
-
-const bs = abi.OldMapBucketCount
-
-// belowOverflow should be a pretty-full pair of buckets;
-// atOverflow is 1/8 bs larger = 13/8 buckets or two buckets
-// that are 13/16 full each, which is the overflow boundary.
-// Adding one to that should ensure overflow to the next higher size.
-const (
- belowOverflow = bs * 3 / 2 // 1.5 bs = 2 buckets @ 75%
- atOverflow = belowOverflow + bs/8 // 2 buckets at 13/16 fill.
-)
-
-var mapBucketTests = [...]struct {
- n int // n is the number of map elements
- noescape int // number of expected buckets for non-escaping map
- escape int // number of expected buckets for escaping map
-}{
- {-(1 << 30), 1, 1},
- {-1, 1, 1},
- {0, 1, 1},
- {1, 1, 1},
- {bs, 1, 1},
- {bs + 1, 2, 2},
- {belowOverflow, 2, 2}, // 1.5 bs = 2 buckets @ 75%
- {atOverflow + 1, 4, 4}, // 13/8 bs + 1 == overflow to 4
-
- {2 * belowOverflow, 4, 4}, // 3 bs = 4 buckets @75%
- {2*atOverflow + 1, 8, 8}, // 13/4 bs + 1 = overflow to 8
-
- {4 * belowOverflow, 8, 8}, // 6 bs = 8 buckets @ 75%
- {4*atOverflow + 1, 16, 16}, // 13/2 bs + 1 = overflow to 16
-}
-
-func TestMapBuckets(t *testing.T) {
- // Test that maps of different sizes have the right number of buckets.
- // Non-escaping maps with small buckets (like map[int]int) never
- // have a nil bucket pointer due to starting with preallocated buckets
- // on the stack. Escaping maps start with a non-nil bucket pointer if
- // hint size is above bucketCnt and thereby have more than one bucket.
- // These tests depend on bucketCnt and loadFactor* in map.go.
- t.Run("mapliteral", func(t *testing.T) {
- for _, tt := range mapBucketTests {
- localMap := map[int]int{}
- if runtime.MapBucketsPointerIsNil(localMap) {
- t.Errorf("no escape: buckets pointer is nil for non-escaping map")
- }
- for i := 0; i < tt.n; i++ {
- localMap[i] = i
- }
- if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
- t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
- }
- escapingMap := runtime.Escape(map[int]int{})
- if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
- t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
- }
- for i := 0; i < tt.n; i++ {
- escapingMap[i] = i
- }
- if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
- t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got)
- }
- }
- })
- t.Run("nohint", func(t *testing.T) {
- for _, tt := range mapBucketTests {
- localMap := make(map[int]int)
- if runtime.MapBucketsPointerIsNil(localMap) {
- t.Errorf("no escape: buckets pointer is nil for non-escaping map")
- }
- for i := 0; i < tt.n; i++ {
- localMap[i] = i
- }
- if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
- t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
- }
- escapingMap := runtime.Escape(make(map[int]int))
- if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
- t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
- }
- for i := 0; i < tt.n; i++ {
- escapingMap[i] = i
- }
- if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
- t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
- }
- }
- })
- t.Run("makemap", func(t *testing.T) {
- for _, tt := range mapBucketTests {
- localMap := make(map[int]int, tt.n)
- if runtime.MapBucketsPointerIsNil(localMap) {
- t.Errorf("no escape: buckets pointer is nil for non-escaping map")
- }
- for i := 0; i < tt.n; i++ {
- localMap[i] = i
- }
- if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
- t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
- }
- escapingMap := runtime.Escape(make(map[int]int, tt.n))
- if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
- t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
- }
- for i := 0; i < tt.n; i++ {
- escapingMap[i] = i
- }
- if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
- t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
- }
- }
- })
- t.Run("makemap64", func(t *testing.T) {
- for _, tt := range mapBucketTests {
- localMap := make(map[int]int, int64(tt.n))
- if runtime.MapBucketsPointerIsNil(localMap) {
- t.Errorf("no escape: buckets pointer is nil for non-escaping map")
- }
- for i := 0; i < tt.n; i++ {
- localMap[i] = i
- }
- if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
- t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
- }
- escapingMap := runtime.Escape(make(map[int]int, tt.n))
- if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
- t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
- }
- for i := 0; i < tt.n; i++ {
- escapingMap[i] = i
- }
- if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
- t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
- }
- }
- })
-}
diff --git a/src/runtime/map_swiss_test.go b/src/runtime/map_swiss_test.go
deleted file mode 100644
index d5c9fdbe46..0000000000
--- a/src/runtime/map_swiss_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package runtime_test
-
-import (
- "internal/abi"
- "internal/goarch"
- "internal/runtime/maps"
- "slices"
- "testing"
- "unsafe"
-)
-
-func TestHmapSize(t *testing.T) {
- // The structure of Map is defined in internal/runtime/maps/map.go
- // and in cmd/compile/internal/reflectdata/map_swiss.go and must be in sync.
- // The size of Map should be 48 bytes on 64 bit and 32 bytes on 32 bit platforms.
- wantSize := uintptr(2*8 + 4*goarch.PtrSize)
- gotSize := unsafe.Sizeof(maps.Map{})
- if gotSize != wantSize {
- t.Errorf("sizeof(maps.Map{})==%d, want %d", gotSize, wantSize)
- }
-}
-
-// See also reflect_test.TestGroupSizeZero.
-func TestGroupSizeZero(t *testing.T) {
- var m map[struct{}]struct{}
- mTyp := abi.TypeOf(m)
- mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp))
-
- // internal/runtime/maps when create pointers to slots, even if slots
- // are size 0. The compiler should have reserved an extra word to
- // ensure that pointers to the zero-size type at the end of group are
- // valid.
- if mt.Group.Size() <= 8 {
- t.Errorf("Group size got %d want >8", mt.Group.Size())
- }
-}
-
-func TestMapIterOrder(t *testing.T) {
- sizes := []int{3, 7, 9, 15}
- for _, n := range sizes {
- for i := 0; i < 1000; i++ {
- // Make m be {0: true, 1: true, ..., n-1: true}.
- m := make(map[int]bool)
- for i := 0; i < n; i++ {
- m[i] = true
- }
- // Check that iterating over the map produces at least two different orderings.
- ord := func() []int {
- var s []int
- for key := range m {
- s = append(s, key)
- }
- return s
- }
- first := ord()
- ok := false
- for try := 0; try < 100; try++ {
- if !slices.Equal(first, ord()) {
- ok = true
- break
- }
- }
- if !ok {
- t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
- break
- }
- }
- }
-}
diff --git a/src/runtime/map_test.go b/src/runtime/map_test.go
index b1ff02d851..7fe8399130 100644
--- a/src/runtime/map_test.go
+++ b/src/runtime/map_test.go
@@ -6,7 +6,9 @@ package runtime_test
import (
"fmt"
- "internal/goexperiment"
+ "internal/abi"
+ "internal/goarch"
+ "internal/runtime/maps"
"internal/testenv"
"math"
"os"
@@ -812,31 +814,6 @@ func TestIncrementAfterBulkClearKeyStringValueInt(t *testing.T) {
}
}
-func TestMapTombstones(t *testing.T) {
- m := map[int]int{}
- const N = 10000
- // Fill a map.
- for i := 0; i < N; i++ {
- m[i] = i
- }
- runtime.MapTombstoneCheck(m)
- // Delete half of the entries.
- for i := 0; i < N; i += 2 {
- delete(m, i)
- }
- runtime.MapTombstoneCheck(m)
- // Add new entries to fill in holes.
- for i := N; i < 3*N/2; i++ {
- m[i] = i
- }
- runtime.MapTombstoneCheck(m)
- // Delete everything.
- for i := 0; i < 3*N/2; i++ {
- delete(m, i)
- }
- runtime.MapTombstoneCheck(m)
-}
-
type canString int
func (c canString) String() string {
@@ -1060,44 +1037,6 @@ func TestEmptyMapWithInterfaceKey(t *testing.T) {
})
}
-func TestMapKeys(t *testing.T) {
- if goexperiment.SwissMap {
- t.Skip("mapkeys not implemented for swissmaps")
- }
-
- type key struct {
- s string
- pad [128]byte // sizeof(key) > abi.MapMaxKeyBytes
- }
- m := map[key]int{{s: "a"}: 1, {s: "b"}: 2}
- keys := make([]key, 0, len(m))
- runtime.MapKeys(m, unsafe.Pointer(&keys))
- for _, k := range keys {
- if len(k.s) != 1 {
- t.Errorf("len(k.s) == %d, want 1", len(k.s))
- }
- }
-}
-
-func TestMapValues(t *testing.T) {
- if goexperiment.SwissMap {
- t.Skip("mapvalues not implemented for swissmaps")
- }
-
- type val struct {
- s string
- pad [128]byte // sizeof(val) > abi.MapMaxElemBytes
- }
- m := map[int]val{1: {s: "a"}, 2: {s: "b"}}
- vals := make([]val, 0, len(m))
- runtime.MapValues(m, unsafe.Pointer(&vals))
- for _, v := range vals {
- if len(v.s) != 1 {
- t.Errorf("len(v.s) == %d, want 1", len(v.s))
- }
- }
-}
-
func computeHash() uintptr {
var v struct{}
return runtime.MemHash(unsafe.Pointer(&v), 0, unsafe.Sizeof(v))
@@ -1202,3 +1141,62 @@ func TestMapIterDeleteReplace(t *testing.T) {
})
}
}
+
+func TestHmapSize(t *testing.T) {
+ // The structure of Map is defined in internal/runtime/maps/map.go
+ // and in cmd/compile/internal/reflectdata/map.go and must be in sync.
+ // The size of Map should be 48 bytes on 64 bit and 32 bytes on 32 bit platforms.
+ wantSize := uintptr(2*8 + 4*goarch.PtrSize)
+ gotSize := unsafe.Sizeof(maps.Map{})
+ if gotSize != wantSize {
+ t.Errorf("sizeof(maps.Map{})==%d, want %d", gotSize, wantSize)
+ }
+}
+
+// See also reflect_test.TestGroupSizeZero.
+func TestGroupSizeZero(t *testing.T) {
+ var m map[struct{}]struct{}
+ mTyp := abi.TypeOf(m)
+ mt := (*abi.MapType)(unsafe.Pointer(mTyp))
+
+ // internal/runtime/maps when create pointers to slots, even if slots
+ // are size 0. The compiler should have reserved an extra word to
+ // ensure that pointers to the zero-size type at the end of group are
+ // valid.
+ if mt.Group.Size() <= 8 {
+ t.Errorf("Group size got %d want >8", mt.Group.Size())
+ }
+}
+
+func TestMapIterOrder(t *testing.T) {
+ sizes := []int{3, 7, 9, 15}
+ for _, n := range sizes {
+ for i := 0; i < 1000; i++ {
+ // Make m be {0: true, 1: true, ..., n-1: true}.
+ m := make(map[int]bool)
+ for i := 0; i < n; i++ {
+ m[i] = true
+ }
+ // Check that iterating over the map produces at least two different orderings.
+ ord := func() []int {
+ var s []int
+ for key := range m {
+ s = append(s, key)
+ }
+ return s
+ }
+ first := ord()
+ ok := false
+ for try := 0; try < 100; try++ {
+ if !slices.Equal(first, ord()) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
+ break
+ }
+ }
+ }
+}
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 7331886af2..9872e5297f 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -692,7 +692,7 @@ func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize
// malloc does not call heapSetType* when there are no pointers.
//
// There can be read-write races between heapSetType* and things
-// that read the heap metadata like scanobject. However, since
+// that read the heap metadata like scanObject. However, since
// heapSetType* is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
@@ -947,7 +947,7 @@ func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) {
if typ == nil {
return
}
- if typ.Kind_&abi.KindMask == abi.Interface {
+ if typ.Kind() == abi.Interface {
// Interfaces are unfortunately inconsistently handled
// when it comes to the type pointer, so it's easy to
// produce a lot of false positives here.
@@ -1776,7 +1776,7 @@ func pointerMask(ep any) (mask []byte) {
t := e._type
var et *_type
- if t.Kind_&abi.KindMask != abi.Pointer {
+ if t.Kind() != abi.Pointer {
throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried")
}
et = (*ptrtype)(unsafe.Pointer(t)).Elem
diff --git a/src/runtime/mcleanup.go b/src/runtime/mcleanup.go
index c368730c57..383217aa05 100644
--- a/src/runtime/mcleanup.go
+++ b/src/runtime/mcleanup.go
@@ -173,14 +173,14 @@ func (c Cleanup) Stop() {
// Reached the end of the linked list. Stop searching at this point.
break
}
- if offset == uintptr(s.offset) && _KindSpecialCleanup == s.kind &&
+ if offset == s.offset && _KindSpecialCleanup == s.kind &&
(*specialCleanup)(unsafe.Pointer(s)).id == c.id {
// The special is a cleanup and contains a matching cleanup id.
*iter = s.next
found = s
break
}
- if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCleanup < s.kind) {
+ if offset < s.offset || (offset == s.offset && _KindSpecialCleanup < s.kind) {
// The special is outside the region specified for that kind of
// special. The specials are sorted by kind.
break
diff --git a/src/runtime/mem_windows.go b/src/runtime/mem_windows.go
index 75860a4c1d..3db6fc2ba4 100644
--- a/src/runtime/mem_windows.go
+++ b/src/runtime/mem_windows.go
@@ -26,11 +26,11 @@ const (
//
//go:nosplit
func sysAllocOS(n uintptr, _ string) unsafe.Pointer {
- return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
+ return unsafe.Pointer(stdcall(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE))
}
func sysUnusedOS(v unsafe.Pointer, n uintptr) {
- r := stdcall3(_VirtualFree, uintptr(v), n, _MEM_DECOMMIT)
+ r := stdcall(_VirtualFree, uintptr(v), n, _MEM_DECOMMIT)
if r != 0 {
return
}
@@ -46,7 +46,7 @@ func sysUnusedOS(v unsafe.Pointer, n uintptr) {
// in the worst case, but that's fast enough.
for n > 0 {
small := n
- for small >= 4096 && stdcall3(_VirtualFree, uintptr(v), small, _MEM_DECOMMIT) == 0 {
+ for small >= 4096 && stdcall(_VirtualFree, uintptr(v), small, _MEM_DECOMMIT) == 0 {
small /= 2
small &^= 4096 - 1
}
@@ -60,7 +60,7 @@ func sysUnusedOS(v unsafe.Pointer, n uintptr) {
}
func sysUsedOS(v unsafe.Pointer, n uintptr) {
- p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
+ p := stdcall(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
if p == uintptr(v) {
return
}
@@ -71,7 +71,7 @@ func sysUsedOS(v unsafe.Pointer, n uintptr) {
k := n
for k > 0 {
small := k
- for small >= 4096 && stdcall4(_VirtualAlloc, uintptr(v), small, _MEM_COMMIT, _PAGE_READWRITE) == 0 {
+ for small >= 4096 && stdcall(_VirtualAlloc, uintptr(v), small, _MEM_COMMIT, _PAGE_READWRITE) == 0 {
small /= 2
small &^= 4096 - 1
}
@@ -105,7 +105,7 @@ func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) {
//
//go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr) {
- r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
+ r := stdcall(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
if r == 0 {
print("runtime: VirtualFree of ", n, " bytes failed with errno=", getlasterror(), "\n")
throw("runtime: failed to release pages")
@@ -121,13 +121,13 @@ func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer {
// v is just a hint.
// First try at v.
// This will fail if any of [v, v+n) is already reserved.
- v = unsafe.Pointer(stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_RESERVE, _PAGE_READWRITE))
+ v = unsafe.Pointer(stdcall(_VirtualAlloc, uintptr(v), n, _MEM_RESERVE, _PAGE_READWRITE))
if v != nil {
return v
}
// Next let the kernel choose the address.
- return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
+ return unsafe.Pointer(stdcall(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE))
}
func sysMapOS(v unsafe.Pointer, n uintptr, _ string) {
diff --git a/src/runtime/memclr_mips64x.s b/src/runtime/memclr_mips64x.s
index cf3a9c4ab4..3df3728146 100644
--- a/src/runtime/memclr_mips64x.s
+++ b/src/runtime/memclr_mips64x.s
@@ -71,29 +71,93 @@ msa_large_loop:
no_msa:
// if less than 8 bytes, do one byte at a time
SGTU $8, R2, R3
- BNE R3, out
+ BNE R3, check4
- // do one byte at a time until 8-aligned
+ // Check alignment
AND $7, R1, R3
- BEQ R3, words
+ BEQ R3, aligned
+
+ // Zero one byte at a time until we reach 8 byte alignment.
+ MOVV $8, R5
+ SUBV R3, R5, R3
+ SUBV R3, R2, R2
+align:
+ SUBV $1, R3
MOVB R0, (R1)
ADDV $1, R1
- JMP -4(PC)
+ BNE R3, align
-words:
- // do 8 bytes at a time if there is room
- ADDV $-7, R4, R2
+aligned:
+ SGTU $8, R2, R3
+ BNE R3, check4
+ SGTU $16, R2, R3
+ BNE R3, zero8
+ SGTU $32, R2, R3
+ BNE R3, zero16
+ SGTU $64, R2, R3
+ BNE R3, zero32
+loop64:
+ MOVV R0, (R1)
+ MOVV R0, 8(R1)
+ MOVV R0, 16(R1)
+ MOVV R0, 24(R1)
+ MOVV R0, 32(R1)
+ MOVV R0, 40(R1)
+ MOVV R0, 48(R1)
+ MOVV R0, 56(R1)
+ ADDV $64, R1
+ SUBV $64, R2
+ SGTU $64, R2, R3
+ BEQ R0, R3, loop64
+ BEQ R2, done
- SGTU R2, R1, R3
- BEQ R3, out
+check32:
+ SGTU $32, R2, R3
+ BNE R3, check16
+zero32:
+ MOVV R0, (R1)
+ MOVV R0, 8(R1)
+ MOVV R0, 16(R1)
+ MOVV R0, 24(R1)
+ ADDV $32, R1
+ SUBV $32, R2
+ BEQ R2, done
+
+check16:
+ SGTU $16, R2, R3
+ BNE R3, check8
+zero16:
+ MOVV R0, (R1)
+ MOVV R0, 8(R1)
+ ADDV $16, R1
+ SUBV $16, R2
+ BEQ R2, done
+
+check8:
+ SGTU $8, R2, R3
+ BNE R3, check4
+zero8:
MOVV R0, (R1)
ADDV $8, R1
- JMP -4(PC)
+ SUBV $8, R2
+ BEQ R2, done
-out:
+check4:
+ SGTU $4, R2, R3
+ BNE R3, loop1
+zero4:
+ MOVB R0, (R1)
+ MOVB R0, 1(R1)
+ MOVB R0, 2(R1)
+ MOVB R0, 3(R1)
+ ADDV $4, R1
+ SUBV $4, R2
+
+loop1:
BEQ R1, R4, done
MOVB R0, (R1)
ADDV $1, R1
- JMP -3(PC)
+ JMP loop1
done:
RET
+
diff --git a/src/runtime/memclr_s390x.s b/src/runtime/memclr_s390x.s
index 656e96998c..392057565e 100644
--- a/src/runtime/memclr_s390x.s
+++ b/src/runtime/memclr_s390x.s
@@ -109,53 +109,23 @@ clearge32:
// For size >= 4KB, XC is loop unrolled 16 times (4KB = 256B * 16)
clearge4KB:
XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
- XC $256, 0(R4), 0(R4)
- ADD $256, R4
- ADD $-256, R5
+ XC $256, 256(R4), 256(R4)
+ XC $256, 512(R4), 512(R4)
+ XC $256, 768(R4), 768(R4)
+ XC $256, 1024(R4), 1024(R4)
+ XC $256, 1280(R4), 1280(R4)
+ XC $256, 1536(R4), 1536(R4)
+ XC $256, 1792(R4), 1792(R4)
+ XC $256, 2048(R4), 2048(R4)
+ XC $256, 2304(R4), 2304(R4)
+ XC $256, 2560(R4), 2560(R4)
+ XC $256, 2816(R4), 2816(R4)
+ XC $256, 3072(R4), 3072(R4)
+ XC $256, 3328(R4), 3328(R4)
+ XC $256, 3584(R4), 3584(R4)
+ XC $256, 3840(R4), 3840(R4)
+ ADD $4096, R4
+ ADD $-4096, R5
CMP R5, $4096
BGE clearge4KB
@@ -180,7 +150,7 @@ clear32to255:
clear32:
VZERO V1
VST V1, 0(R4)
- VST V1, 16(R4)
+ VST V1, 16(R4)
RET
clear33to64:
diff --git a/src/runtime/memmove_test.go b/src/runtime/memmove_test.go
index a2c3b72568..22905504d4 100644
--- a/src/runtime/memmove_test.go
+++ b/src/runtime/memmove_test.go
@@ -8,6 +8,8 @@ import (
"crypto/rand"
"encoding/binary"
"fmt"
+ "internal/asan"
+ "internal/msan"
"internal/race"
"internal/testenv"
. "runtime"
@@ -102,8 +104,8 @@ func TestMemmoveLarge0x180000(t *testing.T) {
}
t.Parallel()
- if race.Enabled {
- t.Skip("skipping large memmove test under race detector")
+ if race.Enabled || asan.Enabled || msan.Enabled {
+ t.Skip("skipping large memmove test under sanitizers")
}
testSize(t, 0x180000)
}
@@ -114,8 +116,8 @@ func TestMemmoveOverlapLarge0x120000(t *testing.T) {
}
t.Parallel()
- if race.Enabled {
- t.Skip("skipping large memmove test under race detector")
+ if race.Enabled || asan.Enabled || msan.Enabled {
+ t.Skip("skipping large memmove test under sanitizers")
}
testOverlap(t, 0x120000)
}
@@ -518,6 +520,42 @@ func BenchmarkMemclrRange(b *testing.B) {
}
}
+func BenchmarkClearFat3(b *testing.B) {
+ p := new([3]byte)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = [3]byte{}
+ }
+}
+
+func BenchmarkClearFat4(b *testing.B) {
+ p := new([4 / 4]uint32)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = [4 / 4]uint32{}
+ }
+}
+
+func BenchmarkClearFat5(b *testing.B) {
+ p := new([5]byte)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = [5]byte{}
+ }
+}
+
+func BenchmarkClearFat6(b *testing.B) {
+ p := new([6]byte)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = [6]byte{}
+ }
+}
+
func BenchmarkClearFat7(b *testing.B) {
p := new([7]byte)
Escape(p)
@@ -536,6 +574,24 @@ func BenchmarkClearFat8(b *testing.B) {
}
}
+func BenchmarkClearFat9(b *testing.B) {
+ p := new([9]byte)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = [9]byte{}
+ }
+}
+
+func BenchmarkClearFat10(b *testing.B) {
+ p := new([10]byte)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = [10]byte{}
+ }
+}
+
func BenchmarkClearFat11(b *testing.B) {
p := new([11]byte)
Escape(p)
@@ -590,6 +646,24 @@ func BenchmarkClearFat16(b *testing.B) {
}
}
+func BenchmarkClearFat18(b *testing.B) {
+ p := new([18]byte)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = [18]byte{}
+ }
+}
+
+func BenchmarkClearFat20(b *testing.B) {
+ p := new([20 / 4]uint32)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = [20 / 4]uint32{}
+ }
+}
+
func BenchmarkClearFat24(b *testing.B) {
p := new([24 / 4]uint32)
Escape(p)
@@ -707,6 +781,46 @@ func BenchmarkClearFat1040(b *testing.B) {
}
}
+func BenchmarkCopyFat3(b *testing.B) {
+ var x [3]byte
+ p := new([3]byte)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = x
+ }
+}
+
+func BenchmarkCopyFat4(b *testing.B) {
+ var x [4 / 4]uint32
+ p := new([4 / 4]uint32)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = x
+ }
+}
+
+func BenchmarkCopyFat5(b *testing.B) {
+ var x [5]byte
+ p := new([5]byte)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = x
+ }
+}
+
+func BenchmarkCopyFat6(b *testing.B) {
+ var x [6]byte
+ p := new([6]byte)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = x
+ }
+}
+
func BenchmarkCopyFat7(b *testing.B) {
var x [7]byte
p := new([7]byte)
@@ -727,6 +841,26 @@ func BenchmarkCopyFat8(b *testing.B) {
}
}
+func BenchmarkCopyFat9(b *testing.B) {
+ var x [9]byte
+ p := new([9]byte)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = x
+ }
+}
+
+func BenchmarkCopyFat10(b *testing.B) {
+ var x [10]byte
+ p := new([10]byte)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = x
+ }
+}
+
func BenchmarkCopyFat11(b *testing.B) {
var x [11]byte
p := new([11]byte)
@@ -787,6 +921,26 @@ func BenchmarkCopyFat16(b *testing.B) {
}
}
+func BenchmarkCopyFat18(b *testing.B) {
+ var x [18]byte
+ p := new([18]byte)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = x
+ }
+}
+
+func BenchmarkCopyFat20(b *testing.B) {
+ var x [20 / 4]uint32
+ p := new([20 / 4]uint32)
+ Escape(p)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ *p = x
+ }
+}
+
func BenchmarkCopyFat24(b *testing.B) {
var x [24 / 4]uint32
p := new([24 / 4]uint32)
diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go
index 48da745521..ef3782b783 100644
--- a/src/runtime/metrics.go
+++ b/src/runtime/metrics.go
@@ -169,6 +169,20 @@ func initMetrics() {
out.scalar = float64bits(nsToSec(in.cpuStats.UserTime))
},
},
+ "/gc/cleanups/executed:cleanups": {
+ deps: makeStatDepSet(finalStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.finalStats.cleanupsExecuted
+ },
+ },
+ "/gc/cleanups/queued:cleanups": {
+ deps: makeStatDepSet(finalStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.finalStats.cleanupsQueued
+ },
+ },
"/gc/cycles/automatic:gc-cycles": {
deps: makeStatDepSet(sysStatsDep),
compute: func(in *statAggregate, out *metricValue) {
@@ -190,6 +204,20 @@ func initMetrics() {
out.scalar = in.sysStats.gcCyclesDone
},
},
+ "/gc/finalizers/executed:finalizers": {
+ deps: makeStatDepSet(finalStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.finalStats.finalizersExecuted
+ },
+ },
+ "/gc/finalizers/queued:finalizers": {
+ deps: makeStatDepSet(finalStatsDep),
+ compute: func(in *statAggregate, out *metricValue) {
+ out.kind = metricKindUint64
+ out.scalar = in.finalStats.finalizersQueued
+ },
+ },
"/gc/scan/globals:bytes": {
deps: makeStatDepSet(gcStatsDep),
compute: func(in *statAggregate, out *metricValue) {
@@ -514,10 +542,11 @@ func godebug_registerMetric(name string, read func() uint64) {
type statDep uint
const (
- heapStatsDep statDep = iota // corresponds to heapStatsAggregate
- sysStatsDep // corresponds to sysStatsAggregate
- cpuStatsDep // corresponds to cpuStatsAggregate
- gcStatsDep // corresponds to gcStatsAggregate
+ heapStatsDep statDep = iota // corresponds to heapStatsAggregate
+ sysStatsDep // corresponds to sysStatsAggregate
+ cpuStatsDep // corresponds to cpuStatsAggregate
+ gcStatsDep // corresponds to gcStatsAggregate
+ finalStatsDep // corresponds to finalStatsAggregate
numStatsDeps
)
@@ -696,6 +725,21 @@ func (a *gcStatsAggregate) compute() {
a.totalScan = a.heapScan + a.stackScan + a.globalsScan
}
+// finalStatsAggregate represents various finalizer/cleanup stats obtained
+// from the runtime acquired together to avoid skew and inconsistencies.
+type finalStatsAggregate struct {
+ finalizersQueued uint64
+ finalizersExecuted uint64
+ cleanupsQueued uint64
+ cleanupsExecuted uint64
+}
+
+// compute populates the finalStatsAggregate with values from the runtime.
+func (a *finalStatsAggregate) compute() {
+ a.finalizersQueued, a.finalizersExecuted = finReadQueueStats()
+ a.cleanupsQueued, a.cleanupsExecuted = gcCleanups.readQueueStats()
+}
+
// nsToSec takes a duration in nanoseconds and converts it to seconds as
// a float64.
func nsToSec(ns int64) float64 {
@@ -708,11 +752,12 @@ func nsToSec(ns int64) float64 {
// as a set of these aggregates that it has populated. The aggregates
// are populated lazily by its ensure method.
type statAggregate struct {
- ensured statDepSet
- heapStats heapStatsAggregate
- sysStats sysStatsAggregate
- cpuStats cpuStatsAggregate
- gcStats gcStatsAggregate
+ ensured statDepSet
+ heapStats heapStatsAggregate
+ sysStats sysStatsAggregate
+ cpuStats cpuStatsAggregate
+ gcStats gcStatsAggregate
+ finalStats finalStatsAggregate
}
// ensure populates statistics aggregates determined by deps if they
@@ -735,6 +780,8 @@ func (a *statAggregate) ensure(deps *statDepSet) {
a.cpuStats.compute()
case gcStatsDep:
a.gcStats.compute()
+ case finalStatsDep:
+ a.finalStats.compute()
}
}
a.ensured = a.ensured.union(missing)
diff --git a/src/runtime/metrics/description.go b/src/runtime/metrics/description.go
index 19a7dbf07a..4587f791e1 100644
--- a/src/runtime/metrics/description.go
+++ b/src/runtime/metrics/description.go
@@ -175,6 +175,22 @@ var allDesc = []Description{
Cumulative: true,
},
{
+ Name: "/gc/cleanups/executed:cleanups",
+ Description: "Approximate total count of cleanup functions (created by runtime.AddCleanup) " +
+ "executed by the runtime. Subtract /gc/cleanups/queued:cleanups to approximate " +
+ "cleanup queue length. Useful for detecting slow cleanups holding up the queue.",
+ Kind: KindUint64,
+ Cumulative: true,
+ },
+ {
+ Name: "/gc/cleanups/queued:cleanups",
+ Description: "Approximate total count of cleanup functions (created by runtime.AddCleanup) " +
+ "queued by the runtime for execution. Subtract from /gc/cleanups/executed:cleanups " +
+ "to approximate cleanup queue length. Useful for detecting slow cleanups holding up the queue.",
+ Kind: KindUint64,
+ Cumulative: true,
+ },
+ {
Name: "/gc/cycles/automatic:gc-cycles",
Description: "Count of completed GC cycles generated by the Go runtime.",
Kind: KindUint64,
@@ -193,6 +209,23 @@ var allDesc = []Description{
Cumulative: true,
},
{
+ Name: "/gc/finalizers/executed:finalizers",
+ Description: "Total count of finalizer functions (created by runtime.SetFinalizer) " +
+ "executed by the runtime. Subtract /gc/finalizers/queued:finalizers to approximate " +
+ "finalizer queue length. Useful for detecting finalizers overwhelming the queue, " +
+ "either by being too slow, or by there being too many of them.",
+ Kind: KindUint64,
+ Cumulative: true,
+ },
+ {
+ Name: "/gc/finalizers/queued:finalizers",
+ Description: "Total count of finalizer functions (created by runtime.SetFinalizer) and " +
+ "queued by the runtime for execution. Subtract from /gc/finalizers/executed:finalizers " +
+ "to approximate finalizer queue length. Useful for detecting slow finalizers holding up the queue.",
+ Kind: KindUint64,
+ Cumulative: true,
+ },
+ {
Name: "/gc/gogc:percent",
Description: "Heap size target percentage configured by the user, otherwise 100. This " +
"value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent " +
diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go
index a1902bc6d7..058769ac3a 100644
--- a/src/runtime/metrics/doc.go
+++ b/src/runtime/metrics/doc.go
@@ -137,6 +137,19 @@ Below is the full list of supported metrics, ordered lexicographically.
to system CPU time measurements. Compare only with other
/cpu/classes metrics.
+ /gc/cleanups/executed:cleanups
+ Approximate total count of cleanup functions (created
+ by runtime.AddCleanup) executed by the runtime. Subtract
+ /gc/cleanups/queued:cleanups to approximate cleanup queue
+ length. Useful for detecting slow cleanups holding up the queue.
+
+ /gc/cleanups/queued:cleanups
+ Approximate total count of cleanup functions (created by
+ runtime.AddCleanup) queued by the runtime for execution.
+ Subtract from /gc/cleanups/executed:cleanups to approximate
+ cleanup queue length. Useful for detecting slow cleanups holding
+ up the queue.
+
/gc/cycles/automatic:gc-cycles
Count of completed GC cycles generated by the Go runtime.
@@ -146,6 +159,20 @@ Below is the full list of supported metrics, ordered lexicographically.
/gc/cycles/total:gc-cycles
Count of all completed GC cycles.
+ /gc/finalizers/executed:finalizers
+ Total count of finalizer functions (created by
+ runtime.SetFinalizer) executed by the runtime. Subtract
+ /gc/finalizers/queued:finalizers to approximate finalizer queue
+ length. Useful for detecting finalizers overwhelming the queue,
+ either by being too slow, or by there being too many of them.
+
+ /gc/finalizers/queued:finalizers
+ Total count of finalizer functions (created by
+ runtime.SetFinalizer) and queued by the runtime for execution.
+ Subtract from /gc/finalizers/executed:finalizers to approximate
+ finalizer queue length. Useful for detecting slow finalizers
+ holding up the queue.
+
/gc/gogc:percent
Heap size target percentage configured by the user, otherwise
100. This value is set by the GOGC environment variable, and the
diff --git a/src/runtime/metrics_test.go b/src/runtime/metrics_test.go
index 5fc022efc6..5787c96084 100644
--- a/src/runtime/metrics_test.go
+++ b/src/runtime/metrics_test.go
@@ -499,6 +499,10 @@ func TestReadMetricsCumulative(t *testing.T) {
defer wg.Done()
for {
// Add more things here that could influence metrics.
+ for i := 0; i < 10; i++ {
+ runtime.AddCleanup(new(*int), func(_ struct{}) {}, struct{}{})
+ runtime.SetFinalizer(new(*int), func(_ **int) {})
+ }
for i := 0; i < len(readMetricsSink); i++ {
readMetricsSink[i] = make([]byte, 1024)
select {
@@ -1512,3 +1516,62 @@ func TestMetricHeapUnusedLargeObjectOverflow(t *testing.T) {
done <- struct{}{}
wg.Wait()
}
+
+func TestReadMetricsCleanups(t *testing.T) {
+ runtime.GC() // End any in-progress GC.
+ runtime.BlockUntilEmptyCleanupQueue(int64(1 * time.Second)) // Flush any queued cleanups.
+
+ var before [2]metrics.Sample
+ before[0].Name = "/gc/cleanups/queued:cleanups"
+ before[1].Name = "/gc/cleanups/executed:cleanups"
+ after := before
+
+ metrics.Read(before[:])
+
+ const N = 10
+ for i := 0; i < N; i++ {
+ runtime.AddCleanup(new(*int), func(_ struct{}) {}, struct{}{})
+ }
+
+ runtime.GC()
+ runtime.BlockUntilEmptyCleanupQueue(int64(1 * time.Second))
+
+ metrics.Read(after[:])
+
+ if v0, v1 := before[0].Value.Uint64(), after[0].Value.Uint64(); v0+N != v1 {
+ t.Errorf("expected %s difference to be exactly %d, got %d -> %d", before[0].Name, N, v0, v1)
+ }
+ if v0, v1 := before[1].Value.Uint64(), after[1].Value.Uint64(); v0+N != v1 {
+ t.Errorf("expected %s difference to be exactly %d, got %d -> %d", before[1].Name, N, v0, v1)
+ }
+}
+
+func TestReadMetricsFinalizers(t *testing.T) {
+ runtime.GC() // End any in-progress GC.
+ runtime.BlockUntilEmptyFinalizerQueue(int64(1 * time.Second)) // Flush any queued finalizers.
+
+ var before [2]metrics.Sample
+ before[0].Name = "/gc/finalizers/queued:finalizers"
+ before[1].Name = "/gc/finalizers/executed:finalizers"
+ after := before
+
+ metrics.Read(before[:])
+
+ const N = 10
+ for i := 0; i < N; i++ {
+ runtime.SetFinalizer(new(*int), func(_ **int) {})
+ }
+
+ runtime.GC()
+ runtime.GC()
+ runtime.BlockUntilEmptyFinalizerQueue(int64(1 * time.Second))
+
+ metrics.Read(after[:])
+
+ if v0, v1 := before[0].Value.Uint64(), after[0].Value.Uint64(); v0+N != v1 {
+ t.Errorf("expected %s difference to be exactly %d, got %d -> %d", before[0].Name, N, v0, v1)
+ }
+ if v0, v1 := before[1].Value.Uint64(), after[1].Value.Uint64(); v0+N != v1 {
+ t.Errorf("expected %s difference to be exactly %d, got %d -> %d", before[1].Name, N, v0, v1)
+ }
+}
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index 2d4a54c933..bafdb01603 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -251,7 +251,7 @@ func runFinalizers() {
// confusing the write barrier.
*(*[2]uintptr)(frame) = [2]uintptr{}
}
- switch f.fint.Kind_ & abi.KindMask {
+ switch f.fint.Kind() {
case abi.Pointer:
// direct use of pointer
*(*unsafe.Pointer)(r) = f.arg
@@ -435,7 +435,7 @@ func SetFinalizer(obj any, finalizer any) {
if etyp == nil {
throw("runtime.SetFinalizer: first argument is nil")
}
- if etyp.Kind_&abi.KindMask != abi.Pointer {
+ if etyp.Kind() != abi.Pointer {
throw("runtime.SetFinalizer: first argument is " + toRType(etyp).string() + ", not pointer")
}
ot := (*ptrtype)(unsafe.Pointer(etyp))
@@ -490,7 +490,7 @@ func SetFinalizer(obj any, finalizer any) {
return
}
- if ftyp.Kind_&abi.KindMask != abi.Func {
+ if ftyp.Kind() != abi.Func {
throw("runtime.SetFinalizer: second argument is " + toRType(ftyp).string() + ", not a function")
}
ft := (*functype)(unsafe.Pointer(ftyp))
@@ -505,13 +505,13 @@ func SetFinalizer(obj any, finalizer any) {
case fint == etyp:
// ok - same type
goto okarg
- case fint.Kind_&abi.KindMask == abi.Pointer:
+ case fint.Kind() == abi.Pointer:
if (fint.Uncommon() == nil || etyp.Uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).Elem == ot.Elem {
// ok - not same type, but both pointers,
// one or the other is unnamed, and same element type, so assignable.
goto okarg
}
- case fint.Kind_&abi.KindMask == abi.Interface:
+ case fint.Kind() == abi.Interface:
ityp := (*interfacetype)(unsafe.Pointer(fint))
if len(ityp.Methods) == 0 {
// ok - satisfies empty interface
diff --git a/src/runtime/mgclimit.go b/src/runtime/mgclimit.go
index ad86fbd65b..80aeb71cad 100644
--- a/src/runtime/mgclimit.go
+++ b/src/runtime/mgclimit.go
@@ -209,14 +209,12 @@ func (l *gcCPULimiterState) updateLocked(now int64) {
for _, pp := range allp {
typ, duration := pp.limiterEvent.consume(now)
switch typ {
- case limiterEventIdleMarkWork:
- fallthrough
case limiterEventIdle:
- idleTime += duration
sched.idleTime.Add(duration)
- case limiterEventMarkAssist:
- fallthrough
- case limiterEventScavengeAssist:
+ idleTime += duration
+ case limiterEventIdleMarkWork:
+ idleTime += duration
+ case limiterEventMarkAssist, limiterEventScavengeAssist:
assistTime += duration
case limiterEventNone:
break
@@ -470,14 +468,12 @@ func (e *limiterEvent) stop(typ limiterEventType, now int64) {
}
// Account for the event.
switch typ {
- case limiterEventIdleMarkWork:
- gcCPULimiter.addIdleTime(duration)
case limiterEventIdle:
- gcCPULimiter.addIdleTime(duration)
sched.idleTime.Add(duration)
- case limiterEventMarkAssist:
- fallthrough
- case limiterEventScavengeAssist:
+ gcCPULimiter.addIdleTime(duration)
+ case limiterEventIdleMarkWork:
+ gcCPULimiter.addIdleTime(duration)
+ case limiterEventMarkAssist, limiterEventScavengeAssist:
gcCPULimiter.addAssistTime(duration)
default:
throw("limiterEvent.stop: invalid limiter event type found")
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index a136c7aeac..8b306045c5 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -415,13 +415,13 @@ func gcScanFinalizer(spf *specialfinalizer, s *mspan, gcw *gcWork) {
// Don't mark finalized object, but scan it so we retain everything it points to.
// A finalizer can be set for an inner byte of an object, find object beginning.
- p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
+ p := s.base() + spf.special.offset/s.elemsize*s.elemsize
// Mark everything that can be reached from
// the object (but *not* the object itself or
// we'll never collect it).
if !s.spanclass.noscan() {
- scanobject(p, gcw)
+ scanObject(p, gcw)
}
// The special itself is also a root.
@@ -1255,7 +1255,7 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
}
}
if b != 0 {
- scanobject(b, gcw)
+ scanObject(b, gcw)
} else if s != 0 {
scanSpan(s, gcw)
} else {
@@ -1359,7 +1359,7 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 {
}
}
if b != 0 {
- scanobject(b, gcw)
+ scanObject(b, gcw)
} else if s != 0 {
scanSpan(s, gcw)
} else {
@@ -1390,7 +1390,7 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 {
return workFlushed + gcw.heapScanWork
}
-// scanblock scans b as scanobject would, but using an explicit
+// scanblock scans b as scanObject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
@@ -1415,7 +1415,7 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState)
}
for j := 0; j < 8 && i < n; j++ {
if bits&1 != 0 {
- // Same work as in scanobject; see comments there.
+ // Same work as in scanObject; see comments there.
p := *(*uintptr)(unsafe.Pointer(b + i))
if p != 0 {
if stk != nil && p >= stk.stack.lo && p < stk.stack.hi {
@@ -1435,107 +1435,6 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState)
}
}
-// scanobject scans the object starting at b, adding pointers to gcw.
-// b must point to the beginning of a heap object or an oblet.
-// scanobject consults the GC bitmap for the pointer mask and the
-// spans for the size of the object.
-//
-//go:nowritebarrier
-func scanobject(b uintptr, gcw *gcWork) {
- // Prefetch object before we scan it.
- //
- // This will overlap fetching the beginning of the object with initial
- // setup before we start scanning the object.
- sys.Prefetch(b)
-
- // Find the bits for b and the size of the object at b.
- //
- // b is either the beginning of an object, in which case this
- // is the size of the object to scan, or it points to an
- // oblet, in which case we compute the size to scan below.
- s := spanOfUnchecked(b)
- n := s.elemsize
- if n == 0 {
- throw("scanobject n == 0")
- }
- if s.spanclass.noscan() {
- // Correctness-wise this is ok, but it's inefficient
- // if noscan objects reach here.
- throw("scanobject of a noscan object")
- }
-
- var tp typePointers
- if n > maxObletBytes {
- // Large object. Break into oblets for better
- // parallelism and lower latency.
- if b == s.base() {
- // Enqueue the other oblets to scan later.
- // Some oblets may be in b's scalar tail, but
- // these will be marked as "no more pointers",
- // so we'll drop out immediately when we go to
- // scan those.
- for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
- if !gcw.putObjFast(oblet) {
- gcw.putObj(oblet)
- }
- }
- }
-
- // Compute the size of the oblet. Since this object
- // must be a large object, s.base() is the beginning
- // of the object.
- n = s.base() + s.elemsize - b
- n = min(n, maxObletBytes)
- tp = s.typePointersOfUnchecked(s.base())
- tp = tp.fastForward(b-tp.addr, b+n)
- } else {
- tp = s.typePointersOfUnchecked(b)
- }
-
- var scanSize uintptr
- for {
- var addr uintptr
- if tp, addr = tp.nextFast(); addr == 0 {
- if tp, addr = tp.next(b + n); addr == 0 {
- break
- }
- }
-
- // Keep track of farthest pointer we found, so we can
- // update heapScanWork. TODO: is there a better metric,
- // now that we can skip scalar portions pretty efficiently?
- scanSize = addr - b + goarch.PtrSize
-
- // Work here is duplicated in scanblock and above.
- // If you make changes here, make changes there too.
- obj := *(*uintptr)(unsafe.Pointer(addr))
-
- // At this point we have extracted the next potential pointer.
- // Quickly filter out nil and pointers back to the current object.
- if obj != 0 && obj-b >= n {
- // Test if obj points into the Go heap and, if so,
- // mark the object.
- //
- // Note that it's possible for findObject to
- // fail if obj points to a just-allocated heap
- // object because of a race with growing the
- // heap. In this case, we know the object was
- // just allocated and hence will be marked by
- // allocation itself.
- if !tryDeferToSpanScan(obj, gcw) {
- if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
- greyobject(obj, b, addr-b, span, gcw, objIndex)
- }
- }
- }
- }
- gcw.bytesMarked += uint64(n)
- gcw.heapScanWork += int64(scanSize)
- if debug.gctrace > 1 {
- gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++
- }
-}
-
// scanConservative scans block [b, b+n) conservatively, treating any
// pointer-like value in the block as a pointer.
//
diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go
index ac2b1732f9..845857a817 100644
--- a/src/runtime/mgcmark_greenteagc.go
+++ b/src/runtime/mgcmark_greenteagc.go
@@ -110,7 +110,7 @@ func (o *spanScanOwnership) or(v spanScanOwnership) spanScanOwnership {
return spanScanOwnership(atomic.Or32(o32, uint32(v)<<off) >> off)
}
-func (imb *spanInlineMarkBits) init(class spanClass) {
+func (imb *spanInlineMarkBits) init(class spanClass, needzero bool) {
if imb == nil {
// This nil check and throw is almost pointless. Normally we would
// expect imb to never be nil. However, this is called on potentially
@@ -131,7 +131,13 @@ func (imb *spanInlineMarkBits) init(class spanClass) {
// See go.dev/issue/74375 for details.
throw("runtime: span inline mark bits nil?")
}
- *imb = spanInlineMarkBits{}
+ if needzero {
+ // Use memclrNoHeapPointers to avoid having the compiler make a worse
+ // decision. We know that imb is both aligned and a nice power-of-two
+ // size that works well for wider SIMD instructions. The compiler likely
+ // has no idea that imb is aligned to 128 bytes.
+ memclrNoHeapPointers(unsafe.Pointer(imb), unsafe.Sizeof(spanInlineMarkBits{}))
+ }
imb.class = class
}
@@ -180,25 +186,33 @@ func (s *mspan) initInlineMarkBits() {
if doubleCheckGreenTea && !gcUsesSpanInlineMarkBits(s.elemsize) {
throw("expected span with inline mark bits")
}
- s.inlineMarkBits().init(s.spanclass)
+ // Zeroing is only necessary if this span wasn't just freshly allocated from the OS.
+ s.inlineMarkBits().init(s.spanclass, s.needzero != 0)
}
-// mergeInlineMarks merges the span's inline mark bits into dst.
+// moveInlineMarks merges the span's inline mark bits into dst and clears them.
//
// gcUsesSpanInlineMarkBits(s.elemsize) must be true.
-func (s *mspan) mergeInlineMarks(dst *gcBits) {
+func (s *mspan) moveInlineMarks(dst *gcBits) {
if doubleCheckGreenTea && !gcUsesSpanInlineMarkBits(s.elemsize) {
throw("expected span with inline mark bits")
}
bytes := divRoundUp(uintptr(s.nelems), 8)
imb := s.inlineMarkBits()
- _ = imb.marks[bytes-1]
- for i := uintptr(0); i < bytes; i++ {
- *dst.bytep(i) |= imb.marks[i]
+ imbMarks := (*gc.ObjMask)(unsafe.Pointer(&imb.marks))
+ for i := uintptr(0); i < bytes; i += goarch.PtrSize {
+ marks := bswapIfBigEndian(imbMarks[i/goarch.PtrSize])
+ if i/goarch.PtrSize == uintptr(len(imb.marks)+1)/goarch.PtrSize-1 {
+ marks &^= 0xff << ((goarch.PtrSize - 1) * 8) // mask out class
+ }
+ *(*uintptr)(unsafe.Pointer(dst.bytep(i))) |= bswapIfBigEndian(marks)
}
if doubleCheckGreenTea && !s.spanclass.noscan() && imb.marks != imb.scans {
throw("marks don't match scans for span with pointer")
}
+
+ // Reset the inline mark bits.
+ imb.init(s.spanclass, true /* We know these bits are always dirty now. */)
}
// inlineMarkBits returns the inline mark bits for the span.
@@ -652,7 +666,7 @@ func spanSetScans(spanBase uintptr, nelems uint16, imb *spanInlineMarkBits, toSc
marks := imbMarks[i/goarch.PtrSize]
scans = bswapIfBigEndian(scans)
marks = bswapIfBigEndian(marks)
- if i/goarch.PtrSize == 64/goarch.PtrSize-1 {
+ if i/goarch.PtrSize == uintptr(len(imb.marks)+1)/goarch.PtrSize-1 {
scans &^= 0xff << ((goarch.PtrSize - 1) * 8) // mask out owned
marks &^= 0xff << ((goarch.PtrSize - 1) * 8) // mask out class
}
@@ -837,3 +851,107 @@ func (w *gcWork) flushScanStats(dst *[gc.NumSizeClasses]sizeClassScanStats) {
}
clear(w.stats[:])
}
+
+// scanObject scans the object starting at b, adding pointers to gcw.
+// b must point to the beginning of a heap object or an oblet.
+// scanObject consults the GC bitmap for the pointer mask and the
+// spans for the size of the object.
+//
+// Used only for !gcUsesSpanInlineMarkBits spans, but supports all
+// object sizes and is safe to be called on all heap objects.
+//
+//go:nowritebarrier
+func scanObject(b uintptr, gcw *gcWork) {
+ // Prefetch object before we scan it.
+ //
+ // This will overlap fetching the beginning of the object with initial
+ // setup before we start scanning the object.
+ sys.Prefetch(b)
+
+ // Find the bits for b and the size of the object at b.
+ //
+ // b is either the beginning of an object, in which case this
+ // is the size of the object to scan, or it points to an
+ // oblet, in which case we compute the size to scan below.
+ s := spanOfUnchecked(b)
+ n := s.elemsize
+ if n == 0 {
+ throw("scanObject n == 0")
+ }
+ if s.spanclass.noscan() {
+ // Correctness-wise this is ok, but it's inefficient
+ // if noscan objects reach here.
+ throw("scanObject of a noscan object")
+ }
+
+ var tp typePointers
+ if n > maxObletBytes {
+ // Large object. Break into oblets for better
+ // parallelism and lower latency.
+ if b == s.base() {
+ // Enqueue the other oblets to scan later.
+ // Some oblets may be in b's scalar tail, but
+ // these will be marked as "no more pointers",
+ // so we'll drop out immediately when we go to
+ // scan those.
+ for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
+ if !gcw.putObjFast(oblet) {
+ gcw.putObj(oblet)
+ }
+ }
+ }
+
+ // Compute the size of the oblet. Since this object
+ // must be a large object, s.base() is the beginning
+ // of the object.
+ n = s.base() + s.elemsize - b
+ n = min(n, maxObletBytes)
+ tp = s.typePointersOfUnchecked(s.base())
+ tp = tp.fastForward(b-tp.addr, b+n)
+ } else {
+ tp = s.typePointersOfUnchecked(b)
+ }
+
+ var scanSize uintptr
+ for {
+ var addr uintptr
+ if tp, addr = tp.nextFast(); addr == 0 {
+ if tp, addr = tp.next(b + n); addr == 0 {
+ break
+ }
+ }
+
+ // Keep track of farthest pointer we found, so we can
+ // update heapScanWork. TODO: is there a better metric,
+ // now that we can skip scalar portions pretty efficiently?
+ scanSize = addr - b + goarch.PtrSize
+
+ // Work here is duplicated in scanblock and above.
+ // If you make changes here, make changes there too.
+ obj := *(*uintptr)(unsafe.Pointer(addr))
+
+ // At this point we have extracted the next potential pointer.
+ // Quickly filter out nil and pointers back to the current object.
+ if obj != 0 && obj-b >= n {
+ // Test if obj points into the Go heap and, if so,
+ // mark the object.
+ //
+ // Note that it's possible for findObject to
+ // fail if obj points to a just-allocated heap
+ // object because of a race with growing the
+ // heap. In this case, we know the object was
+ // just allocated and hence will be marked by
+ // allocation itself.
+ if !tryDeferToSpanScan(obj, gcw) {
+ if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
+ greyobject(obj, b, addr-b, span, gcw, objIndex)
+ }
+ }
+ }
+ }
+ gcw.bytesMarked += uint64(n)
+ gcw.heapScanWork += int64(scanSize)
+ if debug.gctrace > 1 {
+ gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++
+ }
+}
diff --git a/src/runtime/mgcmark_nogreenteagc.go b/src/runtime/mgcmark_nogreenteagc.go
index c0ca5c21ea..6375773123 100644
--- a/src/runtime/mgcmark_nogreenteagc.go
+++ b/src/runtime/mgcmark_nogreenteagc.go
@@ -6,7 +6,12 @@
package runtime
-import "internal/runtime/gc"
+import (
+ "internal/goarch"
+ "internal/runtime/gc"
+ "internal/runtime/sys"
+ "unsafe"
+)
func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
bytep, mask := s.gcmarkBits.bitp(objIndex)
@@ -24,7 +29,7 @@ func tryDeferToSpanScan(p uintptr, gcw *gcWork) bool {
func (s *mspan) initInlineMarkBits() {
}
-func (s *mspan) mergeInlineMarks(to *gcBits) {
+func (s *mspan) moveInlineMarks(to *gcBits) {
throw("unimplemented")
}
@@ -110,3 +115,104 @@ func (w *gcWork) flushScanStats(dst *[gc.NumSizeClasses]sizeClassScanStats) {
}
clear(w.stats[:])
}
+
+// scanObject scans the object starting at b, adding pointers to gcw.
+// b must point to the beginning of a heap object or an oblet.
+// scanObject consults the GC bitmap for the pointer mask and the
+// spans for the size of the object.
+//
+//go:nowritebarrier
+func scanObject(b uintptr, gcw *gcWork) {
+ // Prefetch object before we scan it.
+ //
+ // This will overlap fetching the beginning of the object with initial
+ // setup before we start scanning the object.
+ sys.Prefetch(b)
+
+ // Find the bits for b and the size of the object at b.
+ //
+ // b is either the beginning of an object, in which case this
+ // is the size of the object to scan, or it points to an
+ // oblet, in which case we compute the size to scan below.
+ s := spanOfUnchecked(b)
+ n := s.elemsize
+ if n == 0 {
+ throw("scanObject n == 0")
+ }
+ if s.spanclass.noscan() {
+ // Correctness-wise this is ok, but it's inefficient
+ // if noscan objects reach here.
+ throw("scanObject of a noscan object")
+ }
+
+ var tp typePointers
+ if n > maxObletBytes {
+ // Large object. Break into oblets for better
+ // parallelism and lower latency.
+ if b == s.base() {
+ // Enqueue the other oblets to scan later.
+ // Some oblets may be in b's scalar tail, but
+ // these will be marked as "no more pointers",
+ // so we'll drop out immediately when we go to
+ // scan those.
+ for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
+ if !gcw.putObjFast(oblet) {
+ gcw.putObj(oblet)
+ }
+ }
+ }
+
+ // Compute the size of the oblet. Since this object
+ // must be a large object, s.base() is the beginning
+ // of the object.
+ n = s.base() + s.elemsize - b
+ n = min(n, maxObletBytes)
+ tp = s.typePointersOfUnchecked(s.base())
+ tp = tp.fastForward(b-tp.addr, b+n)
+ } else {
+ tp = s.typePointersOfUnchecked(b)
+ }
+
+ var scanSize uintptr
+ for {
+ var addr uintptr
+ if tp, addr = tp.nextFast(); addr == 0 {
+ if tp, addr = tp.next(b + n); addr == 0 {
+ break
+ }
+ }
+
+ // Keep track of farthest pointer we found, so we can
+ // update heapScanWork. TODO: is there a better metric,
+ // now that we can skip scalar portions pretty efficiently?
+ scanSize = addr - b + goarch.PtrSize
+
+ // Work here is duplicated in scanblock and above.
+ // If you make changes here, make changes there too.
+ obj := *(*uintptr)(unsafe.Pointer(addr))
+
+ // At this point we have extracted the next potential pointer.
+ // Quickly filter out nil and pointers back to the current object.
+ if obj != 0 && obj-b >= n {
+ // Test if obj points into the Go heap and, if so,
+ // mark the object.
+ //
+ // Note that it's possible for findObject to
+ // fail if obj points to a just-allocated heap
+ // object because of a race with growing the
+ // heap. In this case, we know the object was
+ // just allocated and hence will be marked by
+ // allocation itself.
+ if !tryDeferToSpanScan(obj, gcw) {
+ if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 {
+ greyobject(obj, b, addr-b, span, gcw, objIndex)
+ }
+ }
+ }
+ }
+ gcw.bytesMarked += uint64(n)
+ gcw.heapScanWork += int64(scanSize)
+ if debug.gctrace > 1 {
+ gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++
+ }
+}
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index a3bf2989df..b72cc461ba 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -553,7 +553,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
siter := newSpecialsIter(s)
for siter.valid() {
// A finalizer can be set for an inner byte of an object, find object beginning.
- objIndex := uintptr(siter.s.offset) / size
+ objIndex := siter.s.offset / size
p := s.base() + objIndex*size
mbits := s.markBitsForIndex(objIndex)
if !mbits.isMarked() {
@@ -561,7 +561,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
// Pass 1: see if it has a finalizer.
hasFinAndRevived := false
endOffset := p - s.base() + size
- for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
+ for tmp := siter.s; tmp != nil && tmp.offset < endOffset; tmp = tmp.next {
if tmp.kind == _KindSpecialFinalizer {
// Stop freeing of object if it has a finalizer.
mbits.setMarkedNonAtomic()
@@ -573,11 +573,11 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
// Pass 2: queue all finalizers and clear any weak handles. Weak handles are cleared
// before finalization as specified by the weak package. See the documentation
// for that package for more details.
- for siter.valid() && uintptr(siter.s.offset) < endOffset {
+ for siter.valid() && siter.s.offset < endOffset {
// Find the exact byte for which the special was setup
// (as opposed to object beginning).
special := siter.s
- p := s.base() + uintptr(special.offset)
+ p := s.base() + special.offset
if special.kind == _KindSpecialFinalizer || special.kind == _KindSpecialWeakHandle {
siter.unlinkAndNext()
freeSpecial(special, unsafe.Pointer(p), size)
@@ -589,11 +589,11 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
}
} else {
// Pass 2: the object is truly dead, free (and handle) all specials.
- for siter.valid() && uintptr(siter.s.offset) < endOffset {
+ for siter.valid() && siter.s.offset < endOffset {
// Find the exact byte for which the special was setup
// (as opposed to object beginning).
special := siter.s
- p := s.base() + uintptr(special.offset)
+ p := s.base() + special.offset
siter.unlinkAndNext()
freeSpecial(special, unsafe.Pointer(p), size)
}
@@ -650,9 +650,9 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
}
}
- // Copy over the inline mark bits if necessary.
+ // Copy over and clear the inline mark bits if necessary.
if gcUsesSpanInlineMarkBits(s.elemsize) {
- s.mergeInlineMarks(s.gcmarkBits)
+ s.moveInlineMarks(s.gcmarkBits)
}
// Check for zombie objects.
@@ -704,11 +704,6 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
// Initialize alloc bits cache.
s.refillAllocCache(0)
- // Reset the object queue, if we have one.
- if gcUsesSpanInlineMarkBits(s.elemsize) {
- s.initInlineMarkBits()
- }
-
// The span must be in our exclusive ownership until we update sweepgen,
// check for potential races.
if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 358de2f376..1776206573 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1490,7 +1490,7 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
s.allocBits = newAllocBits(uintptr(s.nelems))
// Adjust s.limit down to the object-containing part of the span.
- s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems)
+ s.limit = s.base() + s.elemsize*uintptr(s.nelems)
// It's safe to access h.sweepgen without the heap lock because it's
// only ever updated with the world stopped and we run on the
@@ -1549,6 +1549,8 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
func (h *mheap) grow(npage uintptr) (uintptr, bool) {
assertLockHeld(&h.lock)
+ firstGrow := h.curArena.base == 0
+
// We must grow the heap in whole palloc chunks.
// We call sysMap below but note that because we
// round up to pallocChunkPages which is on the order
@@ -1597,6 +1599,16 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
// Switch to the new space.
h.curArena.base = uintptr(av)
h.curArena.end = uintptr(av) + asize
+
+ if firstGrow && randomizeHeapBase {
+ // The top heapAddrBits-logHeapArenaBytes are randomized, we now
+ // want to randomize the next
+ // logHeapArenaBytes-log2(pallocChunkBytes) bits, making sure
+ // h.curArena.base is aligned to pallocChunkBytes.
+ bits := logHeapArenaBytes - logPallocChunkBytes
+ offset := nextHeapRandBits(bits)
+ h.curArena.base = alignDown(h.curArena.base|(offset<<logPallocChunkBytes), pallocChunkBytes)
+ }
}
// Recalculate nBase.
@@ -1627,6 +1639,22 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
// space ready for allocation.
h.pages.grow(v, nBase-v)
totalGrowth += nBase - v
+
+ if firstGrow && randomizeHeapBase {
+ // The top heapAddrBits-log2(pallocChunkBytes) bits are now randomized,
+ // we finally want to randomize the next
+ // log2(pallocChunkBytes)-log2(pageSize) bits, while maintaining
+ // alignment to pageSize. We do this by calculating a random number of
+ // pages into the current arena, and marking them as allocated. The
+ // address of the next available page becomes our fully randomized base
+ // heap address.
+ randOffset := nextHeapRandBits(logPallocChunkBytes)
+ randNumPages := alignDown(randOffset, pageSize) / pageSize
+ if randNumPages != 0 {
+ h.pages.markRandomPaddingPages(v, randNumPages)
+ }
+ }
+
return totalGrowth, true
}
@@ -2126,11 +2154,11 @@ func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special,
if s == nil {
break
}
- if offset == uintptr(s.offset) && kind == s.kind {
+ if offset == s.offset && kind == s.kind {
found = true
break
}
- if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && kind < s.kind) {
+ if offset < s.offset || (offset == s.offset && kind < s.kind) {
break
}
iter = &s.next
@@ -2173,7 +2201,7 @@ func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *p
// Mark everything reachable from the object
// so it's retained for the finalizer.
if !span.spanclass.noscan() {
- scanobject(base, gcw)
+ scanObject(base, gcw)
}
// Mark the finalizer itself, since the
// special isn't part of the GC'd heap.
@@ -2297,14 +2325,14 @@ func getCleanupContext(ptr uintptr, cleanupID uint64) *specialCheckFinalizer {
// Reached the end of the linked list. Stop searching at this point.
break
}
- if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind &&
+ if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
(*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
// The special is a cleanup and contains a matching cleanup id.
*iter = s.next
found = (*specialCheckFinalizer)(unsafe.Pointer(s))
break
}
- if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) {
+ if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
// The special is outside the region specified for that kind of
// special. The specials are sorted by kind.
break
@@ -2347,14 +2375,14 @@ func clearCleanupContext(ptr uintptr, cleanupID uint64) {
// Reached the end of the linked list. Stop searching at this point.
break
}
- if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind &&
+ if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
(*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
// The special is a cleanup and contains a matching cleanup id.
*iter = s.next
found = s
break
}
- if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) {
+ if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
// The special is outside the region specified for that kind of
// special. The specials are sorted by kind.
break
@@ -2450,7 +2478,7 @@ type specialWeakHandle struct {
//go:linkname internal_weak_runtime_registerWeakPointer weak.runtime_registerWeakPointer
func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer {
- return unsafe.Pointer(getOrAddWeakHandle(unsafe.Pointer(p)))
+ return unsafe.Pointer(getOrAddWeakHandle(p))
}
//go:linkname internal_weak_runtime_makeStrongFromWeak weak.runtime_makeStrongFromWeak
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index 4c58fb6e02..83db005051 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -972,6 +972,45 @@ func (p *pageAlloc) free(base, npages uintptr) {
p.update(base, npages, true, false)
}
+// markRandomPaddingPages marks the range of memory [base, base+npages*pageSize]
+// as both allocated and scavenged. This is used for randomizing the base heap
+// address. Both the alloc and scav bits are set so that the pages are not used
+// and so the memory accounting stats are correctly calculated.
+//
+// Similar to allocRange, it also updates the summaries to reflect the
+// newly-updated bitmap.
+//
+// p.mheapLock must be held.
+func (p *pageAlloc) markRandomPaddingPages(base uintptr, npages uintptr) {
+ assertLockHeld(p.mheapLock)
+
+ limit := base + npages*pageSize - 1
+ sc, ec := chunkIndex(base), chunkIndex(limit)
+ si, ei := chunkPageIndex(base), chunkPageIndex(limit)
+ if sc == ec {
+ chunk := p.chunkOf(sc)
+ chunk.allocRange(si, ei+1-si)
+ p.scav.index.alloc(sc, ei+1-si)
+ chunk.scavenged.setRange(si, ei+1-si)
+ } else {
+ chunk := p.chunkOf(sc)
+ chunk.allocRange(si, pallocChunkPages-si)
+ p.scav.index.alloc(sc, pallocChunkPages-si)
+ chunk.scavenged.setRange(si, pallocChunkPages-si)
+ for c := sc + 1; c < ec; c++ {
+ chunk := p.chunkOf(c)
+ chunk.allocAll()
+ p.scav.index.alloc(c, pallocChunkPages)
+ chunk.scavenged.setAll()
+ }
+ chunk = p.chunkOf(ec)
+ chunk.allocRange(0, ei+1)
+ p.scav.index.alloc(ec, ei+1)
+ chunk.scavenged.setRange(0, ei+1)
+ }
+ p.update(base, npages, true, true)
+}
+
const (
pallocSumBytes = unsafe.Sizeof(pallocSum(0))
diff --git a/src/runtime/mwbbuf.go b/src/runtime/mwbbuf.go
index 537d558592..e8c6064905 100644
--- a/src/runtime/mwbbuf.go
+++ b/src/runtime/mwbbuf.go
@@ -215,7 +215,7 @@ func wbBufFlush1(pp *p) {
// pointers we greyed. We use the buffer itself to temporarily
// record greyed pointers.
//
- // TODO: Should scanobject/scanblock just stuff pointers into
+ // TODO: Should scanObject/scanblock just stuff pointers into
// the wbBuf? Then this would become the sole greying path.
//
// TODO: We could avoid shading any of the "new" pointers in
diff --git a/src/runtime/netpoll_epoll.go b/src/runtime/netpoll_epoll.go
index c43bab0882..48c03d119f 100644
--- a/src/runtime/netpoll_epoll.go
+++ b/src/runtime/netpoll_epoll.go
@@ -8,7 +8,7 @@ package runtime
import (
"internal/runtime/atomic"
- "internal/runtime/syscall"
+ "internal/runtime/syscall/linux"
"unsafe"
)
@@ -20,21 +20,21 @@ var (
func netpollinit() {
var errno uintptr
- epfd, errno = syscall.EpollCreate1(syscall.EPOLL_CLOEXEC)
+ epfd, errno = linux.EpollCreate1(linux.EPOLL_CLOEXEC)
if errno != 0 {
println("runtime: epollcreate failed with", errno)
throw("runtime: netpollinit failed")
}
- efd, errno := syscall.Eventfd(0, syscall.EFD_CLOEXEC|syscall.EFD_NONBLOCK)
+ efd, errno := linux.Eventfd(0, linux.EFD_CLOEXEC|linux.EFD_NONBLOCK)
if errno != 0 {
println("runtime: eventfd failed with", -errno)
throw("runtime: eventfd failed")
}
- ev := syscall.EpollEvent{
- Events: syscall.EPOLLIN,
+ ev := linux.EpollEvent{
+ Events: linux.EPOLLIN,
}
*(**uintptr)(unsafe.Pointer(&ev.Data)) = &netpollEventFd
- errno = syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, efd, &ev)
+ errno = linux.EpollCtl(epfd, linux.EPOLL_CTL_ADD, efd, &ev)
if errno != 0 {
println("runtime: epollctl failed with", errno)
throw("runtime: epollctl failed")
@@ -47,16 +47,16 @@ func netpollIsPollDescriptor(fd uintptr) bool {
}
func netpollopen(fd uintptr, pd *pollDesc) uintptr {
- var ev syscall.EpollEvent
- ev.Events = syscall.EPOLLIN | syscall.EPOLLOUT | syscall.EPOLLRDHUP | syscall.EPOLLET
+ var ev linux.EpollEvent
+ ev.Events = linux.EPOLLIN | linux.EPOLLOUT | linux.EPOLLRDHUP | linux.EPOLLET
tp := taggedPointerPack(unsafe.Pointer(pd), pd.fdseq.Load())
*(*taggedPointer)(unsafe.Pointer(&ev.Data)) = tp
- return syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, int32(fd), &ev)
+ return linux.EpollCtl(epfd, linux.EPOLL_CTL_ADD, int32(fd), &ev)
}
func netpollclose(fd uintptr) uintptr {
- var ev syscall.EpollEvent
- return syscall.EpollCtl(epfd, syscall.EPOLL_CTL_DEL, int32(fd), &ev)
+ var ev linux.EpollEvent
+ return linux.EpollCtl(epfd, linux.EPOLL_CTL_DEL, int32(fd), &ev)
}
func netpollarm(pd *pollDesc, mode int) {
@@ -114,9 +114,9 @@ func netpoll(delay int64) (gList, int32) {
// 1e9 ms == ~11.5 days.
waitms = 1e9
}
- var events [128]syscall.EpollEvent
+ var events [128]linux.EpollEvent
retry:
- n, errno := syscall.EpollWait(epfd, events[:], int32(len(events)), waitms)
+ n, errno := linux.EpollWait(epfd, events[:], int32(len(events)), waitms)
if errno != 0 {
if errno != _EINTR {
println("runtime: epollwait on fd", epfd, "failed with", errno)
@@ -138,7 +138,7 @@ retry:
}
if *(**uintptr)(unsafe.Pointer(&ev.Data)) == &netpollEventFd {
- if ev.Events != syscall.EPOLLIN {
+ if ev.Events != linux.EPOLLIN {
println("runtime: netpoll: eventfd ready for", ev.Events)
throw("runtime: netpoll: eventfd ready for something unexpected")
}
@@ -156,10 +156,10 @@ retry:
}
var mode int32
- if ev.Events&(syscall.EPOLLIN|syscall.EPOLLRDHUP|syscall.EPOLLHUP|syscall.EPOLLERR) != 0 {
+ if ev.Events&(linux.EPOLLIN|linux.EPOLLRDHUP|linux.EPOLLHUP|linux.EPOLLERR) != 0 {
mode += 'r'
}
- if ev.Events&(syscall.EPOLLOUT|syscall.EPOLLHUP|syscall.EPOLLERR) != 0 {
+ if ev.Events&(linux.EPOLLOUT|linux.EPOLLHUP|linux.EPOLLERR) != 0 {
mode += 'w'
}
if mode != 0 {
@@ -167,7 +167,7 @@ retry:
pd := (*pollDesc)(tp.pointer())
tag := tp.tag()
if pd.fdseq.Load() == tag {
- pd.setEventErr(ev.Events == syscall.EPOLLERR, tag)
+ pd.setEventErr(ev.Events == linux.EPOLLERR, tag)
delta += netpollready(&toRun, pd, mode)
}
}
diff --git a/src/runtime/netpoll_windows.go b/src/runtime/netpoll_windows.go
index fb35d41c0c..93137e4709 100644
--- a/src/runtime/netpoll_windows.go
+++ b/src/runtime/netpoll_windows.go
@@ -102,7 +102,7 @@ var (
)
func netpollinit() {
- iocphandle = stdcall4(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX)
+ iocphandle = stdcall(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX)
if iocphandle == 0 {
println("runtime: CreateIoCompletionPort failed (errno=", getlasterror(), ")")
throw("runtime: netpollinit failed")
@@ -115,7 +115,7 @@ func netpollIsPollDescriptor(fd uintptr) bool {
func netpollopen(fd uintptr, pd *pollDesc) int32 {
key := packNetpollKey(netpollSourceReady, pd)
- if stdcall4(_CreateIoCompletionPort, fd, iocphandle, key, 0) == 0 {
+ if stdcall(_CreateIoCompletionPort, fd, iocphandle, key, 0) == 0 {
return int32(getlasterror())
}
return 0
@@ -137,7 +137,7 @@ func netpollBreak() {
}
key := packNetpollKey(netpollSourceBreak, nil)
- if stdcall4(_PostQueuedCompletionStatus, iocphandle, 0, key, 0) == 0 {
+ if stdcall(_PostQueuedCompletionStatus, iocphandle, 0, key, 0) == 0 {
println("runtime: netpoll: PostQueuedCompletionStatus failed (errno=", getlasterror(), ")")
throw("runtime: netpoll: PostQueuedCompletionStatus failed")
}
@@ -197,7 +197,7 @@ func netpoll(delay int64) (gList, int32) {
if delay != 0 {
mp.blocked = true
}
- if stdcall6(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 {
+ if stdcall(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 {
mp.blocked = false
errno := getlasterror()
if errno == _WAIT_TIMEOUT {
@@ -256,7 +256,7 @@ func netpollQueueTimer(delay int64) (signaled bool) {
// such as a netpollBreak, so we can get to this point with a timer that hasn't
// expired yet. In this case, the completion packet can still be picked up by
// another thread, so defer the cancellation until it is really necessary.
- errno := stdcall2(_NtCancelWaitCompletionPacket, mp.waitIocpHandle, 1)
+ errno := stdcall(_NtCancelWaitCompletionPacket, mp.waitIocpHandle, 1)
switch errno {
case STATUS_CANCELLED:
// STATUS_CANCELLED is returned when the associated timer has already expired,
@@ -264,12 +264,12 @@ func netpollQueueTimer(delay int64) (signaled bool) {
fallthrough
case STATUS_SUCCESS:
dt := -delay / 100 // relative sleep (negative), 100ns units
- if stdcall6(_SetWaitableTimer, mp.waitIocpTimer, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0) == 0 {
+ if stdcall(_SetWaitableTimer, mp.waitIocpTimer, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0) == 0 {
println("runtime: SetWaitableTimer failed; errno=", getlasterror())
throw("runtime: netpoll failed")
}
key := packNetpollKey(netpollSourceTimer, nil)
- if errno := stdcall8(_NtAssociateWaitCompletionPacket, mp.waitIocpHandle, iocphandle, mp.waitIocpTimer, key, 0, 0, 0, uintptr(unsafe.Pointer(&signaled))); errno != 0 {
+ if errno := stdcall(_NtAssociateWaitCompletionPacket, mp.waitIocpHandle, iocphandle, mp.waitIocpTimer, key, 0, 0, 0, uintptr(unsafe.Pointer(&signaled))); errno != 0 {
println("runtime: NtAssociateWaitCompletionPacket failed; errno=", errno)
throw("runtime: netpoll failed")
}
diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go
index 116995e5f6..3197c66537 100644
--- a/src/runtime/os3_solaris.go
+++ b/src/runtime/os3_solaris.go
@@ -238,6 +238,7 @@ func unminit() {
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
+//
//go:nowritebarrierrec
func mdestroy(mp *m) {
}
diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go
index 3847b7671a..4bb8576f42 100644
--- a/src/runtime/os_aix.go
+++ b/src/runtime/os_aix.go
@@ -27,6 +27,7 @@ type funcDescriptor struct {
type mOS struct {
waitsema uintptr // semaphore for parking on locks
perrno uintptr // pointer to tls errno
+ libcall libcall
}
//go:nosplit
@@ -194,6 +195,7 @@ func unminit() {
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
+//
//go:nowritebarrierrec
func mdestroy(mp *m) {
}
diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go
index 5aef34ff8f..0c7144e9d0 100644
--- a/src/runtime/os_darwin.go
+++ b/src/runtime/os_darwin.go
@@ -348,6 +348,7 @@ func unminit() {
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
+//
//go:nowritebarrierrec
func mdestroy(mp *m) {
}
diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go
index e22fd9b42f..fbbee64fd3 100644
--- a/src/runtime/os_dragonfly.go
+++ b/src/runtime/os_dragonfly.go
@@ -220,6 +220,7 @@ func unminit() {
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
+//
//go:nowritebarrierrec
func mdestroy(mp *m) {
}
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index 54f98ef4f8..0ec5e43007 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -9,7 +9,7 @@ import (
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/strconv"
- "internal/runtime/syscall"
+ "internal/runtime/syscall/linux"
"unsafe"
)
@@ -417,6 +417,7 @@ func unminit() {
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
+//
//go:nowritebarrierrec
func mdestroy(mp *m) {
}
@@ -469,7 +470,7 @@ func pipe2(flags int32) (r, w int32, errno int32)
//go:nosplit
func fcntl(fd, cmd, arg int32) (ret int32, errno int32) {
- r, _, err := syscall.Syscall6(syscall.SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0)
+ r, _, err := linux.Syscall6(linux.SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0)
return int32(r), int32(err)
}
@@ -772,7 +773,7 @@ func syscall_runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6 uintptr) (
// ensuring all threads execute system calls from multiple calls in the
// same order.
- r1, r2, errno := syscall.Syscall6(trap, a1, a2, a3, a4, a5, a6)
+ r1, r2, errno := linux.Syscall6(trap, a1, a2, a3, a4, a5, a6)
if GOARCH == "ppc64" || GOARCH == "ppc64le" {
// TODO(https://go.dev/issue/51192 ): ppc64 doesn't use r2.
r2 = 0
@@ -883,7 +884,7 @@ func runPerThreadSyscall() {
}
args := perThreadSyscall
- r1, r2, errno := syscall.Syscall6(args.trap, args.a1, args.a2, args.a3, args.a4, args.a5, args.a6)
+ r1, r2, errno := linux.Syscall6(args.trap, args.a1, args.a2, args.a3, args.a4, args.a5, args.a6)
if GOARCH == "ppc64" || GOARCH == "ppc64le" {
// TODO(https://go.dev/issue/51192 ): ppc64 doesn't use r2.
r2 = 0
@@ -922,6 +923,6 @@ func (c *sigctxt) sigFromSeccomp() bool {
//go:nosplit
func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32) {
- r, _, err := syscall.Syscall6(syscall.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0)
+ r, _, err := linux.Syscall6(linux.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0)
return int32(r), int32(err)
}
diff --git a/src/runtime/os_linux_riscv64.go b/src/runtime/os_linux_riscv64.go
index c4a4d4e50d..65fa601a29 100644
--- a/src/runtime/os_linux_riscv64.go
+++ b/src/runtime/os_linux_riscv64.go
@@ -5,7 +5,7 @@
package runtime
import (
- "internal/runtime/syscall"
+ "internal/runtime/syscall/linux"
"unsafe"
)
@@ -32,6 +32,6 @@ func internal_cpu_riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool {
}
// Passing in a cpuCount of 0 and a cpu of nil ensures that only extensions supported by all the
// cores are returned, which is the behaviour we want in internal/cpu.
- _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(unsafe.Pointer(&pairs[0])), uintptr(len(pairs)), uintptr(0), uintptr(unsafe.Pointer(nil)), uintptr(flags), 0)
+ _, _, e1 := linux.Syscall6(sys_RISCV_HWPROBE, uintptr(unsafe.Pointer(&pairs[0])), uintptr(len(pairs)), uintptr(0), uintptr(unsafe.Pointer(nil)), uintptr(flags), 0)
return e1 == 0
}
diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go
index 342ede9c53..f117253f34 100644
--- a/src/runtime/os_netbsd.go
+++ b/src/runtime/os_netbsd.go
@@ -101,9 +101,6 @@ var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)
// From NetBSD's <sys/sysctl.h>
const (
- _CTL_KERN = 1
- _KERN_OSREV = 3
-
_CTL_HW = 6
_HW_NCPU = 3
_HW_PAGESIZE = 7
@@ -141,13 +138,6 @@ func getPageSize() uintptr {
return 0
}
-func getOSRev() int {
- if osrev, ok := sysctlInt([]uint32{_CTL_KERN, _KERN_OSREV}); ok {
- return int(osrev)
- }
- return 0
-}
-
//go:nosplit
func semacreate(mp *m) {
}
@@ -268,7 +258,6 @@ func osinit() {
if physPageSize == 0 {
physPageSize = getPageSize()
}
- needSysmonWorkaround = getOSRev() < 902000000 // NetBSD 9.2
}
var urandom_dev = []byte("/dev/urandom\x00")
@@ -324,6 +313,7 @@ func unminit() {
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
+//
//go:nowritebarrierrec
func mdestroy(mp *m) {
}
diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go
index 02846851d6..3943111853 100644
--- a/src/runtime/os_openbsd.go
+++ b/src/runtime/os_openbsd.go
@@ -134,6 +134,54 @@ func semawakeup(mp *m) {
}
}
+// mstart_stub provides glue code to call mstart from pthread_create.
+func mstart_stub()
+
+// May run with m.p==nil, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func newosproc(mp *m) {
+ if false {
+ print("newosproc m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
+ }
+
+ // Initialize an attribute object.
+ var attr pthreadattr
+ if err := pthread_attr_init(&attr); err != 0 {
+ writeErrStr(failthreadcreate)
+ exit(1)
+ }
+
+ // Find out OS stack size for our own stack guard.
+ var stacksize uintptr
+ if pthread_attr_getstacksize(&attr, &stacksize) != 0 {
+ writeErrStr(failthreadcreate)
+ exit(1)
+ }
+ mp.g0.stack.hi = stacksize // for mstart
+
+ // Tell the pthread library we won't join with this thread.
+ if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
+ writeErrStr(failthreadcreate)
+ exit(1)
+ }
+
+ // Finally, create the thread. It starts at mstart_stub, which does some low-level
+ // setup and then calls mstart.
+ var oset sigset
+ sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
+ err := retryOnEAGAIN(func() int32 {
+ return pthread_create(&attr, abi.FuncPCABI0(mstart_stub), unsafe.Pointer(mp))
+ })
+ sigprocmask(_SIG_SETMASK, &oset, nil)
+ if err != 0 {
+ writeErrStr(failthreadcreate)
+ exit(1)
+ }
+
+ pthread_attr_destroy(&attr)
+}
+
func osinit() {
numCPUStartup = getCPUCount()
physPageSize = getPageSize()
@@ -160,9 +208,6 @@ func goenvs() {
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
gsignalSize := int32(32 * 1024)
- if GOARCH == "mips64" {
- gsignalSize = int32(64 * 1024)
- }
mp.gsignal = malg(gsignalSize)
mp.gsignal.m = mp
}
@@ -186,6 +231,7 @@ func unminit() {
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
+//
//go:nowritebarrierrec
func mdestroy(mp *m) {
}
diff --git a/src/runtime/os_openbsd_libc.go b/src/runtime/os_openbsd_libc.go
deleted file mode 100644
index 201f1629d9..0000000000
--- a/src/runtime/os_openbsd_libc.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build openbsd && !mips64
-
-package runtime
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-// mstart_stub provides glue code to call mstart from pthread_create.
-func mstart_stub()
-
-// May run with m.p==nil, so write barriers are not allowed.
-//
-//go:nowritebarrierrec
-func newosproc(mp *m) {
- if false {
- print("newosproc m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
- }
-
- // Initialize an attribute object.
- var attr pthreadattr
- if err := pthread_attr_init(&attr); err != 0 {
- writeErrStr(failthreadcreate)
- exit(1)
- }
-
- // Find out OS stack size for our own stack guard.
- var stacksize uintptr
- if pthread_attr_getstacksize(&attr, &stacksize) != 0 {
- writeErrStr(failthreadcreate)
- exit(1)
- }
- mp.g0.stack.hi = stacksize // for mstart
-
- // Tell the pthread library we won't join with this thread.
- if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
- writeErrStr(failthreadcreate)
- exit(1)
- }
-
- // Finally, create the thread. It starts at mstart_stub, which does some low-level
- // setup and then calls mstart.
- var oset sigset
- sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- err := retryOnEAGAIN(func() int32 {
- return pthread_create(&attr, abi.FuncPCABI0(mstart_stub), unsafe.Pointer(mp))
- })
- sigprocmask(_SIG_SETMASK, &oset, nil)
- if err != 0 {
- writeErrStr(failthreadcreate)
- exit(1)
- }
-
- pthread_attr_destroy(&attr)
-}
diff --git a/src/runtime/os_openbsd_mips64.go b/src/runtime/os_openbsd_mips64.go
deleted file mode 100644
index e5eeb2dcd1..0000000000
--- a/src/runtime/os_openbsd_mips64.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-//go:nosplit
-func cputicks() int64 {
- // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
- return nanotime()
-}
diff --git a/src/runtime/os_openbsd_syscall.go b/src/runtime/os_openbsd_syscall.go
deleted file mode 100644
index d784f76475..0000000000
--- a/src/runtime/os_openbsd_syscall.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build openbsd && mips64
-
-package runtime
-
-import (
- "internal/abi"
- "internal/goarch"
- "unsafe"
-)
-
-//go:noescape
-func tfork(param *tforkt, psize uintptr, mm *m, gg *g, fn uintptr) int32
-
-// May run with m.p==nil, so write barriers are not allowed.
-//
-//go:nowritebarrier
-func newosproc(mp *m) {
- stk := unsafe.Pointer(mp.g0.stack.hi)
- if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
- }
-
- // Stack pointer must point inside stack area (as marked with MAP_STACK),
- // rather than at the top of it.
- param := tforkt{
- tf_tcb: unsafe.Pointer(&mp.tls[0]),
- tf_tid: nil, // minit will record tid
- tf_stack: uintptr(stk) - goarch.PtrSize,
- }
-
- var oset sigset
- sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- ret := retryOnEAGAIN(func() int32 {
- errno := tfork(&param, unsafe.Sizeof(param), mp, mp.g0, abi.FuncPCABI0(mstart))
- // tfork returns negative errno
- return -errno
- })
- sigprocmask(_SIG_SETMASK, &oset, nil)
-
- if ret != 0 {
- print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", ret, ")\n")
- if ret == _EAGAIN {
- println("runtime: may need to increase max user processes (ulimit -p)")
- }
- throw("runtime.newosproc")
- }
-}
diff --git a/src/runtime/os_openbsd_syscall1.go b/src/runtime/os_openbsd_syscall1.go
deleted file mode 100644
index d32894ba6a..0000000000
--- a/src/runtime/os_openbsd_syscall1.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build openbsd && mips64
-
-package runtime
-
-//go:noescape
-func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32
-
-//go:noescape
-func thrwakeup(ident uintptr, n int32) int32
-
-func osyield()
-
-//go:nosplit
-func osyield_no_g() {
- osyield()
-}
diff --git a/src/runtime/os_openbsd_syscall2.go b/src/runtime/os_openbsd_syscall2.go
deleted file mode 100644
index 072f53320d..0000000000
--- a/src/runtime/os_openbsd_syscall2.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build openbsd && mips64
-
-package runtime
-
-import (
- "internal/runtime/atomic"
- "unsafe"
-)
-
-//go:noescape
-func sigaction(sig uint32, new, old *sigactiont)
-
-func kqueue() int32
-
-//go:noescape
-func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
-
-func raiseproc(sig uint32)
-
-func getthrid() int32
-func thrkill(tid int32, sig int)
-
-// read calls the read system call.
-// It returns a non-negative number of bytes written or a negative errno value.
-func read(fd int32, p unsafe.Pointer, n int32) int32
-
-func closefd(fd int32) int32
-
-func exit(code int32)
-func usleep(usec uint32)
-
-//go:nosplit
-func usleep_no_g(usec uint32) {
- usleep(usec)
-}
-
-// write1 calls the write system call.
-// It returns a non-negative number of bytes written or a negative errno value.
-//
-//go:noescape
-func write1(fd uintptr, p unsafe.Pointer, n int32) int32
-
-//go:noescape
-func open(name *byte, mode, perm int32) int32
-
-// return value is only set on linux to be used in osinit().
-func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32
-
-// exitThread terminates the current thread, writing *wait = freeMStack when
-// the stack is safe to reclaim.
-//
-//go:noescape
-func exitThread(wait *atomic.Uint32)
-
-//go:noescape
-func obsdsigprocmask(how int32, new sigset) sigset
-
-//go:nosplit
-//go:nowritebarrierrec
-func sigprocmask(how int32, new, old *sigset) {
- n := sigset(0)
- if new != nil {
- n = *new
- }
- r := obsdsigprocmask(how, n)
- if old != nil {
- *old = r
- }
-}
-
-func pipe2(flags int32) (r, w int32, errno int32)
-
-//go:noescape
-func setitimer(mode int32, new, old *itimerval)
-
-//go:noescape
-func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-
-// mmap calls the mmap system call. It is implemented in assembly.
-// We only pass the lower 32 bits of file offset to the
-// assembly routine; the higher bits (if required), should be provided
-// by the assembly routine as 0.
-// The err result is an OS error code such as ENOMEM.
-func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int)
-
-// munmap calls the munmap system call. It is implemented in assembly.
-func munmap(addr unsafe.Pointer, n uintptr)
-
-func nanotime1() int64
-
-//go:noescape
-func sigaltstack(new, old *stackt)
-
-func fcntl(fd, cmd, arg int32) (ret int32, errno int32)
-
-func walltime() (sec int64, nsec int32)
-
-func issetugid() int32
diff --git a/src/runtime/os_solaris.go b/src/runtime/os_solaris.go
index 5f6163f131..42b7e4a6bc 100644
--- a/src/runtime/os_solaris.go
+++ b/src/runtime/os_solaris.go
@@ -21,9 +21,8 @@ type mscratch struct {
type mOS struct {
waitsema uintptr // semaphore for parking on locks
perrno *int32 // pointer to tls errno
- // these are here because they are too large to be on the stack
- // of low-level NOSPLIT functions.
- //LibCall libcall;
+ // This is here to avoid using the G stack so the stack can move during the call.
+ libcall libcall
ts mts
scratch mscratch
}
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index 8f77cd50f8..ab4e165bae 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -8,6 +8,7 @@ import (
"internal/abi"
"internal/runtime/atomic"
"internal/runtime/sys"
+ "internal/runtime/syscall/windows"
"unsafe"
)
@@ -40,7 +41,6 @@ const (
//go:cgo_import_dynamic runtime._GetThreadContext GetThreadContext%2 "kernel32.dll"
//go:cgo_import_dynamic runtime._SetThreadContext SetThreadContext%2 "kernel32.dll"
//go:cgo_import_dynamic runtime._LoadLibraryExW LoadLibraryExW%3 "kernel32.dll"
-//go:cgo_import_dynamic runtime._LoadLibraryW LoadLibraryW%1 "kernel32.dll"
//go:cgo_import_dynamic runtime._PostQueuedCompletionStatus PostQueuedCompletionStatus%4 "kernel32.dll"
//go:cgo_import_dynamic runtime._QueryPerformanceCounter QueryPerformanceCounter%1 "kernel32.dll"
//go:cgo_import_dynamic runtime._QueryPerformanceFrequency QueryPerformanceFrequency%1 "kernel32.dll"
@@ -98,7 +98,6 @@ var (
_GetThreadContext,
_SetThreadContext,
_LoadLibraryExW,
- _LoadLibraryW,
_PostQueuedCompletionStatus,
_QueryPerformanceCounter,
_QueryPerformanceFrequency,
@@ -160,6 +159,9 @@ func tstart_stdcall(newm *m)
func wintls()
type mOS struct {
+ // This is here to avoid using the G stack so the stack can move during the call.
+ stdCallInfo windows.StdCallInfo
+
threadLock mutex // protects "thread" and prevents closing
thread uintptr // thread handle
@@ -210,19 +212,15 @@ func read(fd int32, p unsafe.Pointer, n int32) int32 {
type sigset struct{}
-// Call a Windows function with stdcall conventions,
-// and switch to os stack during the call.
-func asmstdcall(fn unsafe.Pointer)
-
var asmstdcallAddr unsafe.Pointer
-type winlibcall libcall
+type winlibcall windows.StdCallInfo
func windowsFindfunc(lib uintptr, name []byte) stdFunction {
if name[len(name)-1] != 0 {
throw("usage")
}
- f := stdcall2(_GetProcAddress, lib, uintptr(unsafe.Pointer(&name[0])))
+ f := stdcall(_GetProcAddress, lib, uintptr(unsafe.Pointer(&name[0])))
return stdFunction(unsafe.Pointer(f))
}
@@ -231,7 +229,7 @@ var sysDirectory [_MAX_PATH + 1]byte
var sysDirectoryLen uintptr
func initSysDirectory() {
- l := stdcall2(_GetSystemDirectoryA, uintptr(unsafe.Pointer(&sysDirectory[0])), uintptr(len(sysDirectory)-1))
+ l := stdcall(_GetSystemDirectoryA, uintptr(unsafe.Pointer(&sysDirectory[0])), uintptr(len(sysDirectory)-1))
if l == 0 || l > uintptr(len(sysDirectory)-1) {
throw("Unable to determine system directory")
}
@@ -245,20 +243,21 @@ func windows_GetSystemDirectory() string {
}
func windowsLoadSystemLib(name []uint16) uintptr {
- return stdcall3(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
+ const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
+ return stdcall(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
}
//go:linkname windows_QueryPerformanceCounter internal/syscall/windows.QueryPerformanceCounter
func windows_QueryPerformanceCounter() int64 {
var counter int64
- stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
+ stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
return counter
}
//go:linkname windows_QueryPerformanceFrequency internal/syscall/windows.QueryPerformanceFrequency
func windows_QueryPerformanceFrequency() int64 {
var frequency int64
- stdcall1(_QueryPerformanceFrequency, uintptr(unsafe.Pointer(&frequency)))
+ stdcall(_QueryPerformanceFrequency, uintptr(unsafe.Pointer(&frequency)))
return frequency
}
@@ -309,7 +308,7 @@ func monitorSuspendResume() {
var fn any = func(context uintptr, changeType uint32, setting uintptr) uintptr {
for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
if mp.resumesema != 0 {
- stdcall1(_SetEvent, mp.resumesema)
+ stdcall(_SetEvent, mp.resumesema)
}
}
return 0
@@ -318,13 +317,13 @@ func monitorSuspendResume() {
callback: compileCallback(*efaceOf(&fn), true),
}
handle := uintptr(0)
- stdcall3(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK,
+ stdcall(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK,
uintptr(unsafe.Pointer(&params)), uintptr(unsafe.Pointer(&handle)))
}
func getCPUCount() int32 {
var mask, sysmask uintptr
- ret := stdcall3(_GetProcessAffinityMask, currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
+ ret := stdcall(_GetProcessAffinityMask, currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
if ret != 0 {
n := 0
maskbits := int(unsafe.Sizeof(mask) * 8)
@@ -339,13 +338,13 @@ func getCPUCount() int32 {
}
// use GetSystemInfo if GetProcessAffinityMask fails
var info systeminfo
- stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
+ stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
return int32(info.dwnumberofprocessors)
}
func getPageSize() uintptr {
var info systeminfo
- stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
+ stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
return uintptr(info.dwpagesize)
}
@@ -384,9 +383,9 @@ func osRelax(relax bool) uint32 {
}
if relax {
- return uint32(stdcall1(_timeEndPeriod, 1))
+ return uint32(stdcall(_timeEndPeriod, 1))
} else {
- return uint32(stdcall1(_timeBeginPeriod, 1))
+ return uint32(stdcall(_timeBeginPeriod, 1))
}
}
@@ -415,7 +414,7 @@ func createHighResTimer() uintptr {
_TIMER_QUERY_STATE = 0x0001
_TIMER_MODIFY_STATE = 0x0002
)
- return stdcall4(_CreateWaitableTimerExW, 0, 0,
+ return stdcall(_CreateWaitableTimerExW, 0, 0,
_CREATE_WAITABLE_TIMER_HIGH_RESOLUTION,
_SYNCHRONIZE|_TIMER_QUERY_STATE|_TIMER_MODIFY_STATE)
}
@@ -425,7 +424,7 @@ func initHighResTimer() {
if h != 0 {
haveHighResTimer = true
haveHighResSleep = _NtCreateWaitCompletionPacket != nil
- stdcall1(_CloseHandle, h)
+ stdcall(_CloseHandle, h)
} else {
// Only load winmm.dll if we need it.
// This avoids a dependency on winmm.dll for Go programs
@@ -457,7 +456,7 @@ func initLongPathSupport() {
// Check that we're ≥ 10.0.15063.
info := _OSVERSIONINFOW{}
info.osVersionInfoSize = uint32(unsafe.Sizeof(info))
- stdcall1(_RtlGetVersion, uintptr(unsafe.Pointer(&info)))
+ stdcall(_RtlGetVersion, uintptr(unsafe.Pointer(&info)))
if info.majorVersion < 10 || (info.majorVersion == 10 && info.minorVersion == 0 && info.buildNumber < 15063) {
return
}
@@ -465,14 +464,14 @@ func initLongPathSupport() {
// Set the IsLongPathAwareProcess flag of the PEB's bit field.
// This flag is not documented, but it's known to be used
// by Windows to enable long path support.
- bitField := (*byte)(unsafe.Pointer(stdcall0(_RtlGetCurrentPeb) + PebBitFieldOffset))
+ bitField := (*byte)(unsafe.Pointer(stdcall(_RtlGetCurrentPeb) + PebBitFieldOffset))
*bitField |= IsLongPathAwareProcess
canUseLongPaths = true
}
func osinit() {
- asmstdcallAddr = unsafe.Pointer(abi.FuncPCABI0(asmstdcall))
+ asmstdcallAddr = unsafe.Pointer(windows.AsmStdCallAddr())
loadOptionalSyscalls()
@@ -494,13 +493,13 @@ func osinit() {
// of dedicated threads -- GUI, IO, computational, etc. Go processes use
// equivalent threads that all do a mix of GUI, IO, computations, etc.
// In such context dynamic priority boosting does nothing but harm, so we turn it off.
- stdcall2(_SetProcessPriorityBoost, currentProcess, 1)
+ stdcall(_SetProcessPriorityBoost, currentProcess, 1)
}
//go:nosplit
func readRandom(r []byte) int {
n := 0
- if stdcall2(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 {
+ if stdcall(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 {
n = len(r)
}
return n
@@ -510,7 +509,7 @@ func goenvs() {
// strings is a pointer to environment variable pairs in the form:
// "envA=valA\x00envB=valB\x00\x00" (in UTF-16)
// Two consecutive zero bytes end the list.
- strings := unsafe.Pointer(stdcall0(_GetEnvironmentStringsW))
+ strings := unsafe.Pointer(stdcall(_GetEnvironmentStringsW))
p := (*[1 << 24]uint16)(strings)[:]
n := 0
@@ -534,13 +533,13 @@ func goenvs() {
p = p[1:] // skip nil byte
}
- stdcall1(_FreeEnvironmentStringsW, uintptr(strings))
+ stdcall(_FreeEnvironmentStringsW, uintptr(strings))
// We call these all the way here, late in init, so that malloc works
// for the callback functions these generate.
var fn any = ctrlHandler
ctrlHandlerPC := compileCallback(*efaceOf(&fn), true)
- stdcall2(_SetConsoleCtrlHandler, ctrlHandlerPC, 1)
+ stdcall(_SetConsoleCtrlHandler, ctrlHandlerPC, 1)
monitorSuspendResume()
}
@@ -556,7 +555,7 @@ func exit(code int32) {
// kills the suspending thread, and then this thread suspends.
lock(&suspendLock)
atomic.Store(&exiting, 1)
- stdcall1(_ExitProcess, uintptr(code))
+ stdcall(_ExitProcess, uintptr(code))
}
// write1 must be nosplit because it's used as a last resort in
@@ -572,9 +571,9 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
var handle uintptr
switch fd {
case 1:
- handle = stdcall1(_GetStdHandle, _STD_OUTPUT_HANDLE)
+ handle = stdcall(_GetStdHandle, _STD_OUTPUT_HANDLE)
case 2:
- handle = stdcall1(_GetStdHandle, _STD_ERROR_HANDLE)
+ handle = stdcall(_GetStdHandle, _STD_ERROR_HANDLE)
default:
// assume fd is real windows handle.
handle = fd
@@ -590,7 +589,7 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
if !isASCII {
var m uint32
- isConsole := stdcall2(_GetConsoleMode, handle, uintptr(unsafe.Pointer(&m))) != 0
+ isConsole := stdcall(_GetConsoleMode, handle, uintptr(unsafe.Pointer(&m))) != 0
// If this is a console output, various non-unicode code pages can be in use.
// Use the dedicated WriteConsole call to ensure unicode is printed correctly.
if isConsole {
@@ -598,7 +597,7 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
}
}
var written uint32
- stdcall5(_WriteFile, handle, uintptr(buf), uintptr(n), uintptr(unsafe.Pointer(&written)), 0)
+ stdcall(_WriteFile, handle, uintptr(buf), uintptr(n), uintptr(unsafe.Pointer(&written)), 0)
return int32(written)
}
@@ -651,7 +650,7 @@ func writeConsoleUTF16(handle uintptr, b []uint16) {
return
}
var written uint32
- stdcall5(_WriteConsoleW,
+ stdcall(_WriteConsoleW,
handle,
uintptr(unsafe.Pointer(&b[0])),
uintptr(l),
@@ -672,7 +671,7 @@ func semasleep(ns int64) int32 {
var result uintptr
if ns < 0 {
- result = stdcall2(_WaitForSingleObject, getg().m.waitsema, uintptr(_INFINITE))
+ result = stdcall(_WaitForSingleObject, getg().m.waitsema, uintptr(_INFINITE))
} else {
start := nanotime()
elapsed := int64(0)
@@ -681,7 +680,7 @@ func semasleep(ns int64) int32 {
if ms == 0 {
ms = 1
}
- result = stdcall4(_WaitForMultipleObjects, 2,
+ result = stdcall(_WaitForMultipleObjects, 2,
uintptr(unsafe.Pointer(&[2]uintptr{getg().m.waitsema, getg().m.resumesema})),
0, uintptr(ms))
if result != _WAIT_OBJECT_0+1 {
@@ -724,7 +723,7 @@ func semasleep(ns int64) int32 {
//go:nosplit
func semawakeup(mp *m) {
- if stdcall1(_SetEvent, mp.waitsema) == 0 {
+ if stdcall(_SetEvent, mp.waitsema) == 0 {
systemstack(func() {
print("runtime: setevent failed; errno=", getlasterror(), "\n")
throw("runtime.semawakeup")
@@ -737,20 +736,20 @@ func semacreate(mp *m) {
if mp.waitsema != 0 {
return
}
- mp.waitsema = stdcall4(_CreateEventA, 0, 0, 0, 0)
+ mp.waitsema = stdcall(_CreateEventA, 0, 0, 0, 0)
if mp.waitsema == 0 {
systemstack(func() {
print("runtime: createevent failed; errno=", getlasterror(), "\n")
throw("runtime.semacreate")
})
}
- mp.resumesema = stdcall4(_CreateEventA, 0, 0, 0, 0)
+ mp.resumesema = stdcall(_CreateEventA, 0, 0, 0, 0)
if mp.resumesema == 0 {
systemstack(func() {
print("runtime: createevent failed; errno=", getlasterror(), "\n")
throw("runtime.semacreate")
})
- stdcall1(_CloseHandle, mp.waitsema)
+ stdcall(_CloseHandle, mp.waitsema)
mp.waitsema = 0
}
}
@@ -763,7 +762,7 @@ func semacreate(mp *m) {
//go:nosplit
func newosproc(mp *m) {
// We pass 0 for the stack size to use the default for this binary.
- thandle := stdcall6(_CreateThread, 0, 0,
+ thandle := stdcall(_CreateThread, 0, 0,
abi.FuncPCABI0(tstart_stdcall), uintptr(unsafe.Pointer(mp)),
0, 0)
@@ -781,7 +780,7 @@ func newosproc(mp *m) {
}
// Close thandle to avoid leaking the thread object if it exits.
- stdcall1(_CloseHandle, thandle)
+ stdcall(_CloseHandle, thandle)
}
// Used by the C library build mode. On Linux this function would allocate a
@@ -829,7 +828,7 @@ func sigblock(exiting bool) {
// Called on the new thread, cannot allocate Go memory.
func minit() {
var thandle uintptr
- if stdcall7(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+ if stdcall(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
print("runtime.minit: duplicatehandle failed; errno=", getlasterror(), "\n")
throw("runtime.minit: duplicatehandle failed")
}
@@ -837,7 +836,7 @@ func minit() {
mp := getg().m
lock(&mp.threadLock)
mp.thread = thandle
- mp.procid = uint64(stdcall0(_GetCurrentThreadId))
+ mp.procid = uint64(stdcall(_GetCurrentThreadId))
// Configure usleep timer, if possible.
if mp.highResTimer == 0 && haveHighResTimer {
@@ -854,7 +853,7 @@ func minit() {
throw("CreateWaitableTimerEx when creating timer failed")
}
const GENERIC_ALL = 0x10000000
- errno := stdcall3(_NtCreateWaitCompletionPacket, uintptr(unsafe.Pointer(&mp.waitIocpHandle)), GENERIC_ALL, 0)
+ errno := stdcall(_NtCreateWaitCompletionPacket, uintptr(unsafe.Pointer(&mp.waitIocpHandle)), GENERIC_ALL, 0)
if mp.waitIocpHandle == 0 {
print("runtime: NtCreateWaitCompletionPacket failed; errno=", errno, "\n")
throw("NtCreateWaitCompletionPacket failed")
@@ -865,7 +864,7 @@ func minit() {
// Query the true stack base from the OS. Currently we're
// running on a small assumed stack.
var mbi memoryBasicInformation
- res := stdcall3(_VirtualQuery, uintptr(unsafe.Pointer(&mbi)), uintptr(unsafe.Pointer(&mbi)), unsafe.Sizeof(mbi))
+ res := stdcall(_VirtualQuery, uintptr(unsafe.Pointer(&mbi)), uintptr(unsafe.Pointer(&mbi)), unsafe.Sizeof(mbi))
if res == 0 {
print("runtime: VirtualQuery failed; errno=", getlasterror(), "\n")
throw("VirtualQuery for stack base failed")
@@ -897,7 +896,7 @@ func unminit() {
mp := getg().m
lock(&mp.threadLock)
if mp.thread != 0 {
- stdcall1(_CloseHandle, mp.thread)
+ stdcall(_CloseHandle, mp.thread)
mp.thread = 0
}
unlock(&mp.threadLock)
@@ -909,56 +908,64 @@ func unminit() {
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
//
// This always runs without a P, so //go:nowritebarrierrec is required.
+//
//go:nowritebarrierrec
//go:nosplit
func mdestroy(mp *m) {
if mp.highResTimer != 0 {
- stdcall1(_CloseHandle, mp.highResTimer)
+ stdcall(_CloseHandle, mp.highResTimer)
mp.highResTimer = 0
}
if mp.waitIocpTimer != 0 {
- stdcall1(_CloseHandle, mp.waitIocpTimer)
+ stdcall(_CloseHandle, mp.waitIocpTimer)
mp.waitIocpTimer = 0
}
if mp.waitIocpHandle != 0 {
- stdcall1(_CloseHandle, mp.waitIocpHandle)
+ stdcall(_CloseHandle, mp.waitIocpHandle)
mp.waitIocpHandle = 0
}
if mp.waitsema != 0 {
- stdcall1(_CloseHandle, mp.waitsema)
+ stdcall(_CloseHandle, mp.waitsema)
mp.waitsema = 0
}
if mp.resumesema != 0 {
- stdcall1(_CloseHandle, mp.resumesema)
+ stdcall(_CloseHandle, mp.resumesema)
mp.resumesema = 0
}
}
-// asmstdcall_trampoline calls asmstdcall converting from Go to C calling convention.
-func asmstdcall_trampoline(args unsafe.Pointer)
-
-// stdcall_no_g calls asmstdcall on os stack without using g.
+// stdcall_no_g is like [stdcall] but can be called without a G.
//
+//go:nowritebarrier
//go:nosplit
-func stdcall_no_g(fn stdFunction, n int, args uintptr) uintptr {
- libcall := libcall{
- fn: uintptr(unsafe.Pointer(fn)),
- n: uintptr(n),
- args: args,
+//go:uintptrkeepalive
+func stdcall_no_g(fn stdFunction, args ...uintptr) uintptr {
+ call := windows.StdCallInfo{
+ Fn: uintptr(unsafe.Pointer(fn)),
+ N: uintptr(len(args)),
+ }
+ if len(args) > 0 {
+ call.Args = uintptr(abi.NoEscape(unsafe.Pointer(&args[0])))
}
- asmstdcall_trampoline(noescape(unsafe.Pointer(&libcall)))
- return libcall.r1
+ windows.StdCall(&call)
+ return call.R1
}
-// Calling stdcall on os stack.
+// stdcall calls fn with the given arguments using the stdcall calling convention.
+// Must be called from the system stack.
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrier
//go:nosplit
-func stdcall(fn stdFunction) uintptr {
+//go:uintptrkeepalive
+func stdcall(fn stdFunction, args ...uintptr) uintptr {
gp := getg()
mp := gp.m
- mp.libcall.fn = uintptr(unsafe.Pointer(fn))
+ mp.stdCallInfo.Fn = uintptr(unsafe.Pointer(fn))
+ mp.stdCallInfo.N = uintptr(len(args))
+ if len(args) > 0 {
+ mp.stdCallInfo.Args = uintptr(abi.NoEscape(unsafe.Pointer(&args[0])))
+ }
resetLibcall := false
if mp.profilehz != 0 && mp.libcallsp == 0 {
// leave pc/sp for cpu profiler
@@ -969,112 +976,31 @@ func stdcall(fn stdFunction) uintptr {
mp.libcallsp = sys.GetCallerSP()
resetLibcall = true // See comment in sys_darwin.go:libcCall
}
- asmcgocall(asmstdcallAddr, unsafe.Pointer(&mp.libcall))
+ asmcgocall(asmstdcallAddr, unsafe.Pointer(&mp.stdCallInfo))
if resetLibcall {
mp.libcallsp = 0
}
- return mp.libcall.r1
-}
-
-//go:nosplit
-func stdcall0(fn stdFunction) uintptr {
- mp := getg().m
- mp.libcall.n = 0
- mp.libcall.args = 0
- return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall1(fn stdFunction, a0 uintptr) uintptr {
- mp := getg().m
- mp.libcall.n = 1
- mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0)))
- return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall2(fn stdFunction, a0, a1 uintptr) uintptr {
- mp := getg().m
- mp.libcall.n = 2
- mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0)))
- return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall3(fn stdFunction, a0, a1, a2 uintptr) uintptr {
- mp := getg().m
- mp.libcall.n = 3
- mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0)))
- return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall4(fn stdFunction, a0, a1, a2, a3 uintptr) uintptr {
- mp := getg().m
- mp.libcall.n = 4
- mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0)))
- return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall5(fn stdFunction, a0, a1, a2, a3, a4 uintptr) uintptr {
- mp := getg().m
- mp.libcall.n = 5
- mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0)))
- return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall6(fn stdFunction, a0, a1, a2, a3, a4, a5 uintptr) uintptr {
- mp := getg().m
- mp.libcall.n = 6
- mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0)))
- return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall7(fn stdFunction, a0, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
- mp := getg().m
- mp.libcall.n = 7
- mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0)))
- return stdcall(fn)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func stdcall8(fn stdFunction, a0, a1, a2, a3, a4, a5, a6, a7 uintptr) uintptr {
- mp := getg().m
- mp.libcall.n = 8
- mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0)))
- return stdcall(fn)
+ return mp.stdCallInfo.R1
}
// These must run on the system stack only.
//go:nosplit
func osyield_no_g() {
- stdcall_no_g(_SwitchToThread, 0, 0)
+ stdcall_no_g(_SwitchToThread)
}
//go:nosplit
func osyield() {
systemstack(func() {
- stdcall0(_SwitchToThread)
+ stdcall(_SwitchToThread)
})
}
//go:nosplit
func usleep_no_g(us uint32) {
timeout := uintptr(us) / 1000 // ms units
- args := [...]uintptr{_INVALID_HANDLE_VALUE, timeout}
- stdcall_no_g(_WaitForSingleObject, len(args), uintptr(noescape(unsafe.Pointer(&args[0]))))
+ stdcall_no_g(_WaitForSingleObject, _INVALID_HANDLE_VALUE, timeout)
}
//go:nosplit
@@ -1086,13 +1012,13 @@ func usleep(us uint32) {
if haveHighResTimer && getg().m.highResTimer != 0 {
h = getg().m.highResTimer
dt := -10 * int64(us) // relative sleep (negative), 100ns units
- stdcall6(_SetWaitableTimer, h, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0)
+ stdcall(_SetWaitableTimer, h, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0)
timeout = _INFINITE
} else {
h = _INVALID_HANDLE_VALUE
timeout = uintptr(us) / 1000 // ms units
}
- stdcall2(_WaitForSingleObject, h, timeout)
+ stdcall(_WaitForSingleObject, h, timeout)
})
}
@@ -1133,7 +1059,7 @@ func profilem(mp *m, thread uintptr) {
c = (*context)(unsafe.Pointer((uintptr(unsafe.Pointer(&cbuf[15]))) &^ 15))
c.contextflags = _CONTEXT_CONTROL
- stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
+ stdcall(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
gp := gFromSP(mp, c.sp())
@@ -1154,10 +1080,10 @@ func gFromSP(mp *m, sp uintptr) *g {
}
func profileLoop() {
- stdcall2(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST)
+ stdcall(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST)
for {
- stdcall2(_WaitForSingleObject, profiletimer, _INFINITE)
+ stdcall(_WaitForSingleObject, profiletimer, _INFINITE)
first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
for mp := first; mp != nil; mp = mp.alllink {
if mp == getg().m {
@@ -1175,7 +1101,7 @@ func profileLoop() {
}
// Acquire our own handle to the thread.
var thread uintptr
- if stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+ if stdcall(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
print("runtime: duplicatehandle failed; errno=", getlasterror(), "\n")
throw("duplicatehandle failed")
}
@@ -1185,9 +1111,9 @@ func profileLoop() {
// above and the SuspendThread. The handle
// will remain valid, but SuspendThread may
// fail.
- if int32(stdcall1(_SuspendThread, thread)) == -1 {
+ if int32(stdcall(_SuspendThread, thread)) == -1 {
// The thread no longer exists.
- stdcall1(_CloseHandle, thread)
+ stdcall(_CloseHandle, thread)
continue
}
if mp.profilehz != 0 && !mp.blocked {
@@ -1195,8 +1121,8 @@ func profileLoop() {
// was in the process of shutting down.
profilem(mp, thread)
}
- stdcall1(_ResumeThread, thread)
- stdcall1(_CloseHandle, thread)
+ stdcall(_ResumeThread, thread)
+ stdcall(_CloseHandle, thread)
}
}
}
@@ -1207,7 +1133,7 @@ func setProcessCPUProfiler(hz int32) {
if haveHighResTimer {
timer = createHighResTimer()
} else {
- timer = stdcall3(_CreateWaitableTimerA, 0, 0, 0)
+ timer = stdcall(_CreateWaitableTimerA, 0, 0, 0)
}
atomic.Storeuintptr(&profiletimer, timer)
newm(profileLoop, nil, -1)
@@ -1224,7 +1150,7 @@ func setThreadCPUProfiler(hz int32) {
}
due = int64(ms) * -10000
}
- stdcall6(_SetWaitableTimer, profiletimer, uintptr(unsafe.Pointer(&due)), uintptr(ms), 0, 0, 0)
+ stdcall(_SetWaitableTimer, profiletimer, uintptr(unsafe.Pointer(&due)), uintptr(ms), 0, 0, 0)
atomic.Store((*uint32)(unsafe.Pointer(&getg().m.profilehz)), uint32(hz))
}
@@ -1257,7 +1183,7 @@ func preemptM(mp *m) {
return
}
var thread uintptr
- if stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+ if stdcall(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
print("runtime.preemptM: duplicatehandle failed; errno=", getlasterror(), "\n")
throw("runtime.preemptM: duplicatehandle failed")
}
@@ -1277,9 +1203,9 @@ func preemptM(mp *m) {
lock(&suspendLock)
// Suspend the thread.
- if int32(stdcall1(_SuspendThread, thread)) == -1 {
+ if int32(stdcall(_SuspendThread, thread)) == -1 {
unlock(&suspendLock)
- stdcall1(_CloseHandle, thread)
+ stdcall(_CloseHandle, thread)
atomic.Store(&mp.preemptExtLock, 0)
// The thread no longer exists. This shouldn't be
// possible, but just acknowledge the request.
@@ -1296,7 +1222,7 @@ func preemptM(mp *m) {
// We have to get the thread context before inspecting the M
// because SuspendThread only requests a suspend.
// GetThreadContext actually blocks until it's suspended.
- stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
+ stdcall(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
unlock(&suspendLock)
@@ -1307,7 +1233,7 @@ func preemptM(mp *m) {
// Inject call to asyncPreempt
targetPC := abi.FuncPCABI0(asyncPreempt)
c.pushCall(targetPC, resumePC)
- stdcall2(_SetThreadContext, thread, uintptr(unsafe.Pointer(c)))
+ stdcall(_SetThreadContext, thread, uintptr(unsafe.Pointer(c)))
}
}
@@ -1316,8 +1242,8 @@ func preemptM(mp *m) {
// Acknowledge the preemption.
mp.preemptGen.Add(1)
- stdcall1(_ResumeThread, thread)
- stdcall1(_CloseHandle, thread)
+ stdcall(_ResumeThread, thread)
+ stdcall(_CloseHandle, thread)
}
// osPreemptExtEnter is called before entering external code that may
diff --git a/src/runtime/os_windows_arm.go b/src/runtime/os_windows_arm.go
index 10aff75e31..bc29843241 100644
--- a/src/runtime/os_windows_arm.go
+++ b/src/runtime/os_windows_arm.go
@@ -9,7 +9,7 @@ import "unsafe"
//go:nosplit
func cputicks() int64 {
var counter int64
- stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
+ stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
return counter
}
diff --git a/src/runtime/os_windows_arm64.go b/src/runtime/os_windows_arm64.go
index 7e413445ba..bd80c08b0e 100644
--- a/src/runtime/os_windows_arm64.go
+++ b/src/runtime/os_windows_arm64.go
@@ -9,6 +9,6 @@ import "unsafe"
//go:nosplit
func cputicks() int64 {
var counter int64
- stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
+ stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
return counter
}
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 95305b84bc..8f9ab4dd47 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -112,13 +112,13 @@ func panicCheck2(err string) {
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
panicCheck1(sys.GetCallerPC(), "index out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
+ panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsIndex})
}
//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
panicCheck1(sys.GetCallerPC(), "index out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
+ panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsIndex})
}
// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
@@ -126,25 +126,25 @@ func goPanicIndexU(x uint, y int) {
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
+ panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSliceAlen})
}
//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
+ panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSliceAlen})
}
//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
+ panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSliceAcap})
}
//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
+ panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSliceAcap})
}
// failures in the comparisons for s[x:y], 0 <= x <= y
@@ -152,57 +152,57 @@ func goPanicSliceAcapU(x uint, y int) {
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
+ panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSliceB})
}
//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
+ panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSliceB})
}
// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
+ panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
+ panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
+ panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
+ panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3Acap})
}
// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
+ panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
+ panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3B})
}
// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
+ panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
+ panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3C})
}
// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
panicCheck1(sys.GetCallerPC(), "slice length too short to convert to array or pointer to array")
- panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
+ panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsConvert})
}
// Implemented in assembly, as they take arguments in registers.
@@ -225,6 +225,99 @@ func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)
+func panicBounds() // in asm_GOARCH.s files, called from generated code
+func panicExtend() // in asm_GOARCH.s files, called from generated code (on 32-bit archs)
+func panicBounds64(pc uintptr, regs *[16]int64) { // called from panicBounds on 64-bit archs
+ f := findfunc(pc)
+ v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1)
+
+ code, signed, xIsReg, yIsReg, xVal, yVal := abi.BoundsDecode(int(v))
+
+ if code == abi.BoundsIndex {
+ panicCheck1(pc, "index out of range")
+ } else {
+ panicCheck1(pc, "slice bounds out of range")
+ }
+
+ var e boundsError
+ e.code = code
+ e.signed = signed
+ if xIsReg {
+ e.x = regs[xVal]
+ } else {
+ e.x = int64(xVal)
+ }
+ if yIsReg {
+ e.y = int(regs[yVal])
+ } else {
+ e.y = yVal
+ }
+ panic(e)
+}
+
+func panicBounds32(pc uintptr, regs *[16]int32) { // called from panicBounds on 32-bit archs
+ f := findfunc(pc)
+ v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1)
+
+ code, signed, xIsReg, yIsReg, xVal, yVal := abi.BoundsDecode(int(v))
+
+ if code == abi.BoundsIndex {
+ panicCheck1(pc, "index out of range")
+ } else {
+ panicCheck1(pc, "slice bounds out of range")
+ }
+
+ var e boundsError
+ e.code = code
+ e.signed = signed
+ if xIsReg {
+ if signed {
+ e.x = int64(regs[xVal])
+ } else {
+ e.x = int64(uint32(regs[xVal]))
+ }
+ } else {
+ e.x = int64(xVal)
+ }
+ if yIsReg {
+ e.y = int(regs[yVal])
+ } else {
+ e.y = yVal
+ }
+ panic(e)
+}
+
+func panicBounds32X(pc uintptr, regs *[16]int32) { // called from panicExtend on 32-bit archs
+ f := findfunc(pc)
+ v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1)
+
+ code, signed, xIsReg, yIsReg, xVal, yVal := abi.BoundsDecode(int(v))
+
+ if code == abi.BoundsIndex {
+ panicCheck1(pc, "index out of range")
+ } else {
+ panicCheck1(pc, "slice bounds out of range")
+ }
+
+ var e boundsError
+ e.code = code
+ e.signed = signed
+ if xIsReg {
+ // Our 4-bit register numbers are actually 2 2-bit register numbers.
+ lo := xVal & 3
+ hi := xVal >> 2
+ e.x = int64(regs[hi])<<32 + int64(uint32(regs[lo]))
+ } else {
+ e.x = int64(xVal)
+ }
+ if yIsReg {
+ e.y = int(regs[yVal])
+ } else {
+ e.y = yVal
+ }
+ panic(e)
+}
+
var shiftError = error(errorString("negative shift amount"))
//go:yeswritebarrierrec
@@ -771,6 +864,7 @@ func gopanic(e any) {
var p _panic
p.arg = e
+ p.gopanicFP = unsafe.Pointer(sys.GetCallerSP())
runningPanicDefers.Add(1)
@@ -865,10 +959,6 @@ func (p *_panic) nextDefer() (func(), bool) {
}
}
- // The assembler adjusts p.argp in wrapper functions that shouldn't
- // be visible to recover(), so we need to restore it each iteration.
- p.argp = add(p.startSP, sys.MinFrameSize)
-
for {
for p.deferBitsPtr != nil {
bits := *p.deferBitsPtr
@@ -993,27 +1083,89 @@ func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool {
}
// The implementation of the predeclared function recover.
-// Cannot split the stack because it needs to reliably
-// find the stack segment of its caller.
-//
-// TODO(rsc): Once we commit to CopyStackAlways,
-// this doesn't need to be nosplit.
-//
-//go:nosplit
-func gorecover(argp uintptr) any {
- // Must be in a function running as part of a deferred call during the panic.
- // Must be called from the topmost function of the call
- // (the function used in the defer statement).
- // p.argp is the argument pointer of that topmost deferred function call.
- // Compare against argp reported by caller.
- // If they match, the caller is the one who can recover.
+func gorecover() any {
gp := getg()
p := gp._panic
- if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
- p.recovered = true
- return p.arg
+ if p == nil || p.goexit || p.recovered {
+ return nil
+ }
+
+ // Check to see if the function that called recover() was
+ // deferred directly from the panicking function.
+ // For code like:
+ // func foo() {
+ // defer bar()
+ // panic("panic")
+ // }
+ // func bar() {
+ // recover()
+ // }
+ // Normally the stack would look like this:
+ // foo
+ // runtime.gopanic
+ // bar
+ // runtime.gorecover
+ //
+ // However, if the function we deferred requires a wrapper
+ // of some sort, we need to ignore the wrapper. In that case,
+ // the stack looks like:
+ // foo
+ // runtime.gopanic
+ // wrapper
+ // bar
+ // runtime.gorecover
+ // And we should also successfully recover.
+ //
+ // Finally, in the weird case "defer recover()", the stack looks like:
+ // foo
+ // runtime.gopanic
+ // wrapper
+ // runtime.gorecover
+ // And we should not recover in that case.
+ //
+ // So our criteria is, there must be exactly one non-wrapper
+ // frame between gopanic and gorecover.
+ //
+ // We don't recover this:
+ // defer func() { func() { recover() }() }()
+ // because there are 2 non-wrapper frames.
+ //
+ // We don't recover this:
+ // defer recover()
+ // because there are 0 non-wrapper frames.
+ canRecover := false
+ systemstack(func() {
+ var u unwinder
+ u.init(gp, 0)
+ u.next() // skip systemstack_switch
+ u.next() // skip gorecover
+ nonWrapperFrames := 0
+ loop:
+ for ; u.valid(); u.next() {
+ for iu, f := newInlineUnwinder(u.frame.fn, u.symPC()); f.valid(); f = iu.next(f) {
+ sf := iu.srcFunc(f)
+ switch sf.funcID {
+ case abi.FuncIDWrapper:
+ continue
+ case abi.FuncID_gopanic:
+ if u.frame.fp == uintptr(p.gopanicFP) && nonWrapperFrames > 0 {
+ canRecover = true
+ }
+ break loop
+ default:
+ nonWrapperFrames++
+ if nonWrapperFrames > 1 {
+ break loop
+ }
+ }
+ }
+ }
+ })
+ if !canRecover {
+ return nil
}
- return nil
+ p.recovered = true
+ return p.arg
}
//go:linkname sync_throw sync.throw
diff --git a/src/runtime/panic32.go b/src/runtime/panic32.go
index cd34485a96..9dd4c0eb2e 100644
--- a/src/runtime/panic32.go
+++ b/src/runtime/panic32.go
@@ -7,6 +7,7 @@
package runtime
import (
+ "internal/abi"
"internal/runtime/sys"
)
@@ -16,77 +17,77 @@ import (
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
func goPanicExtendIndex(hi int, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "index out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsIndex})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsIndex})
}
func goPanicExtendIndexU(hi uint, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "index out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsIndex})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsIndex})
}
// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicExtendSliceAlen(hi int, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSliceAlen})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSliceAlen})
}
func goPanicExtendSliceAlenU(hi uint, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSliceAlen})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSliceAlen})
}
func goPanicExtendSliceAcap(hi int, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSliceAcap})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSliceAcap})
}
func goPanicExtendSliceAcapU(hi uint, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSliceAcap})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSliceAcap})
}
// failures in the comparisons for s[x:y], 0 <= x <= y
func goPanicExtendSliceB(hi int, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSliceB})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSliceB})
}
func goPanicExtendSliceBU(hi uint, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSliceB})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSliceB})
}
// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicExtendSlice3Alen(hi int, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSlice3Alen})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSlice3Alen})
}
func goPanicExtendSlice3AlenU(hi uint, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSlice3Alen})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSlice3Alen})
}
func goPanicExtendSlice3Acap(hi int, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSlice3Acap})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSlice3Acap})
}
func goPanicExtendSlice3AcapU(hi uint, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSlice3Acap})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSlice3Acap})
}
// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicExtendSlice3B(hi int, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSlice3B})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSlice3B})
}
func goPanicExtendSlice3BU(hi uint, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSlice3B})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSlice3B})
}
// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicExtendSlice3C(hi int, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSlice3C})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSlice3C})
}
func goPanicExtendSlice3CU(hi uint, lo uint, y int) {
panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
- panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSlice3C})
+ panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSlice3C})
}
// Implemented in assembly, as they take arguments in registers.
diff --git a/src/runtime/pinner.go b/src/runtime/pinner.go
index 543bfdb7a4..424dd065ef 100644
--- a/src/runtime/pinner.go
+++ b/src/runtime/pinner.go
@@ -108,7 +108,7 @@ func pinnerGetPtr(i *any) unsafe.Pointer {
if etyp == nil {
panic(errorString("runtime.Pinner: argument is nil"))
}
- if kind := etyp.Kind_ & abi.KindMask; kind != abi.Pointer && kind != abi.UnsafePointer {
+ if kind := etyp.Kind(); kind != abi.Pointer && kind != abi.UnsafePointer {
panic(errorString("runtime.Pinner: argument is not a pointer: " + toRType(etyp).string()))
}
if inUserArenaChunk(uintptr(e.data)) {
diff --git a/src/runtime/plugin.go b/src/runtime/plugin.go
index 4b6821b1fb..49cf13cb64 100644
--- a/src/runtime/plugin.go
+++ b/src/runtime/plugin.go
@@ -88,7 +88,7 @@ func plugin_lastmoduleinit() (path string, syms map[string]any, initTasks []*ini
(*valp)[0] = unsafe.Pointer(t)
name := symName.Name()
- if t.Kind_&abi.KindMask == abi.Func {
+ if t.Kind() == abi.Func {
name = "." + name
}
syms[name] = val
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index bee3b26c0e..25d39d9ba3 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -862,10 +862,10 @@ func schedinit() {
ticks.init() // run as early as possible
moduledataverify()
stackinit()
+ randinit() // must run before mallocinit, alginit, mcommoninit
mallocinit()
godebug := getGodebugEarly()
cpuinit(godebug) // must run before alginit
- randinit() // must run before alginit, mcommoninit
alginit() // maps, hash, rand must not be used before this call
mcommoninit(gp.m, -1)
modulesinit() // provides activeModules
@@ -6200,10 +6200,6 @@ func checkdead() {
// This is a variable for testing purposes. It normally doesn't change.
var forcegcperiod int64 = 2 * 60 * 1e9
-// needSysmonWorkaround is true if the workaround for
-// golang.org/issue/42515 is needed on NetBSD.
-var needSysmonWorkaround bool = false
-
// haveSysmon indicates whether there is sysmon thread support.
//
// No threads on wasm yet, so no sysmon.
@@ -6312,26 +6308,6 @@ func sysmon() {
netpollAdjustWaiters(delta)
}
}
- if GOOS == "netbsd" && needSysmonWorkaround {
- // netpoll is responsible for waiting for timer
- // expiration, so we typically don't have to worry
- // about starting an M to service timers. (Note that
- // sleep for timeSleepUntil above simply ensures sysmon
- // starts running again when that timer expiration may
- // cause Go code to run again).
- //
- // However, netbsd has a kernel bug that sometimes
- // misses netpollBreak wake-ups, which can lead to
- // unbounded delays servicing timers. If we detect this
- // overrun, then startm to get something to handle the
- // timer.
- //
- // See issue 42515 and
- // https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=50094.
- if next := timeSleepUntil(); next < now {
- startm(nil, false, false)
- }
- }
// Check if we need to update GOMAXPROCS at most once per second.
if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
sysmonUpdateGOMAXPROCS()
diff --git a/src/runtime/race.go b/src/runtime/race.go
index 7e7bca76ac..2cd4e3a9a2 100644
--- a/src/runtime/race.go
+++ b/src/runtime/race.go
@@ -156,7 +156,7 @@ const raceenabled = true
// callerpc is a return PC of the function that calls this function,
// pc is start PC of the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
- kind := t.Kind_ & abi.KindMask
+ kind := t.Kind()
if kind == abi.Array || kind == abi.Struct {
// for composite objects we have to read every address
// because a write might happen to any subobject.
@@ -174,7 +174,7 @@ func race_ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) {
}
func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
- kind := t.Kind_ & abi.KindMask
+ kind := t.Kind()
if kind == abi.Array || kind == abi.Struct {
// for composite objects we have to write every address
// because a write might happen to any subobject.
diff --git a/src/runtime/race/testdata/rangefunc_test.go b/src/runtime/race/testdata/rangefunc_test.go
index 453c0733ed..986395bfb9 100644
--- a/src/runtime/race/testdata/rangefunc_test.go
+++ b/src/runtime/race/testdata/rangefunc_test.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.rangefunc
-
package race_test
import (
diff --git a/src/runtime/rt0_openbsd_mips64.s b/src/runtime/rt0_openbsd_mips64.s
deleted file mode 100644
index 82a8dfaba6..0000000000
--- a/src/runtime/rt0_openbsd_mips64.s
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "textflag.h"
-
-TEXT _rt0_mips64_openbsd(SB),NOSPLIT,$0
- JMP _main<>(SB)
-
-TEXT _rt0_mips64le_openbsd(SB),NOSPLIT,$0
- JMP _main<>(SB)
-
-TEXT _main<>(SB),NOSPLIT|NOFRAME,$0
- // In a statically linked binary, the stack contains argc,
- // argv as argc string pointers followed by a NULL, envv as a
- // sequence of string pointers followed by a NULL, and auxv.
- // There is no TLS base pointer.
-#ifdef GOARCH_mips64
- MOVW 4(R29), R4 // argc, big-endian ABI places int32 at offset 4
-#else
- MOVW 0(R29), R4 // argc
-#endif
- ADDV $8, R29, R5 // argv
- JMP main(SB)
-
-TEXT main(SB),NOSPLIT|NOFRAME,$0
- // in external linking, glibc jumps to main with argc in R4
- // and argv in R5
-
- // initialize REGSB = PC&0xffffffff00000000
- BGEZAL R0, 1(PC)
- SRLV $32, R31, RSB
- SLLV $32, RSB
-
- MOVV $runtime·rt0_go(SB), R1
- JMP (R1)
diff --git a/src/runtime/runtime-gdb.py b/src/runtime/runtime-gdb.py
index 6d99515176..345a59605e 100644
--- a/src/runtime/runtime-gdb.py
+++ b/src/runtime/runtime-gdb.py
@@ -160,14 +160,7 @@ class MapTypePrinter:
return str(self.val.type)
def children(self):
- fields = [f.name for f in self.val.type.strip_typedefs().target().fields()]
- if 'buckets' in fields:
- yield from self.old_map_children()
- else:
- yield from self.swiss_map_children()
-
- def swiss_map_children(self):
- SwissMapGroupSlots = 8 # see internal/abi:SwissMapGroupSlots
+ MapGroupSlots = 8 # see internal/abi:MapGroupSlots
cnt = 0
# Yield keys and elements in group.
@@ -175,7 +168,7 @@ class MapTypePrinter:
def group_slots(group):
ctrl = group['ctrl']
- for i in xrange(SwissMapGroupSlots):
+ for i in xrange(MapGroupSlots):
c = (ctrl >> (8*i)) & 0xff
if (c & 0x80) != 0:
# Empty or deleted
@@ -186,7 +179,7 @@ class MapTypePrinter:
yield str(cnt+1), group['slots'][i]['elem']
# The linker DWARF generation
- # (cmd/link/internal/ld.(*dwctxt).synthesizemaptypesSwiss) records
+ # (cmd/link/internal/ld.(*dwctxt).synthesizemaptypes) records
# dirPtr as a **table[K,V], but it may actually be two different types:
#
# For "full size" maps (dirLen > 0), dirPtr is actually a pointer to
@@ -249,7 +242,7 @@ class MapTypePrinter:
length = table['groups']['lengthMask'] + 1
# The linker DWARF generation
- # (cmd/link/internal/ld.(*dwctxt).synthesizemaptypesSwiss) records
+ # (cmd/link/internal/ld.(*dwctxt).synthesizemaptypes) records
# groups.data as a *group[K,V], but it is actually a pointer to
# variable length array *[length]group[K,V].
#
@@ -270,40 +263,6 @@ class MapTypePrinter:
yield from group_slots(group)
- def old_map_children(self):
- MapBucketCount = 8 # see internal/abi:OldMapBucketCount
- B = self.val['B']
- buckets = self.val['buckets']
- oldbuckets = self.val['oldbuckets']
- flags = self.val['flags']
- inttype = self.val['hash0'].type
- cnt = 0
- for bucket in xrange(2 ** int(B)):
- bp = buckets + bucket
- if oldbuckets:
- oldbucket = bucket & (2 ** (B - 1) - 1)
- oldbp = oldbuckets + oldbucket
- oldb = oldbp.dereference()
- if (oldb['overflow'].cast(inttype) & 1) == 0: # old bucket not evacuated yet
- if bucket >= 2 ** (B - 1):
- continue # already did old bucket
- bp = oldbp
- while bp:
- b = bp.dereference()
- for i in xrange(MapBucketCount):
- if b['tophash'][i] != 0:
- k = b['keys'][i]
- v = b['values'][i]
- if flags & 1:
- k = k.dereference()
- if flags & 2:
- v = v.dereference()
- yield str(cnt), k
- yield str(cnt + 1), v
- cnt += 2
- bp = b['overflow']
-
-
class ChanTypePrinter:
"""Pretty print chan[T] types.
diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go
index 47c1fe5851..e81efadeb3 100644
--- a/src/runtime/runtime-gdb_test.go
+++ b/src/runtime/runtime-gdb_test.go
@@ -8,8 +8,6 @@ import (
"bytes"
"flag"
"fmt"
- "internal/abi"
- "internal/goexperiment"
"internal/testenv"
"os"
"os/exec"
@@ -155,9 +153,6 @@ func checkPtraceScope(t *testing.T) {
}
}
-// NOTE: the maps below are allocated larger than abi.MapBucketCount
-// to ensure that they are not "optimized out".
-
var helloSource = `
import "fmt"
import "runtime"
@@ -166,19 +161,21 @@ var gslice []string
var smallmapvar map[string]string
func main() {
smallmapvar = make(map[string]string)
- mapvar := make(map[string]string, ` + strconv.FormatInt(abi.OldMapBucketCount+9, 10) + `)
- slicemap := make(map[string][]string,` + strconv.FormatInt(abi.OldMapBucketCount+3, 10) + `)
- chanint := make(chan int, 10)
- chanstr := make(chan string, 10)
- chanint <- 99
+ // NOTE: the maps below are allocated large to ensure that they are not
+ // "optimized out".
+ mapvar := make(map[string]string, 10)
+ slicemap := make(map[string][]string, 10)
+ chanint := make(chan int, 10)
+ chanstr := make(chan string, 10)
+ chanint <- 99
chanint <- 11
- chanstr <- "spongepants"
- chanstr <- "squarebob"
+ chanstr <- "spongepants"
+ chanstr <- "squarebob"
smallmapvar["abc"] = "def"
mapvar["abc"] = "def"
mapvar["ghi"] = "jkl"
slicemap["a"] = []string{"b","c","d"}
- slicemap["e"] = []string{"f","g","h"}
+ slicemap["e"] = []string{"f","g","h"}
strvar := "abc"
ptrvar := &strvar
slicevar := make([]string, 0, 16)
@@ -638,20 +635,10 @@ func TestGdbAutotmpTypes(t *testing.T) {
types := []string{
"[]main.astruct",
"main.astruct",
- }
- if goexperiment.SwissMap {
- types = append(types, []string{
- "groupReference<string,main.astruct>",
- "table<string,main.astruct>",
- "map<string,main.astruct>",
- "map<string,main.astruct> * map[string]main.astruct",
- }...)
- } else {
- types = append(types, []string{
- "bucket<string,main.astruct>",
- "hash<string,main.astruct>",
- "hash<string,main.astruct> * map[string]main.astruct",
- }...)
+ "groupReference<string,main.astruct>",
+ "table<string,main.astruct>",
+ "map<string,main.astruct>",
+ "map<string,main.astruct> * map[string]main.astruct",
}
for _, name := range types {
if !strings.Contains(sgot, name) {
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 1e2de52989..b5d2dcefad 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -597,9 +597,7 @@ type m struct {
freelink *m // on sched.freem
trace mTraceState
- // these are here because they are too large to be on the stack
- // of low-level NOSPLIT functions.
- libcall libcall
+ // These are here to avoid using the G stack so the stack can move during the call.
libcallpc uintptr // for cpu profiler
libcallsp uintptr
libcallg guintptr
@@ -1009,14 +1007,13 @@ type _defer struct {
//
// A _panic value must only ever live on the stack.
//
-// The argp and link fields are stack pointers, but don't need special
+// The gopanicFP and link fields are stack pointers, but don't need special
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
type _panic struct {
- argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
- arg any // argument to panic
- link *_panic // link to earlier panic
+ arg any // argument to panic
+ link *_panic // link to earlier panic
// startPC and startSP track where _panic.start was called.
startPC uintptr
@@ -1039,6 +1036,8 @@ type _panic struct {
repanicked bool // whether this panic repanicked
goexit bool
deferreturn bool
+
+ gopanicFP unsafe.Pointer // frame pointer of the gopanic frame
}
// savedOpenDeferState tracks the extra state from _panic that's
diff --git a/src/runtime/set_vma_name_linux.go b/src/runtime/set_vma_name_linux.go
index 100c2bfeca..9b6654f332 100644
--- a/src/runtime/set_vma_name_linux.go
+++ b/src/runtime/set_vma_name_linux.go
@@ -8,7 +8,7 @@ package runtime
import (
"internal/runtime/atomic"
- "internal/runtime/syscall"
+ "internal/runtime/syscall/linux"
"unsafe"
)
@@ -24,7 +24,7 @@ func setVMAName(start unsafe.Pointer, length uintptr, name string) {
n := copy(sysName[:], " Go: ")
copy(sysName[n:79], name) // leave final byte zero
- _, _, err := syscall.Syscall6(syscall.SYS_PRCTL, syscall.PR_SET_VMA, syscall.PR_SET_VMA_ANON_NAME, uintptr(start), length, uintptr(unsafe.Pointer(&sysName[0])), 0)
+ _, _, err := linux.Syscall6(linux.SYS_PRCTL, linux.PR_SET_VMA, linux.PR_SET_VMA_ANON_NAME, uintptr(start), length, uintptr(unsafe.Pointer(&sysName[0])), 0)
if err == _EINVAL {
prSetVMAUnsupported.Store(true)
}
diff --git a/src/runtime/signal_mips64x.go b/src/runtime/signal_mips64x.go
index cee1bf7a1b..eea2169408 100644
--- a/src/runtime/signal_mips64x.go
+++ b/src/runtime/signal_mips64x.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build (linux || openbsd) && (mips64 || mips64le)
+//go:build linux && (mips64 || mips64le)
package runtime
diff --git a/src/runtime/signal_openbsd_mips64.go b/src/runtime/signal_openbsd_mips64.go
deleted file mode 100644
index 54ed523c7b..0000000000
--- a/src/runtime/signal_openbsd_mips64.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "unsafe"
-)
-
-type sigctxt struct {
- info *siginfo
- ctxt unsafe.Pointer
-}
-
-//go:nosplit
-//go:nowritebarrierrec
-func (c *sigctxt) regs() *sigcontext {
- return (*sigcontext)(c.ctxt)
-}
-
-func (c *sigctxt) r0() uint64 { return c.regs().sc_regs[0] }
-func (c *sigctxt) r1() uint64 { return c.regs().sc_regs[1] }
-func (c *sigctxt) r2() uint64 { return c.regs().sc_regs[2] }
-func (c *sigctxt) r3() uint64 { return c.regs().sc_regs[3] }
-func (c *sigctxt) r4() uint64 { return c.regs().sc_regs[4] }
-func (c *sigctxt) r5() uint64 { return c.regs().sc_regs[5] }
-func (c *sigctxt) r6() uint64 { return c.regs().sc_regs[6] }
-func (c *sigctxt) r7() uint64 { return c.regs().sc_regs[7] }
-func (c *sigctxt) r8() uint64 { return c.regs().sc_regs[8] }
-func (c *sigctxt) r9() uint64 { return c.regs().sc_regs[9] }
-func (c *sigctxt) r10() uint64 { return c.regs().sc_regs[10] }
-func (c *sigctxt) r11() uint64 { return c.regs().sc_regs[11] }
-func (c *sigctxt) r12() uint64 { return c.regs().sc_regs[12] }
-func (c *sigctxt) r13() uint64 { return c.regs().sc_regs[13] }
-func (c *sigctxt) r14() uint64 { return c.regs().sc_regs[14] }
-func (c *sigctxt) r15() uint64 { return c.regs().sc_regs[15] }
-func (c *sigctxt) r16() uint64 { return c.regs().sc_regs[16] }
-func (c *sigctxt) r17() uint64 { return c.regs().sc_regs[17] }
-func (c *sigctxt) r18() uint64 { return c.regs().sc_regs[18] }
-func (c *sigctxt) r19() uint64 { return c.regs().sc_regs[19] }
-func (c *sigctxt) r20() uint64 { return c.regs().sc_regs[20] }
-func (c *sigctxt) r21() uint64 { return c.regs().sc_regs[21] }
-func (c *sigctxt) r22() uint64 { return c.regs().sc_regs[22] }
-func (c *sigctxt) r23() uint64 { return c.regs().sc_regs[23] }
-func (c *sigctxt) r24() uint64 { return c.regs().sc_regs[24] }
-func (c *sigctxt) r25() uint64 { return c.regs().sc_regs[25] }
-func (c *sigctxt) r26() uint64 { return c.regs().sc_regs[26] }
-func (c *sigctxt) r27() uint64 { return c.regs().sc_regs[27] }
-func (c *sigctxt) r28() uint64 { return c.regs().sc_regs[28] }
-func (c *sigctxt) r29() uint64 { return c.regs().sc_regs[29] }
-func (c *sigctxt) r30() uint64 { return c.regs().sc_regs[30] }
-func (c *sigctxt) r31() uint64 { return c.regs().sc_regs[31] }
-func (c *sigctxt) sp() uint64 { return c.regs().sc_regs[29] }
-
-//go:nosplit
-//go:nowritebarrierrec
-func (c *sigctxt) pc() uint64 { return c.regs().sc_pc }
-
-func (c *sigctxt) link() uint64 { return c.regs().sc_regs[31] }
-func (c *sigctxt) lo() uint64 { return c.regs().mullo }
-func (c *sigctxt) hi() uint64 { return c.regs().mulhi }
-
-func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
-func (c *sigctxt) sigaddr() uint64 {
- return *(*uint64)(add(unsafe.Pointer(c.info), 16))
-}
-
-func (c *sigctxt) set_r28(x uint64) { c.regs().sc_regs[28] = x }
-func (c *sigctxt) set_r30(x uint64) { c.regs().sc_regs[30] = x }
-func (c *sigctxt) set_pc(x uint64) { c.regs().sc_pc = x }
-func (c *sigctxt) set_sp(x uint64) { c.regs().sc_regs[29] = x }
-func (c *sigctxt) set_link(x uint64) { c.regs().sc_regs[31] = x }
-
-func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
-func (c *sigctxt) set_sigaddr(x uint64) {
- *(*uint64)(add(unsafe.Pointer(c.info), 16)) = x
-}
diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go
index 7d7734433e..07778c8ebe 100644
--- a/src/runtime/signal_windows.go
+++ b/src/runtime/signal_windows.go
@@ -18,24 +18,24 @@ const (
)
func preventErrorDialogs() {
- errormode := stdcall0(_GetErrorMode)
- stdcall1(_SetErrorMode, errormode|_SEM_FAILCRITICALERRORS|_SEM_NOGPFAULTERRORBOX|_SEM_NOOPENFILEERRORBOX)
+ errormode := stdcall(_GetErrorMode)
+ stdcall(_SetErrorMode, errormode|_SEM_FAILCRITICALERRORS|_SEM_NOGPFAULTERRORBOX|_SEM_NOOPENFILEERRORBOX)
// Disable WER fault reporting UI.
// Do this even if WER is disabled as a whole,
// as WER might be enabled later with setTraceback("wer")
// and we still want the fault reporting UI to be disabled if this happens.
var werflags uintptr
- stdcall2(_WerGetFlags, currentProcess, uintptr(unsafe.Pointer(&werflags)))
- stdcall1(_WerSetFlags, werflags|_WER_FAULT_REPORTING_NO_UI)
+ stdcall(_WerGetFlags, currentProcess, uintptr(unsafe.Pointer(&werflags)))
+ stdcall(_WerSetFlags, werflags|_WER_FAULT_REPORTING_NO_UI)
}
// enableWER re-enables Windows error reporting without fault reporting UI.
func enableWER() {
// re-enable Windows Error Reporting
- errormode := stdcall0(_GetErrorMode)
+ errormode := stdcall(_GetErrorMode)
if errormode&_SEM_NOGPFAULTERRORBOX != 0 {
- stdcall1(_SetErrorMode, errormode^_SEM_NOGPFAULTERRORBOX)
+ stdcall(_SetErrorMode, errormode^_SEM_NOGPFAULTERRORBOX)
}
}
@@ -47,14 +47,14 @@ func sehtramp()
func sigresume()
func initExceptionHandler() {
- stdcall2(_AddVectoredExceptionHandler, 1, abi.FuncPCABI0(exceptiontramp))
+ stdcall(_AddVectoredExceptionHandler, 1, abi.FuncPCABI0(exceptiontramp))
if GOARCH == "386" {
// use SetUnhandledExceptionFilter for windows-386.
// note: SetUnhandledExceptionFilter handler won't be called, if debugging.
- stdcall1(_SetUnhandledExceptionFilter, abi.FuncPCABI0(lastcontinuetramp))
+ stdcall(_SetUnhandledExceptionFilter, abi.FuncPCABI0(lastcontinuetramp))
} else {
- stdcall2(_AddVectoredContinueHandler, 1, abi.FuncPCABI0(firstcontinuetramp))
- stdcall2(_AddVectoredContinueHandler, 0, abi.FuncPCABI0(lastcontinuetramp))
+ stdcall(_AddVectoredContinueHandler, 1, abi.FuncPCABI0(firstcontinuetramp))
+ stdcall(_AddVectoredContinueHandler, 0, abi.FuncPCABI0(lastcontinuetramp))
}
}
@@ -279,11 +279,11 @@ func sehhandler(_ *exceptionrecord, _ uint64, _ *context, dctxt *_DISPATCHER_CON
ctxt := dctxt.ctx()
var base, sp uintptr
for {
- entry := stdcall3(_RtlLookupFunctionEntry, ctxt.ip(), uintptr(unsafe.Pointer(&base)), 0)
+ entry := stdcall(_RtlLookupFunctionEntry, ctxt.ip(), uintptr(unsafe.Pointer(&base)), 0)
if entry == 0 {
break
}
- stdcall8(_RtlVirtualUnwind, 0, base, ctxt.ip(), entry, uintptr(unsafe.Pointer(ctxt)), 0, uintptr(unsafe.Pointer(&sp)), 0)
+ stdcall(_RtlVirtualUnwind, 0, base, ctxt.ip(), entry, uintptr(unsafe.Pointer(ctxt)), 0, uintptr(unsafe.Pointer(&sp)), 0)
if sp < gp.stack.lo || gp.stack.hi <= sp {
break
}
@@ -467,7 +467,7 @@ func dieFromException(info *exceptionrecord, r *context) {
}
}
const FAIL_FAST_GENERATE_EXCEPTION_ADDRESS = 0x1
- stdcall3(_RaiseFailFastException, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(r)), FAIL_FAST_GENERATE_EXCEPTION_ADDRESS)
+ stdcall(_RaiseFailFastException, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(r)), FAIL_FAST_GENERATE_EXCEPTION_ADDRESS)
}
// gsignalStack is unused on Windows.
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index 79d3f6c0de..e31d5dccb2 100644
--- a/src/runtime/slice.go
+++ b/src/runtime/slice.go
@@ -397,5 +397,5 @@ func bytealg_MakeNoZero(len int) []byte {
panicmakeslicelen()
}
cap := roundupsize(uintptr(len), true)
- return unsafe.Slice((*byte)(mallocgc(uintptr(cap), nil, false)), cap)[:len]
+ return unsafe.Slice((*byte)(mallocgc(cap, nil, false)), cap)[:len]
}
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index 866c46a83d..56f2a00d76 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -647,8 +647,15 @@ func moduledataverify1(datap *moduledata) {
min := datap.textAddr(datap.ftab[0].entryoff)
max := datap.textAddr(datap.ftab[nftab].entryoff)
- if datap.minpc != min || datap.maxpc != max {
- println("minpc=", hex(datap.minpc), "min=", hex(min), "maxpc=", hex(datap.maxpc), "max=", hex(max))
+ minpc := datap.minpc
+ maxpc := datap.maxpc
+ if GOARCH == "wasm" {
+ // On Wasm, the func table contains the function index, whereas
+ // the "PC" is function index << 16 + block index.
+ maxpc = alignUp(maxpc, 1<<16) // round up for end PC
+ }
+ if minpc != min || maxpc != max {
+ println("minpc=", hex(minpc), "min=", hex(min), "maxpc=", hex(maxpc), "max=", hex(max))
throw("minpc or maxpc invalid")
}
@@ -694,6 +701,11 @@ func (md *moduledata) textAddr(off32 uint32) uintptr {
throw("runtime: text offset out of range")
}
}
+ if GOARCH == "wasm" {
+ // On Wasm, a text offset (e.g. in the method table) is function index, whereas
+ // the "PC" is function index << 16 + block index.
+ res <<= 16
+ }
return res
}
@@ -704,8 +716,17 @@ func (md *moduledata) textAddr(off32 uint32) uintptr {
//
//go:nosplit
func (md *moduledata) textOff(pc uintptr) (uint32, bool) {
- res := uint32(pc - md.text)
+ off := pc - md.text
+ if GOARCH == "wasm" {
+ // On Wasm, the func table contains the function index, whereas
+ // the "PC" is function index << 16 + block index.
+ off >>= 16
+ }
+ res := uint32(off)
if len(md.textsectmap) > 1 {
+ if GOARCH == "wasm" {
+ fatal("unexpected multiple text sections on Wasm")
+ }
for i, sect := range md.textsectmap {
if sect.baseaddr > pc {
// pc is not in any section.
@@ -904,6 +925,11 @@ func findfunc(pc uintptr) funcInfo {
}
x := uintptr(pcOff) + datap.text - datap.minpc // TODO: are datap.text and datap.minpc always equal?
+ if GOARCH == "wasm" {
+ // On Wasm, pcOff is the function index, whereas
+ // the "PC" is function index << 16 + block index.
+ x = uintptr(pcOff)<<16 + datap.text - datap.minpc
+ }
b := x / abi.FuncTabBucketSize
i := x % abi.FuncTabBucketSize / (abi.FuncTabBucketSize / nsub)
diff --git a/src/runtime/sys_aix_ppc64.s b/src/runtime/sys_aix_ppc64.s
index 66081977b1..a0ef7e111a 100644
--- a/src/runtime/sys_aix_ppc64.s
+++ b/src/runtime/sys_aix_ppc64.s
@@ -130,15 +130,15 @@ TEXT sigtramp<>(SB),NOSPLIT|NOFRAME|TOPFRAME,$0
// Save m->libcall. We need to do this because we
// might get interrupted by a signal in runtime·asmcgocall.
- MOVD (m_libcall+libcall_fn)(R6), R7
+ MOVD (m_mOS+mOS_libcall+libcall_fn)(R6), R7
MOVD R7, 96(R1)
- MOVD (m_libcall+libcall_args)(R6), R7
+ MOVD (m_mOS+mOS_libcall+libcall_args)(R6), R7
MOVD R7, 104(R1)
- MOVD (m_libcall+libcall_n)(R6), R7
+ MOVD (m_mOS+mOS_libcall+libcall_n)(R6), R7
MOVD R7, 112(R1)
- MOVD (m_libcall+libcall_r1)(R6), R7
+ MOVD (m_mOS+mOS_libcall+libcall_r1)(R6), R7
MOVD R7, 120(R1)
- MOVD (m_libcall+libcall_r2)(R6), R7
+ MOVD (m_mOS+mOS_libcall+libcall_r2)(R6), R7
MOVD R7, 128(R1)
// save errno, it might be EINTR; stuff we do here might reset it.
@@ -162,15 +162,15 @@ sigtramp:
// restore libcall
MOVD 96(R1), R7
- MOVD R7, (m_libcall+libcall_fn)(R6)
+ MOVD R7, (m_mOS+mOS_libcall+libcall_fn)(R6)
MOVD 104(R1), R7
- MOVD R7, (m_libcall+libcall_args)(R6)
+ MOVD R7, (m_mOS+mOS_libcall+libcall_args)(R6)
MOVD 112(R1), R7
- MOVD R7, (m_libcall+libcall_n)(R6)
+ MOVD R7, (m_mOS+mOS_libcall+libcall_n)(R6)
MOVD 120(R1), R7
- MOVD R7, (m_libcall+libcall_r1)(R6)
+ MOVD R7, (m_mOS+mOS_libcall+libcall_r1)(R6)
MOVD 128(R1), R7
- MOVD R7, (m_libcall+libcall_r2)(R6)
+ MOVD R7, (m_mOS+mOS_libcall+libcall_r2)(R6)
// restore errno
MOVD (m_mOS+mOS_perrno)(R6), R7
diff --git a/src/runtime/sys_libc.go b/src/runtime/sys_libc.go
index 72d8991559..214e879319 100644
--- a/src/runtime/sys_libc.go
+++ b/src/runtime/sys_libc.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build darwin || (openbsd && !mips64)
+//go:build darwin || openbsd
package runtime
diff --git a/src/runtime/sys_openbsd.go b/src/runtime/sys_openbsd.go
index c4b8489612..df503d24c6 100644
--- a/src/runtime/sys_openbsd.go
+++ b/src/runtime/sys_openbsd.go
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build openbsd && !mips64
-
package runtime
import (
"internal/abi"
+ "internal/runtime/atomic"
"unsafe"
)
@@ -61,6 +60,412 @@ func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32
}
func pthread_create_trampoline()
+//go:nosplit
+//go:cgo_unsafe_args
+func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(thrsleep_trampoline)), unsafe.Pointer(&ident))
+ KeepAlive(tsp)
+ KeepAlive(abort)
+ return ret
+}
+func thrsleep_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func thrwakeup(ident uintptr, n int32) int32 {
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(thrwakeup_trampoline)), unsafe.Pointer(&ident))
+}
+func thrwakeup_trampoline()
+
+//go:nosplit
+func osyield() {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil))
+}
+func sched_yield_trampoline()
+
+//go:nosplit
+func osyield_no_g() {
+ asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil))
+}
+
+// This is exported via linkname to assembly in runtime/cgo.
+//
+//go:linkname exit
+//go:nosplit
+//go:cgo_unsafe_args
+func exit(code int32) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(exit_trampoline)), unsafe.Pointer(&code))
+}
+func exit_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func getthrid() (tid int32) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(getthrid_trampoline)), unsafe.Pointer(&tid))
+ return
+}
+func getthrid_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func raiseproc(sig uint32) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(raiseproc_trampoline)), unsafe.Pointer(&sig))
+}
+func raiseproc_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func thrkill(tid int32, sig int) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(thrkill_trampoline)), unsafe.Pointer(&tid))
+}
+func thrkill_trampoline()
+
+// mmap is used to do low-level memory allocation via mmap. Don't allow stack
+// splits, since this function (used by sysAlloc) is called in a lot of low-level
+// parts of the runtime and callers often assume it won't acquire any locks.
+//
+//go:nosplit
+func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
+ args := struct {
+ addr unsafe.Pointer
+ n uintptr
+ prot, flags, fd int32
+ off uint32
+ ret1 unsafe.Pointer
+ ret2 int
+ }{addr, n, prot, flags, fd, off, nil, 0}
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(mmap_trampoline)), unsafe.Pointer(&args))
+ KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address.
+ return args.ret1, args.ret2
+}
+func mmap_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func munmap(addr unsafe.Pointer, n uintptr) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(munmap_trampoline)), unsafe.Pointer(&addr))
+ KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address.
+}
+func munmap_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func madvise(addr unsafe.Pointer, n uintptr, flags int32) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(madvise_trampoline)), unsafe.Pointer(&addr))
+ KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address.
+}
+func madvise_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func open(name *byte, mode, perm int32) (ret int32) {
+ ret = libcCall(unsafe.Pointer(abi.FuncPCABI0(open_trampoline)), unsafe.Pointer(&name))
+ KeepAlive(name)
+ return
+}
+func open_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func closefd(fd int32) int32 {
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(close_trampoline)), unsafe.Pointer(&fd))
+}
+func close_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func read(fd int32, p unsafe.Pointer, n int32) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(read_trampoline)), unsafe.Pointer(&fd))
+ KeepAlive(p)
+ return ret
+}
+func read_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(write_trampoline)), unsafe.Pointer(&fd))
+ KeepAlive(p)
+ return ret
+}
+func write_trampoline()
+
+func pipe2(flags int32) (r, w int32, errno int32) {
+ var p [2]int32
+ args := struct {
+ p unsafe.Pointer
+ flags int32
+ }{noescape(unsafe.Pointer(&p)), flags}
+ errno = libcCall(unsafe.Pointer(abi.FuncPCABI0(pipe2_trampoline)), unsafe.Pointer(&args))
+ return p[0], p[1], errno
+}
+func pipe2_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func setitimer(mode int32, new, old *itimerval) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(setitimer_trampoline)), unsafe.Pointer(&mode))
+ KeepAlive(new)
+ KeepAlive(old)
+}
+func setitimer_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func usleep(usec uint32) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
+}
+func usleep_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func usleep_no_g(usec uint32) {
+ asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
+}
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctl_trampoline)), unsafe.Pointer(&mib))
+ KeepAlive(mib)
+ KeepAlive(out)
+ KeepAlive(size)
+ KeepAlive(dst)
+ return ret
+}
+func sysctl_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func fcntl(fd, cmd, arg int32) (ret int32, errno int32) {
+ args := struct {
+ fd, cmd, arg int32
+ ret, errno int32
+ }{fd, cmd, arg, 0, 0}
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(fcntl_trampoline)), unsafe.Pointer(&args))
+ return args.ret, args.errno
+}
+func fcntl_trampoline()
+
+//go:nosplit
+func nanotime1() int64 {
+ var ts timespec
+ args := struct {
+ clock_id int32
+ tp unsafe.Pointer
+ }{_CLOCK_MONOTONIC, unsafe.Pointer(&ts)}
+ if errno := libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args)); errno < 0 {
+ // Avoid growing the nosplit stack.
+ systemstack(func() {
+ println("runtime: errno", -errno)
+ throw("clock_gettime failed")
+ })
+ }
+ return ts.tv_sec*1e9 + int64(ts.tv_nsec)
+}
+func clock_gettime_trampoline()
+
+//go:nosplit
+func walltime() (int64, int32) {
+ var ts timespec
+ args := struct {
+ clock_id int32
+ tp unsafe.Pointer
+ }{_CLOCK_REALTIME, unsafe.Pointer(&ts)}
+ if errno := libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args)); errno < 0 {
+ // Avoid growing the nosplit stack.
+ systemstack(func() {
+ println("runtime: errno", -errno)
+ throw("clock_gettime failed")
+ })
+ }
+ return ts.tv_sec, int32(ts.tv_nsec)
+}
+
+//go:nosplit
+//go:cgo_unsafe_args
+func kqueue() int32 {
+ return libcCall(unsafe.Pointer(abi.FuncPCABI0(kqueue_trampoline)), nil)
+}
+func kqueue_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 {
+ ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(kevent_trampoline)), unsafe.Pointer(&kq))
+ KeepAlive(ch)
+ KeepAlive(ev)
+ KeepAlive(ts)
+ return ret
+}
+func kevent_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sigaction(sig uint32, new *sigactiont, old *sigactiont) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaction_trampoline)), unsafe.Pointer(&sig))
+ KeepAlive(new)
+ KeepAlive(old)
+}
+func sigaction_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sigprocmask(how uint32, new *sigset, old *sigset) {
+ // sigprocmask is called from sigsave, which is called from needm.
+ // As such, we have to be able to run with no g here.
+ asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(sigprocmask_trampoline)), unsafe.Pointer(&how))
+ KeepAlive(new)
+ KeepAlive(old)
+}
+func sigprocmask_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sigaltstack(new *stackt, old *stackt) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaltstack_trampoline)), unsafe.Pointer(&new))
+ KeepAlive(new)
+ KeepAlive(old)
+}
+func sigaltstack_trampoline()
+
+// Not used on OpenBSD, but must be defined.
+func exitThread(wait *atomic.Uint32) {
+ throw("exitThread")
+}
+
+//go:nosplit
+//go:cgo_unsafe_args
+func issetugid() (ret int32) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(issetugid_trampoline)), unsafe.Pointer(&ret))
+ return
+}
+func issetugid_trampoline()
+
+// The X versions of syscall expect the libc call to return a 64-bit result.
+// Otherwise (the non-X version) expects a 32-bit result.
+// This distinction is required because an error is indicated by returning -1,
+// and we need to know whether to check 32 or 64 bits of the result.
+// (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.)
+
+// golang.org/x/sys linknames syscall_syscall
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
+//go:linkname syscall_syscall syscall.syscall
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
+ exitsyscall()
+ return
+}
+func syscall()
+
+//go:linkname syscall_syscallX syscall.syscallX
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&fn))
+ exitsyscall()
+ return
+}
+func syscallX()
+
+// golang.org/x/sys linknames syscall.syscall6
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
+//go:linkname syscall_syscall6 syscall.syscall6
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
+ exitsyscall()
+ return
+}
+func syscall6()
+
+//go:linkname syscall_syscall6X syscall.syscall6X
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))
+ exitsyscall()
+ return
+}
+func syscall6X()
+
+// golang.org/x/sys linknames syscall.syscall10
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
+//go:linkname syscall_syscall10 syscall.syscall10
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10)), unsafe.Pointer(&fn))
+ exitsyscall()
+ return
+}
+func syscall10()
+
+//go:linkname syscall_syscall10X syscall.syscall10X
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_syscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn))
+ exitsyscall()
+ return
+}
+func syscall10X()
+
+// golang.org/x/sys linknames syscall_rawSyscall
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
+//go:linkname syscall_rawSyscall syscall.rawSyscall
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
+ return
+}
+
+// golang.org/x/sys linknames syscall_rawSyscall6
+// (in addition to standard package syscall).
+// Do not remove or change the type signature.
+//
+//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
+ return
+}
+
+//go:linkname syscall_rawSyscall6X syscall.rawSyscall6X
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))
+ return
+}
+
+//go:linkname syscall_rawSyscall10X syscall.rawSyscall10X
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_rawSyscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn))
+ return
+}
+
// Tell the linker that the libc_* functions are to be found
// in a system library, with the libc_ prefix missing.
@@ -71,5 +476,40 @@ func pthread_create_trampoline()
//go:cgo_import_dynamic libc_pthread_create pthread_create "libpthread.so"
//go:cgo_import_dynamic libc_pthread_sigmask pthread_sigmask "libpthread.so"
+//go:cgo_import_dynamic libc_thrsleep __thrsleep "libc.so"
+//go:cgo_import_dynamic libc_thrwakeup __thrwakeup "libc.so"
+//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.so"
+
+//go:cgo_import_dynamic libc_errno __errno "libc.so"
+//go:cgo_import_dynamic libc_exit exit "libc.so"
+//go:cgo_import_dynamic libc_getthrid getthrid "libc.so"
+//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.so"
+//go:cgo_import_dynamic libc_thrkill thrkill "libc.so"
+
+//go:cgo_import_dynamic libc_mmap mmap "libc.so"
+//go:cgo_import_dynamic libc_munmap munmap "libc.so"
+//go:cgo_import_dynamic libc_madvise madvise "libc.so"
+
+//go:cgo_import_dynamic libc_open open "libc.so"
+//go:cgo_import_dynamic libc_close close "libc.so"
+//go:cgo_import_dynamic libc_read read "libc.so"
+//go:cgo_import_dynamic libc_write write "libc.so"
+//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so"
+
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so"
+//go:cgo_import_dynamic libc_setitimer setitimer "libc.so"
+//go:cgo_import_dynamic libc_usleep usleep "libc.so"
+//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
+//go:cgo_import_dynamic libc_getpid getpid "libc.so"
+//go:cgo_import_dynamic libc_kill kill "libc.so"
+//go:cgo_import_dynamic libc_kqueue kqueue "libc.so"
+//go:cgo_import_dynamic libc_kevent kevent "libc.so"
+
+//go:cgo_import_dynamic libc_sigaction sigaction "libc.so"
+//go:cgo_import_dynamic libc_sigaltstack sigaltstack "libc.so"
+
+//go:cgo_import_dynamic libc_issetugid issetugid "libc.so"
+
//go:cgo_import_dynamic _ _ "libpthread.so"
//go:cgo_import_dynamic _ _ "libc.so"
diff --git a/src/runtime/sys_openbsd1.go b/src/runtime/sys_openbsd1.go
deleted file mode 100644
index d852e3c58a..0000000000
--- a/src/runtime/sys_openbsd1.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build openbsd && !mips64
-
-package runtime
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-//go:nosplit
-//go:cgo_unsafe_args
-func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(thrsleep_trampoline)), unsafe.Pointer(&ident))
- KeepAlive(tsp)
- KeepAlive(abort)
- return ret
-}
-func thrsleep_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func thrwakeup(ident uintptr, n int32) int32 {
- return libcCall(unsafe.Pointer(abi.FuncPCABI0(thrwakeup_trampoline)), unsafe.Pointer(&ident))
-}
-func thrwakeup_trampoline()
-
-//go:nosplit
-func osyield() {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil))
-}
-func sched_yield_trampoline()
-
-//go:nosplit
-func osyield_no_g() {
- asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil))
-}
-
-//go:cgo_import_dynamic libc_thrsleep __thrsleep "libc.so"
-//go:cgo_import_dynamic libc_thrwakeup __thrwakeup "libc.so"
-//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.so"
-
-//go:cgo_import_dynamic _ _ "libc.so"
diff --git a/src/runtime/sys_openbsd2.go b/src/runtime/sys_openbsd2.go
deleted file mode 100644
index 8f5242018d..0000000000
--- a/src/runtime/sys_openbsd2.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build openbsd && !mips64
-
-package runtime
-
-import (
- "internal/abi"
- "internal/runtime/atomic"
- "unsafe"
-)
-
-// This is exported via linkname to assembly in runtime/cgo.
-//
-//go:linkname exit
-//go:nosplit
-//go:cgo_unsafe_args
-func exit(code int32) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(exit_trampoline)), unsafe.Pointer(&code))
-}
-func exit_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func getthrid() (tid int32) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(getthrid_trampoline)), unsafe.Pointer(&tid))
- return
-}
-func getthrid_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func raiseproc(sig uint32) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(raiseproc_trampoline)), unsafe.Pointer(&sig))
-}
-func raiseproc_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func thrkill(tid int32, sig int) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(thrkill_trampoline)), unsafe.Pointer(&tid))
-}
-func thrkill_trampoline()
-
-// mmap is used to do low-level memory allocation via mmap. Don't allow stack
-// splits, since this function (used by sysAlloc) is called in a lot of low-level
-// parts of the runtime and callers often assume it won't acquire any locks.
-//
-//go:nosplit
-func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
- args := struct {
- addr unsafe.Pointer
- n uintptr
- prot, flags, fd int32
- off uint32
- ret1 unsafe.Pointer
- ret2 int
- }{addr, n, prot, flags, fd, off, nil, 0}
- libcCall(unsafe.Pointer(abi.FuncPCABI0(mmap_trampoline)), unsafe.Pointer(&args))
- KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address.
- return args.ret1, args.ret2
-}
-func mmap_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func munmap(addr unsafe.Pointer, n uintptr) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(munmap_trampoline)), unsafe.Pointer(&addr))
- KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address.
-}
-func munmap_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func madvise(addr unsafe.Pointer, n uintptr, flags int32) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(madvise_trampoline)), unsafe.Pointer(&addr))
- KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address.
-}
-func madvise_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func open(name *byte, mode, perm int32) (ret int32) {
- ret = libcCall(unsafe.Pointer(abi.FuncPCABI0(open_trampoline)), unsafe.Pointer(&name))
- KeepAlive(name)
- return
-}
-func open_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func closefd(fd int32) int32 {
- return libcCall(unsafe.Pointer(abi.FuncPCABI0(close_trampoline)), unsafe.Pointer(&fd))
-}
-func close_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func read(fd int32, p unsafe.Pointer, n int32) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(read_trampoline)), unsafe.Pointer(&fd))
- KeepAlive(p)
- return ret
-}
-func read_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(write_trampoline)), unsafe.Pointer(&fd))
- KeepAlive(p)
- return ret
-}
-func write_trampoline()
-
-func pipe2(flags int32) (r, w int32, errno int32) {
- var p [2]int32
- args := struct {
- p unsafe.Pointer
- flags int32
- }{noescape(unsafe.Pointer(&p)), flags}
- errno = libcCall(unsafe.Pointer(abi.FuncPCABI0(pipe2_trampoline)), unsafe.Pointer(&args))
- return p[0], p[1], errno
-}
-func pipe2_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func setitimer(mode int32, new, old *itimerval) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(setitimer_trampoline)), unsafe.Pointer(&mode))
- KeepAlive(new)
- KeepAlive(old)
-}
-func setitimer_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func usleep(usec uint32) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
-}
-func usleep_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func usleep_no_g(usec uint32) {
- asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec))
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctl_trampoline)), unsafe.Pointer(&mib))
- KeepAlive(mib)
- KeepAlive(out)
- KeepAlive(size)
- KeepAlive(dst)
- return ret
-}
-func sysctl_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func fcntl(fd, cmd, arg int32) (ret int32, errno int32) {
- args := struct {
- fd, cmd, arg int32
- ret, errno int32
- }{fd, cmd, arg, 0, 0}
- libcCall(unsafe.Pointer(abi.FuncPCABI0(fcntl_trampoline)), unsafe.Pointer(&args))
- return args.ret, args.errno
-}
-func fcntl_trampoline()
-
-//go:nosplit
-func nanotime1() int64 {
- var ts timespec
- args := struct {
- clock_id int32
- tp unsafe.Pointer
- }{_CLOCK_MONOTONIC, unsafe.Pointer(&ts)}
- if errno := libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args)); errno < 0 {
- // Avoid growing the nosplit stack.
- systemstack(func() {
- println("runtime: errno", -errno)
- throw("clock_gettime failed")
- })
- }
- return ts.tv_sec*1e9 + int64(ts.tv_nsec)
-}
-func clock_gettime_trampoline()
-
-//go:nosplit
-func walltime() (int64, int32) {
- var ts timespec
- args := struct {
- clock_id int32
- tp unsafe.Pointer
- }{_CLOCK_REALTIME, unsafe.Pointer(&ts)}
- if errno := libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args)); errno < 0 {
- // Avoid growing the nosplit stack.
- systemstack(func() {
- println("runtime: errno", -errno)
- throw("clock_gettime failed")
- })
- }
- return ts.tv_sec, int32(ts.tv_nsec)
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func kqueue() int32 {
- return libcCall(unsafe.Pointer(abi.FuncPCABI0(kqueue_trampoline)), nil)
-}
-func kqueue_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 {
- ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(kevent_trampoline)), unsafe.Pointer(&kq))
- KeepAlive(ch)
- KeepAlive(ev)
- KeepAlive(ts)
- return ret
-}
-func kevent_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func sigaction(sig uint32, new *sigactiont, old *sigactiont) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaction_trampoline)), unsafe.Pointer(&sig))
- KeepAlive(new)
- KeepAlive(old)
-}
-func sigaction_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func sigprocmask(how uint32, new *sigset, old *sigset) {
- // sigprocmask is called from sigsave, which is called from needm.
- // As such, we have to be able to run with no g here.
- asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(sigprocmask_trampoline)), unsafe.Pointer(&how))
- KeepAlive(new)
- KeepAlive(old)
-}
-func sigprocmask_trampoline()
-
-//go:nosplit
-//go:cgo_unsafe_args
-func sigaltstack(new *stackt, old *stackt) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaltstack_trampoline)), unsafe.Pointer(&new))
- KeepAlive(new)
- KeepAlive(old)
-}
-func sigaltstack_trampoline()
-
-// Not used on OpenBSD, but must be defined.
-func exitThread(wait *atomic.Uint32) {
- throw("exitThread")
-}
-
-//go:nosplit
-//go:cgo_unsafe_args
-func issetugid() (ret int32) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(issetugid_trampoline)), unsafe.Pointer(&ret))
- return
-}
-func issetugid_trampoline()
-
-// Tell the linker that the libc_* functions are to be found
-// in a system library, with the libc_ prefix missing.
-
-//go:cgo_import_dynamic libc_errno __errno "libc.so"
-//go:cgo_import_dynamic libc_exit exit "libc.so"
-//go:cgo_import_dynamic libc_getthrid getthrid "libc.so"
-//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.so"
-//go:cgo_import_dynamic libc_thrkill thrkill "libc.so"
-
-//go:cgo_import_dynamic libc_mmap mmap "libc.so"
-//go:cgo_import_dynamic libc_munmap munmap "libc.so"
-//go:cgo_import_dynamic libc_madvise madvise "libc.so"
-
-//go:cgo_import_dynamic libc_open open "libc.so"
-//go:cgo_import_dynamic libc_close close "libc.so"
-//go:cgo_import_dynamic libc_read read "libc.so"
-//go:cgo_import_dynamic libc_write write "libc.so"
-//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so"
-
-//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so"
-//go:cgo_import_dynamic libc_setitimer setitimer "libc.so"
-//go:cgo_import_dynamic libc_usleep usleep "libc.so"
-//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
-//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
-//go:cgo_import_dynamic libc_getpid getpid "libc.so"
-//go:cgo_import_dynamic libc_kill kill "libc.so"
-//go:cgo_import_dynamic libc_kqueue kqueue "libc.so"
-//go:cgo_import_dynamic libc_kevent kevent "libc.so"
-
-//go:cgo_import_dynamic libc_sigaction sigaction "libc.so"
-//go:cgo_import_dynamic libc_sigaltstack sigaltstack "libc.so"
-
-//go:cgo_import_dynamic libc_issetugid issetugid "libc.so"
-
-//go:cgo_import_dynamic _ _ "libc.so"
diff --git a/src/runtime/sys_openbsd3.go b/src/runtime/sys_openbsd3.go
deleted file mode 100644
index de09ec5e25..0000000000
--- a/src/runtime/sys_openbsd3.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build openbsd && !mips64
-
-package runtime
-
-import (
- "internal/abi"
- "unsafe"
-)
-
-// The X versions of syscall expect the libc call to return a 64-bit result.
-// Otherwise (the non-X version) expects a 32-bit result.
-// This distinction is required because an error is indicated by returning -1,
-// and we need to know whether to check 32 or 64 bits of the result.
-// (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.)
-
-// golang.org/x/sys linknames syscall_syscall
-// (in addition to standard package syscall).
-// Do not remove or change the type signature.
-//
-//go:linkname syscall_syscall syscall.syscall
-//go:nosplit
-//go:cgo_unsafe_args
-func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
- exitsyscall()
- return
-}
-func syscall()
-
-//go:linkname syscall_syscallX syscall.syscallX
-//go:nosplit
-//go:cgo_unsafe_args
-func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&fn))
- exitsyscall()
- return
-}
-func syscallX()
-
-// golang.org/x/sys linknames syscall.syscall6
-// (in addition to standard package syscall).
-// Do not remove or change the type signature.
-//
-//go:linkname syscall_syscall6 syscall.syscall6
-//go:nosplit
-//go:cgo_unsafe_args
-func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
- exitsyscall()
- return
-}
-func syscall6()
-
-//go:linkname syscall_syscall6X syscall.syscall6X
-//go:nosplit
-//go:cgo_unsafe_args
-func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))
- exitsyscall()
- return
-}
-func syscall6X()
-
-// golang.org/x/sys linknames syscall.syscall10
-// (in addition to standard package syscall).
-// Do not remove or change the type signature.
-//
-//go:linkname syscall_syscall10 syscall.syscall10
-//go:nosplit
-//go:cgo_unsafe_args
-func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10)), unsafe.Pointer(&fn))
- exitsyscall()
- return
-}
-func syscall10()
-
-//go:linkname syscall_syscall10X syscall.syscall10X
-//go:nosplit
-//go:cgo_unsafe_args
-func syscall_syscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn))
- exitsyscall()
- return
-}
-func syscall10X()
-
-// golang.org/x/sys linknames syscall_rawSyscall
-// (in addition to standard package syscall).
-// Do not remove or change the type signature.
-//
-//go:linkname syscall_rawSyscall syscall.rawSyscall
-//go:nosplit
-//go:cgo_unsafe_args
-func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
- return
-}
-
-// golang.org/x/sys linknames syscall_rawSyscall6
-// (in addition to standard package syscall).
-// Do not remove or change the type signature.
-//
-//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
-//go:nosplit
-//go:cgo_unsafe_args
-func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
- return
-}
-
-//go:linkname syscall_rawSyscall6X syscall.rawSyscall6X
-//go:nosplit
-//go:cgo_unsafe_args
-func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))
- return
-}
-
-//go:linkname syscall_rawSyscall10X syscall.rawSyscall10X
-//go:nosplit
-//go:cgo_unsafe_args
-func syscall_rawSyscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn))
- return
-}
diff --git a/src/runtime/sys_openbsd_mips64.s b/src/runtime/sys_openbsd_mips64.s
deleted file mode 100644
index 7ac0db0480..0000000000
--- a/src/runtime/sys_openbsd_mips64.s
+++ /dev/null
@@ -1,388 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//
-// System calls and other sys.stuff for mips64, OpenBSD
-// /usr/src/sys/kern/syscalls.master for syscall numbers.
-//
-
-#include "go_asm.h"
-#include "go_tls.h"
-#include "textflag.h"
-
-#define CLOCK_REALTIME $0
-#define CLOCK_MONOTONIC $3
-
-// Exit the entire program (like C exit)
-TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0
- MOVW code+0(FP), R4 // arg 1 - status
- MOVV $1, R2 // sys_exit
- SYSCALL
- BEQ R7, 3(PC)
- MOVV $0, R2 // crash on syscall failure
- MOVV R2, (R2)
- RET
-
-// func exitThread(wait *atomic.Uint32)
-TEXT runtime·exitThread(SB),NOSPLIT,$0
- MOVV wait+0(FP), R4 // arg 1 - notdead
- MOVV $302, R2 // sys___threxit
- SYSCALL
- MOVV $0, R2 // crash on syscall failure
- MOVV R2, (R2)
- JMP 0(PC)
-
-TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0
- MOVV name+0(FP), R4 // arg 1 - path
- MOVW mode+8(FP), R5 // arg 2 - mode
- MOVW perm+12(FP), R6 // arg 3 - perm
- MOVV $5, R2 // sys_open
- SYSCALL
- BEQ R7, 2(PC)
- MOVW $-1, R2
- MOVW R2, ret+16(FP)
- RET
-
-TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0
- MOVW fd+0(FP), R4 // arg 1 - fd
- MOVV $6, R2 // sys_close
- SYSCALL
- BEQ R7, 2(PC)
- MOVW $-1, R2
- MOVW R2, ret+8(FP)
- RET
-
-TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0
- MOVW fd+0(FP), R4 // arg 1 - fd
- MOVV p+8(FP), R5 // arg 2 - buf
- MOVW n+16(FP), R6 // arg 3 - nbyte
- MOVV $3, R2 // sys_read
- SYSCALL
- BEQ R7, 2(PC)
- SUBVU R2, R0, R2 // caller expects negative errno
- MOVW R2, ret+24(FP)
- RET
-
-// func pipe2(flags int32) (r, w int32, errno int32)
-TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
- MOVV $r+8(FP), R4
- MOVW flags+0(FP), R5
- MOVV $101, R2 // sys_pipe2
- SYSCALL
- BEQ R7, 2(PC)
- SUBVU R2, R0, R2 // caller expects negative errno
- MOVW R2, errno+16(FP)
- RET
-
-TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0
- MOVV fd+0(FP), R4 // arg 1 - fd
- MOVV p+8(FP), R5 // arg 2 - buf
- MOVW n+16(FP), R6 // arg 3 - nbyte
- MOVV $4, R2 // sys_write
- SYSCALL
- BEQ R7, 2(PC)
- SUBVU R2, R0, R2 // caller expects negative errno
- MOVW R2, ret+24(FP)
- RET
-
-TEXT runtime·usleep(SB),NOSPLIT,$24-4
- MOVWU usec+0(FP), R3
- MOVV R3, R5
- MOVW $1000000, R4
- DIVVU R4, R3
- MOVV LO, R3
- MOVV R3, 8(R29) // tv_sec
- MOVW $1000, R4
- MULVU R3, R4
- MOVV LO, R4
- SUBVU R4, R5
- MOVV R5, 16(R29) // tv_nsec
-
- ADDV $8, R29, R4 // arg 1 - rqtp
- MOVV $0, R5 // arg 2 - rmtp
- MOVV $91, R2 // sys_nanosleep
- SYSCALL
- RET
-
-TEXT runtime·getthrid(SB),NOSPLIT,$0-4
- MOVV $299, R2 // sys_getthrid
- SYSCALL
- MOVW R2, ret+0(FP)
- RET
-
-TEXT runtime·thrkill(SB),NOSPLIT,$0-16
- MOVW tid+0(FP), R4 // arg 1 - tid
- MOVV sig+8(FP), R5 // arg 2 - signum
- MOVW $0, R6 // arg 3 - tcb
- MOVV $119, R2 // sys_thrkill
- SYSCALL
- RET
-
-TEXT runtime·raiseproc(SB),NOSPLIT,$0
- MOVV $20, R4 // sys_getpid
- SYSCALL
- MOVV R2, R4 // arg 1 - pid
- MOVW sig+0(FP), R5 // arg 2 - signum
- MOVV $122, R2 // sys_kill
- SYSCALL
- RET
-
-TEXT runtime·mmap(SB),NOSPLIT,$0
- MOVV addr+0(FP), R4 // arg 1 - addr
- MOVV n+8(FP), R5 // arg 2 - len
- MOVW prot+16(FP), R6 // arg 3 - prot
- MOVW flags+20(FP), R7 // arg 4 - flags
- MOVW fd+24(FP), R8 // arg 5 - fd
- MOVW $0, R9 // arg 6 - pad
- MOVW off+28(FP), R10 // arg 7 - offset
- MOVV $197, R2 // sys_mmap
- SYSCALL
- MOVV $0, R4
- BEQ R7, 3(PC)
- MOVV R2, R4 // if error, move to R4
- MOVV $0, R2
- MOVV R2, p+32(FP)
- MOVV R4, err+40(FP)
- RET
-
-TEXT runtime·munmap(SB),NOSPLIT,$0
- MOVV addr+0(FP), R4 // arg 1 - addr
- MOVV n+8(FP), R5 // arg 2 - len
- MOVV $73, R2 // sys_munmap
- SYSCALL
- BEQ R7, 3(PC)
- MOVV $0, R2 // crash on syscall failure
- MOVV R2, (R2)
- RET
-
-TEXT runtime·madvise(SB),NOSPLIT,$0
- MOVV addr+0(FP), R4 // arg 1 - addr
- MOVV n+8(FP), R5 // arg 2 - len
- MOVW flags+16(FP), R6 // arg 2 - flags
- MOVV $75, R2 // sys_madvise
- SYSCALL
- BEQ R7, 2(PC)
- MOVW $-1, R2
- MOVW R2, ret+24(FP)
- RET
-
-TEXT runtime·setitimer(SB),NOSPLIT,$0
- MOVW mode+0(FP), R4 // arg 1 - mode
- MOVV new+8(FP), R5 // arg 2 - new value
- MOVV old+16(FP), R6 // arg 3 - old value
- MOVV $69, R2 // sys_setitimer
- SYSCALL
- RET
-
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB), NOSPLIT, $32
- MOVW CLOCK_REALTIME, R4 // arg 1 - clock_id
- MOVV $8(R29), R5 // arg 2 - tp
- MOVV $87, R2 // sys_clock_gettime
- SYSCALL
-
- MOVV 8(R29), R4 // sec
- MOVV 16(R29), R5 // nsec
- MOVV R4, sec+0(FP)
- MOVW R5, nsec+8(FP)
-
- RET
-
-// int64 nanotime1(void) so really
-// void nanotime1(int64 *nsec)
-TEXT runtime·nanotime1(SB),NOSPLIT,$32
- MOVW CLOCK_MONOTONIC, R4 // arg 1 - clock_id
- MOVV $8(R29), R5 // arg 2 - tp
- MOVV $87, R2 // sys_clock_gettime
- SYSCALL
-
- MOVV 8(R29), R3 // sec
- MOVV 16(R29), R5 // nsec
-
- MOVV $1000000000, R4
- MULVU R4, R3
- MOVV LO, R3
- ADDVU R5, R3
- MOVV R3, ret+0(FP)
- RET
-
-TEXT runtime·sigaction(SB),NOSPLIT,$0
- MOVW sig+0(FP), R4 // arg 1 - signum
- MOVV new+8(FP), R5 // arg 2 - new sigaction
- MOVV old+16(FP), R6 // arg 3 - old sigaction
- MOVV $46, R2 // sys_sigaction
- SYSCALL
- BEQ R7, 3(PC)
- MOVV $3, R2 // crash on syscall failure
- MOVV R2, (R2)
- RET
-
-TEXT runtime·obsdsigprocmask(SB),NOSPLIT,$0
- MOVW how+0(FP), R4 // arg 1 - mode
- MOVW new+4(FP), R5 // arg 2 - new
- MOVV $48, R2 // sys_sigprocmask
- SYSCALL
- BEQ R7, 3(PC)
- MOVV $3, R2 // crash on syscall failure
- MOVV R2, (R2)
- MOVW R2, ret+8(FP)
- RET
-
-TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
- MOVW sig+8(FP), R4
- MOVV info+16(FP), R5
- MOVV ctx+24(FP), R6
- MOVV fn+0(FP), R25 // Must use R25, needed for PIC code.
- CALL (R25)
- RET
-
-TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$192
- // initialize REGSB = PC&0xffffffff00000000
- BGEZAL R0, 1(PC)
- SRLV $32, R31, RSB
- SLLV $32, RSB
-
- // this might be called in external code context,
- // where g is not set.
- MOVB runtime·iscgo(SB), R1
- BEQ R1, 2(PC)
- JAL runtime·load_g(SB)
-
- MOVW R4, 8(R29)
- MOVV R5, 16(R29)
- MOVV R6, 24(R29)
- MOVV $runtime·sigtrampgo(SB), R1
- JAL (R1)
- RET
-
-// int32 tfork(void *param, uintptr psize, M *mp, G *gp, void (*fn)(void));
-TEXT runtime·tfork(SB),NOSPLIT,$0
-
- // Copy mp, gp and fn off parent stack for use by child.
- MOVV mm+16(FP), R16
- MOVV gg+24(FP), R17
- MOVV fn+32(FP), R18
-
- MOVV param+0(FP), R4 // arg 1 - param
- MOVV psize+8(FP), R5 // arg 2 - psize
- MOVV $8, R2 // sys___tfork
- SYSCALL
-
- // Return if syscall failed.
- BEQ R7, 4(PC)
- SUBVU R2, R0, R2 // caller expects negative errno
- MOVW R2, ret+40(FP)
- RET
-
- // In parent, return.
- BEQ R2, 3(PC)
- MOVW $0, ret+40(FP)
- RET
-
- // Initialise m, g.
- MOVV R17, g
- MOVV R16, g_m(g)
-
- // Call fn.
- CALL (R18)
-
- // fn should never return.
- MOVV $2, R8 // crash if reached
- MOVV R8, (R8)
- RET
-
-TEXT runtime·sigaltstack(SB),NOSPLIT,$0
- MOVV new+0(FP), R4 // arg 1 - new sigaltstack
- MOVV old+8(FP), R5 // arg 2 - old sigaltstack
- MOVV $288, R2 // sys_sigaltstack
- SYSCALL
- BEQ R7, 3(PC)
- MOVV $0, R8 // crash on syscall failure
- MOVV R8, (R8)
- RET
-
-TEXT runtime·osyield(SB),NOSPLIT,$0
- MOVV $298, R2 // sys_sched_yield
- SYSCALL
- RET
-
-TEXT runtime·thrsleep(SB),NOSPLIT,$0
- MOVV ident+0(FP), R4 // arg 1 - ident
- MOVW clock_id+8(FP), R5 // arg 2 - clock_id
- MOVV tsp+16(FP), R6 // arg 3 - tsp
- MOVV lock+24(FP), R7 // arg 4 - lock
- MOVV abort+32(FP), R8 // arg 5 - abort
- MOVV $94, R2 // sys___thrsleep
- SYSCALL
- MOVW R2, ret+40(FP)
- RET
-
-TEXT runtime·thrwakeup(SB),NOSPLIT,$0
- MOVV ident+0(FP), R4 // arg 1 - ident
- MOVW n+8(FP), R5 // arg 2 - n
- MOVV $301, R2 // sys___thrwakeup
- SYSCALL
- MOVW R2, ret+16(FP)
- RET
-
-TEXT runtime·sysctl(SB),NOSPLIT,$0
- MOVV mib+0(FP), R4 // arg 1 - mib
- MOVW miblen+8(FP), R5 // arg 2 - miblen
- MOVV out+16(FP), R6 // arg 3 - out
- MOVV size+24(FP), R7 // arg 4 - size
- MOVV dst+32(FP), R8 // arg 5 - dest
- MOVV ndst+40(FP), R9 // arg 6 - newlen
- MOVV $202, R2 // sys___sysctl
- SYSCALL
- BEQ R7, 2(PC)
- SUBVU R2, R0, R2 // caller expects negative errno
- MOVW R2, ret+48(FP)
- RET
-
-// int32 runtime·kqueue(void);
-TEXT runtime·kqueue(SB),NOSPLIT,$0
- MOVV $269, R2 // sys_kqueue
- SYSCALL
- BEQ R7, 2(PC)
- SUBVU R2, R0, R2 // caller expects negative errno
- MOVW R2, ret+0(FP)
- RET
-
-// int32 runtime·kevent(int kq, Kevent *changelist, int nchanges, Kevent *eventlist, int nevents, Timespec *timeout);
-TEXT runtime·kevent(SB),NOSPLIT,$0
- MOVW kq+0(FP), R4 // arg 1 - kq
- MOVV ch+8(FP), R5 // arg 2 - changelist
- MOVW nch+16(FP), R6 // arg 3 - nchanges
- MOVV ev+24(FP), R7 // arg 4 - eventlist
- MOVW nev+32(FP), R8 // arg 5 - nevents
- MOVV ts+40(FP), R9 // arg 6 - timeout
- MOVV $72, R2 // sys_kevent
- SYSCALL
- BEQ R7, 2(PC)
- SUBVU R2, R0, R2 // caller expects negative errno
- MOVW R2, ret+48(FP)
- RET
-
-// func fcntl(fd, cmd, arg int32) (int32, int32)
-TEXT runtime·fcntl(SB),NOSPLIT,$0
- MOVW fd+0(FP), R4 // fd
- MOVW cmd+4(FP), R5 // cmd
- MOVW arg+8(FP), R6 // arg
- MOVV $92, R2 // sys_fcntl
- SYSCALL
- MOVV $0, R4
- BEQ R7, noerr
- MOVV R2, R4
- MOVW $-1, R2
-noerr:
- MOVW R2, ret+16(FP)
- MOVW R4, errno+20(FP)
- RET
-
-// func issetugid() int32
-TEXT runtime·issetugid(SB),NOSPLIT,$0
- MOVV $253, R2 // sys_issetugid
- SYSCALL
- MOVW R2, ret+0(FP)
- RET
diff --git a/src/runtime/sys_solaris_amd64.s b/src/runtime/sys_solaris_amd64.s
index 7a80020ba3..9235cad391 100644
--- a/src/runtime/sys_solaris_amd64.s
+++ b/src/runtime/sys_solaris_amd64.s
@@ -155,7 +155,7 @@ allgood:
// save m->libcall
MOVQ g_m(R10), BP
- LEAQ m_libcall(BP), R11
+ LEAQ (m_mOS+mOS_libcall)(BP), R11
MOVQ libcall_fn(R11), R10
MOVQ R10, 72(SP)
MOVQ libcall_args(R11), R10
@@ -197,7 +197,7 @@ allgood:
MOVQ g(BX), BP
MOVQ g_m(BP), BP
// restore libcall
- LEAQ m_libcall(BP), R11
+ LEAQ (m_mOS+mOS_libcall)(BP), R11
MOVQ 72(SP), R10
MOVQ R10, libcall_fn(R11)
MOVQ 80(SP), R10
diff --git a/src/runtime/sys_windows_386.s b/src/runtime/sys_windows_386.s
index e71fda78ae..4030e4c38b 100644
--- a/src/runtime/sys_windows_386.s
+++ b/src/runtime/sys_windows_386.s
@@ -11,49 +11,6 @@
#define TEB_TlsSlots 0xE10
#define TEB_ArbitraryPtr 0x14
-TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0
- JMP runtime·asmstdcall(SB)
-
-// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall(SB),NOSPLIT,$0
- MOVL fn+0(FP), BX
- MOVL SP, BP // save stack pointer
-
- // SetLastError(0).
- MOVL $0, 0x34(FS)
-
- MOVL libcall_n(BX), CX
-
- // Fast version, do not store args on the stack.
- CMPL CX, $0
- JE docall
-
- // Copy args to the stack.
- MOVL CX, AX
- SALL $2, AX
- SUBL AX, SP // room for args
- MOVL SP, DI
- MOVL libcall_args(BX), SI
- CLD
- REP; MOVSL
-
-docall:
- // Call stdcall or cdecl function.
- // DI SI BP BX are preserved, SP is not
- CALL libcall_fn(BX)
- MOVL BP, SP
-
- // Return result.
- MOVL fn+0(FP), BX
- MOVL AX, libcall_r1(BX)
- MOVL DX, libcall_r2(BX)
-
- // GetLastError().
- MOVL 0x34(FS), AX
- MOVL AX, libcall_err(BX)
-
- RET
-
// faster get/set last error
TEXT runtime·getlasterror(SB),NOSPLIT,$0
MOVL 0x34(FS), AX
diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s
index 56a2dc0bcf..e438599910 100644
--- a/src/runtime/sys_windows_amd64.s
+++ b/src/runtime/sys_windows_amd64.s
@@ -12,85 +12,6 @@
#define TEB_TlsSlots 0x1480
#define TEB_ArbitraryPtr 0x28
-TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0
- MOVQ AX, CX
- JMP runtime·asmstdcall(SB)
-
-// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall(SB),NOSPLIT,$16
- MOVQ SP, AX
- ANDQ $~15, SP // alignment as per Windows requirement
- MOVQ AX, 8(SP)
- MOVQ CX, 0(SP) // asmcgocall will put first argument into CX.
-
- MOVQ libcall_fn(CX), AX
- MOVQ libcall_args(CX), SI
- MOVQ libcall_n(CX), CX
-
- // SetLastError(0).
- MOVQ 0x30(GS), DI
- MOVL $0, 0x68(DI)
-
- SUBQ $(const_maxArgs*8), SP // room for args
-
- // Fast version, do not store args on the stack.
- CMPL CX, $0; JE _0args
- CMPL CX, $1; JE _1args
- CMPL CX, $2; JE _2args
- CMPL CX, $3; JE _3args
- CMPL CX, $4; JE _4args
-
- // Check we have enough room for args.
- CMPL CX, $const_maxArgs
- JLE 2(PC)
- INT $3 // not enough room -> crash
-
- // Copy args to the stack.
- MOVQ SP, DI
- CLD
- REP; MOVSQ
- MOVQ SP, SI
-
- // Load first 4 args into correspondent registers.
- // Floating point arguments are passed in the XMM
- // registers. Set them here in case any of the arguments
- // are floating point values. For details see
- // https://learn.microsoft.com/en-us/cpp/build/x64-calling-convention?view=msvc-170
-_4args:
- MOVQ 24(SI), R9
- MOVQ R9, X3
-_3args:
- MOVQ 16(SI), R8
- MOVQ R8, X2
-_2args:
- MOVQ 8(SI), DX
- MOVQ DX, X1
-_1args:
- MOVQ 0(SI), CX
- MOVQ CX, X0
-_0args:
-
- // Call stdcall function.
- CALL AX
-
- ADDQ $(const_maxArgs*8), SP
-
- // Return result.
- MOVQ 0(SP), CX
- MOVQ 8(SP), SP
- MOVQ AX, libcall_r1(CX)
- // Floating point return values are returned in XMM0. Setting r2 to this
- // value in case this call returned a floating point value. For details,
- // see https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
- MOVQ X0, libcall_r2(CX)
-
- // GetLastError().
- MOVQ 0x30(GS), DI
- MOVL 0x68(DI), AX
- MOVQ AX, libcall_err(CX)
-
- RET
-
// faster get/set last error
TEXT runtime·getlasterror(SB),NOSPLIT,$0
MOVQ 0x30(GS), AX
diff --git a/src/runtime/sys_windows_arm.s b/src/runtime/sys_windows_arm.s
index 99f33cf07d..c7f2369e57 100644
--- a/src/runtime/sys_windows_arm.s
+++ b/src/runtime/sys_windows_arm.s
@@ -9,76 +9,6 @@
// Note: For system ABI, R0-R3 are args, R4-R11 are callee-save.
-TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0
- B runtime·asmstdcall(SB)
-
-// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
- MOVM.DB.W [R4, R5, R14], (R13) // push {r4, r5, lr}
- MOVW R0, R4 // put libcall * in r4
- MOVW R13, R5 // save stack pointer in r5
-
- // SetLastError(0)
- MOVW $0, R0
- MRC 15, 0, R1, C13, C0, 2
- MOVW R0, 0x34(R1)
-
- MOVW 8(R4), R12 // libcall->args
-
- // Do we have more than 4 arguments?
- MOVW 4(R4), R0 // libcall->n
- SUB.S $4, R0, R2
- BLE loadregs
-
- // Reserve stack space for remaining args
- SUB R2<<2, R13
- BIC $0x7, R13 // alignment for ABI
-
- // R0: count of arguments
- // R1:
- // R2: loop counter, from 0 to (n-4)
- // R3: scratch
- // R4: pointer to libcall struct
- // R12: libcall->args
- MOVW $0, R2
-stackargs:
- ADD $4, R2, R3 // r3 = args[4 + i]
- MOVW R3<<2(R12), R3
- MOVW R3, R2<<2(R13) // stack[i] = r3
-
- ADD $1, R2 // i++
- SUB $4, R0, R3 // while (i < (n - 4))
- CMP R3, R2
- BLT stackargs
-
-loadregs:
- CMP $3, R0
- MOVW.GT 12(R12), R3
-
- CMP $2, R0
- MOVW.GT 8(R12), R2
-
- CMP $1, R0
- MOVW.GT 4(R12), R1
-
- CMP $0, R0
- MOVW.GT 0(R12), R0
-
- BIC $0x7, R13 // alignment for ABI
- MOVW 0(R4), R12 // branch to libcall->fn
- BL (R12)
-
- MOVW R5, R13 // free stack space
- MOVW R0, 12(R4) // save return value to libcall->r1
- MOVW R1, 16(R4)
-
- // GetLastError
- MRC 15, 0, R1, C13, C0, 2
- MOVW 0x34(R1), R0
- MOVW R0, 20(R4) // store in libcall->err
-
- MOVM.IA.W (R13), [R4, R5, R15]
-
TEXT runtime·getlasterror(SB),NOSPLIT,$0
MRC 15, 0, R0, C13, C0, 2
MOVW 0x34(R0), R0
diff --git a/src/runtime/sys_windows_arm64.s b/src/runtime/sys_windows_arm64.s
index 1f6d411b07..da3cb7e546 100644
--- a/src/runtime/sys_windows_arm64.s
+++ b/src/runtime/sys_windows_arm64.s
@@ -19,88 +19,6 @@
//
// load_g and save_g (in tls_arm64.s) clobber R27 (REGTMP) and R0.
-TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0
- B runtime·asmstdcall(SB)
-
-// void runtime·asmstdcall(void *c);
-TEXT runtime·asmstdcall(SB),NOSPLIT,$16
- STP (R19, R20), 16(RSP) // save old R19, R20
- MOVD R0, R19 // save libcall pointer
- MOVD RSP, R20 // save stack pointer
-
- // SetLastError(0)
- MOVD $0, TEB_error(R18_PLATFORM)
- MOVD libcall_args(R19), R12 // libcall->args
-
- // Do we have more than 8 arguments?
- MOVD libcall_n(R19), R0
- CMP $0, R0; BEQ _0args
- CMP $1, R0; BEQ _1args
- CMP $2, R0; BEQ _2args
- CMP $3, R0; BEQ _3args
- CMP $4, R0; BEQ _4args
- CMP $5, R0; BEQ _5args
- CMP $6, R0; BEQ _6args
- CMP $7, R0; BEQ _7args
- CMP $8, R0; BEQ _8args
-
- // Reserve stack space for remaining args
- SUB $8, R0, R2
- ADD $1, R2, R3 // make even number of words for stack alignment
- AND $~1, R3
- LSL $3, R3
- SUB R3, RSP
-
- // R4: size of stack arguments (n-8)*8
- // R5: &args[8]
- // R6: loop counter, from 0 to (n-8)*8
- // R7: scratch
- // R8: copy of RSP - (R2)(RSP) assembles as (R2)(ZR)
- SUB $8, R0, R4
- LSL $3, R4
- ADD $(8*8), R12, R5
- MOVD $0, R6
- MOVD RSP, R8
-stackargs:
- MOVD (R6)(R5), R7
- MOVD R7, (R6)(R8)
- ADD $8, R6
- CMP R6, R4
- BNE stackargs
-
-_8args:
- MOVD (7*8)(R12), R7
-_7args:
- MOVD (6*8)(R12), R6
-_6args:
- MOVD (5*8)(R12), R5
-_5args:
- MOVD (4*8)(R12), R4
-_4args:
- MOVD (3*8)(R12), R3
-_3args:
- MOVD (2*8)(R12), R2
-_2args:
- MOVD (1*8)(R12), R1
-_1args:
- MOVD (0*8)(R12), R0
-_0args:
-
- MOVD libcall_fn(R19), R12 // branch to libcall->fn
- BL (R12)
-
- MOVD R20, RSP // free stack space
- MOVD R0, libcall_r1(R19) // save return value to libcall->r1
- // TODO(rsc) floating point like amd64 in libcall->r2?
-
- // GetLastError
- MOVD TEB_error(R18_PLATFORM), R0
- MOVD R0, libcall_err(R19)
-
- // Restore callee-saved registers.
- LDP 16(RSP), (R19, R20)
- RET
-
TEXT runtime·getlasterror(SB),NOSPLIT,$0
MOVD TEB_error(R18_PLATFORM), R0
MOVD R0, ret+0(FP)
diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go
index 85b1b8c902..b3c3d8c0d5 100644
--- a/src/runtime/syscall_windows.go
+++ b/src/runtime/syscall_windows.go
@@ -7,6 +7,7 @@ package runtime
import (
"internal/abi"
"internal/goarch"
+ "internal/runtime/syscall/windows"
"unsafe"
)
@@ -103,7 +104,7 @@ func (p *abiDesc) assignArg(t *_type) {
// registers and the stack.
panic("compileCallback: argument size is larger than uintptr")
}
- if k := t.Kind_ & abi.KindMask; GOARCH != "386" && (k == abi.Float32 || k == abi.Float64) {
+ if k := t.Kind(); GOARCH != "386" && (k == abi.Float32 || k == abi.Float64) {
// In fastcall, floating-point arguments in
// the first four positions are passed in
// floating-point registers, which we don't
@@ -174,7 +175,7 @@ func (p *abiDesc) assignArg(t *_type) {
//
// Returns whether the assignment succeeded.
func (p *abiDesc) tryRegAssignArg(t *_type, offset uintptr) bool {
- switch k := t.Kind_ & abi.KindMask; k {
+ switch k := t.Kind(); k {
case abi.Bool, abi.Int, abi.Int8, abi.Int16, abi.Int32, abi.Uint, abi.Uint8, abi.Uint16, abi.Uint32, abi.Uintptr, abi.Pointer, abi.UnsafePointer:
// Assign a register for all these types.
return p.assignReg(t.Size_, offset)
@@ -269,7 +270,7 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) {
cdecl = false
}
- if fn._type == nil || (fn._type.Kind_&abi.KindMask) != abi.Func {
+ if fn._type == nil || fn._type.Kind() != abi.Func {
panic("compileCallback: expected function with one uintptr-sized result")
}
ft := (*functype)(unsafe.Pointer(fn._type))
@@ -290,7 +291,7 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) {
if ft.OutSlice()[0].Size_ != goarch.PtrSize {
panic("compileCallback: expected function with one uintptr-sized result")
}
- if k := ft.OutSlice()[0].Kind_ & abi.KindMask; k == abi.Float32 || k == abi.Float64 {
+ if k := ft.OutSlice()[0].Kind(); k == abi.Float32 || k == abi.Float64 {
// In cdecl and stdcall, float results are returned in
// ST(0). In fastcall, they're returned in XMM0.
// Either way, it's not AX.
@@ -411,101 +412,19 @@ func callbackWrap(a *callbackArgs) {
}
}
-const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
-
-//go:linkname syscall_loadsystemlibrary syscall.loadsystemlibrary
-func syscall_loadsystemlibrary(filename *uint16) (handle, err uintptr) {
- handle, _, err = syscall_SyscallN(uintptr(unsafe.Pointer(_LoadLibraryExW)), uintptr(unsafe.Pointer(filename)), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
- KeepAlive(filename)
- if handle != 0 {
- err = 0
- }
- return
-}
-
-// golang.org/x/sys linknames syscall.loadlibrary
-// (in addition to standard package syscall).
-// Do not remove or change the type signature.
-//
-//go:linkname syscall_loadlibrary syscall.loadlibrary
-func syscall_loadlibrary(filename *uint16) (handle, err uintptr) {
- handle, _, err = syscall_SyscallN(uintptr(unsafe.Pointer(_LoadLibraryW)), uintptr(unsafe.Pointer(filename)))
- KeepAlive(filename)
- if handle != 0 {
- err = 0
- }
- return
-}
-
-// golang.org/x/sys linknames syscall.getprocaddress
-// (in addition to standard package syscall).
-// Do not remove or change the type signature.
-//
-//go:linkname syscall_getprocaddress syscall.getprocaddress
-func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uintptr) {
- outhandle, _, err = syscall_SyscallN(uintptr(unsafe.Pointer(_GetProcAddress)), handle, uintptr(unsafe.Pointer(procname)))
- KeepAlive(procname)
- if outhandle != 0 {
- err = 0
- }
- return
-}
-
-//go:linkname syscall_Syscall syscall.Syscall
-//go:nosplit
-func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- return syscall_syscalln(fn, nargs, a1, a2, a3)
-}
-
-//go:linkname syscall_Syscall6 syscall.Syscall6
-//go:nosplit
-func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6)
-}
-
-//go:linkname syscall_Syscall9 syscall.Syscall9
-//go:nosplit
-func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
- return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9)
-}
-
-//go:linkname syscall_Syscall12 syscall.Syscall12
-//go:nosplit
-func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) {
- return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12)
-}
-
-//go:linkname syscall_Syscall15 syscall.Syscall15
-//go:nosplit
-func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
- return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15)
-}
-
-//go:linkname syscall_Syscall18 syscall.Syscall18
-//go:nosplit
-func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) {
- return syscall_syscalln(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18)
-}
-
-// maxArgs should be divisible by 2, as Windows stack
-// must be kept 16-byte aligned on syscall entry.
+// syscall_syscalln calls fn with args[:n].
+// It is used to implement [syscall.SyscallN].
+// It shouldn't be used in the runtime package,
+// use [stdcall] instead.
//
-// Although it only permits maximum 42 parameters, it
-// is arguably large enough.
-const maxArgs = 42
-
-//go:linkname syscall_SyscallN syscall.SyscallN
-//go:nosplit
-func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) {
- return syscall_syscalln(fn, uintptr(len(args)), args...)
-}
-
+//go:linkname syscall_syscalln syscall.syscalln
//go:nosplit
+//go:uintptrkeepalive
func syscall_syscalln(fn, n uintptr, args ...uintptr) (r1, r2, err uintptr) {
if n > uintptr(len(args)) {
panic("syscall: n > len(args)") // should not be reachable from user code
}
- if n > maxArgs {
+ if n > windows.MaxArgs {
panic("runtime: SyscallN has too many arguments")
}
@@ -513,15 +432,15 @@ func syscall_syscalln(fn, n uintptr, args ...uintptr) (r1, r2, err uintptr) {
// the stack because the stack can move during fn if it
// calls back into Go.
c := &getg().m.winsyscall
- c.fn = fn
- c.n = n
- if c.n != 0 {
- c.args = uintptr(noescape(unsafe.Pointer(&args[0])))
+ c.Fn = fn
+ c.N = n
+ if c.N != 0 {
+ c.Args = uintptr(noescape(unsafe.Pointer(&args[0])))
}
cgocall(asmstdcallAddr, unsafe.Pointer(c))
// cgocall may reschedule us on to a different M,
// but it copies the return values into the new M's
// so we can read them from there.
c = &getg().m.winsyscall
- return c.r1, c.r2, c.err
+ return c.R1, c.R2, c.Err
}
diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go
index ad9bfb464b..6a9b165d62 100644
--- a/src/runtime/syscall_windows_test.go
+++ b/src/runtime/syscall_windows_test.go
@@ -8,6 +8,7 @@ import (
"fmt"
"internal/abi"
"internal/race"
+ "internal/runtime/syscall/windows"
"internal/syscall/windows/sysdll"
"internal/testenv"
"io"
@@ -776,7 +777,7 @@ func TestSyscallN(t *testing.T) {
t.Skipf("skipping test: GOARCH=%s", runtime.GOARCH)
}
- for arglen := 0; arglen <= runtime.MaxArgs; arglen++ {
+ for arglen := 0; arglen <= windows.MaxArgs; arglen++ {
arglen := arglen
t.Run(fmt.Sprintf("arg-%d", arglen), func(t *testing.T) {
t.Parallel()
diff --git a/src/runtime/testdata/testprog/coro.go b/src/runtime/testdata/testprog/coro.go
index 032215b801..5f3d302987 100644
--- a/src/runtime/testdata/testprog/coro.go
+++ b/src/runtime/testdata/testprog/coro.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.rangefunc
-
package main
import (
diff --git a/src/runtime/testdata/testprog/gc.go b/src/runtime/testdata/testprog/gc.go
index 5dc85fbb62..bbe1453401 100644
--- a/src/runtime/testdata/testprog/gc.go
+++ b/src/runtime/testdata/testprog/gc.go
@@ -395,6 +395,9 @@ func gcMemoryLimit(gcPercent int) {
// somewhat heavily here) this bound is kept loose. In practice the Go runtime
// should do considerably better than this bound.
bound := int64(myLimit + 16<<20)
+ if runtime.GOOS == "darwin" {
+ bound += 16 << 20 // Be more lax on Darwin, see issue 73136.
+ }
start := time.Now()
for time.Since(start) < 200*time.Millisecond {
metrics.Read(m[:])
diff --git a/src/runtime/testdata/testprogcgo/coro.go b/src/runtime/testdata/testprogcgo/coro.go
index e0cb945112..93be92cb7a 100644
--- a/src/runtime/testdata/testprogcgo/coro.go
+++ b/src/runtime/testdata/testprogcgo/coro.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build goexperiment.rangefunc && !windows
+//go:build !windows
package main
diff --git a/src/runtime/traceallocfree.go b/src/runtime/traceallocfree.go
index 70e48ea3a6..b1b6c63462 100644
--- a/src/runtime/traceallocfree.go
+++ b/src/runtime/traceallocfree.go
@@ -37,7 +37,7 @@ func traceSnapshotMemory(gen uintptr) {
}
// Emit info.
- w.varint(uint64(trace.minPageHeapAddr))
+ w.varint(trace.minPageHeapAddr)
w.varint(uint64(pageSize))
w.varint(uint64(gc.MinHeapAlign))
w.varint(uint64(fixedStack))
diff --git a/src/runtime/tracebuf.go b/src/runtime/tracebuf.go
index 08a1d46838..5adaede424 100644
--- a/src/runtime/tracebuf.go
+++ b/src/runtime/tracebuf.go
@@ -183,7 +183,7 @@ func (w traceWriter) refill() traceWriter {
// Tolerate a nil mp.
mID := ^uint64(0)
if w.mp != nil {
- mID = uint64(w.mp.procid)
+ mID = w.mp.procid
}
// Write the buffer's header.
@@ -194,7 +194,7 @@ func (w traceWriter) refill() traceWriter {
w.byte(byte(w.exp))
}
w.varint(uint64(w.gen))
- w.varint(uint64(mID))
+ w.varint(mID)
w.varint(uint64(ts))
w.traceBuf.lenPos = w.varintReserve()
return w
diff --git a/src/runtime/tracecpu.go b/src/runtime/tracecpu.go
index 092c707f83..e64ca32cdf 100644
--- a/src/runtime/tracecpu.go
+++ b/src/runtime/tracecpu.go
@@ -258,7 +258,7 @@ func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) {
if gp != nil {
hdr[1] = gp.goid
}
- hdr[2] = uint64(mp.procid)
+ hdr[2] = mp.procid
// Allow only one writer at a time
for !trace.signalLock.CompareAndSwap(0, 1) {
diff --git a/src/runtime/traceevent.go b/src/runtime/traceevent.go
index 263847be2e..b0bc4c017d 100644
--- a/src/runtime/traceevent.go
+++ b/src/runtime/traceevent.go
@@ -42,7 +42,7 @@ func (tl traceLocker) eventWriter(goStatus tracev2.GoStatus, procStatus tracev2.
tl.writer().writeProcStatus(uint64(pp.id), procStatus, pp.trace.inSweep).end()
}
if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
- tl.writer().writeGoStatus(uint64(gp.goid), int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
+ tl.writer().writeGoStatus(gp.goid, int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
}
return traceEventWriter{tl}
}
diff --git a/src/runtime/traceruntime.go b/src/runtime/traceruntime.go
index a2775a3427..06e36fd802 100644
--- a/src/runtime/traceruntime.go
+++ b/src/runtime/traceruntime.go
@@ -457,7 +457,7 @@ func (tl traceLocker) GoPreempt() {
// GoStop emits a GoStop event with the provided reason.
func (tl traceLocker) GoStop(reason traceGoStopReason) {
- tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(0))
+ tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, trace.goStopReasons[tl.gen%2][reason], tl.stack(0))
}
// GoPark emits a GoBlock event with the provided reason.
@@ -465,7 +465,7 @@ func (tl traceLocker) GoStop(reason traceGoStopReason) {
// TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
// that we have both, and waitReason is way more descriptive.
func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
- tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
+ tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, trace.goBlockReasons[tl.gen%2][reason], tl.stack(skip))
}
// GoUnpark emits a GoUnblock event.
diff --git a/src/runtime/tracestack.go b/src/runtime/tracestack.go
index 76d6b05048..51f3c29445 100644
--- a/src/runtime/tracestack.go
+++ b/src/runtime/tracestack.go
@@ -190,7 +190,7 @@ func dumpStacksRec(node *traceMapNode, w traceWriter, stackBuf []uintptr) traceW
// Emit stack event.
w.byte(byte(tracev2.EvStack))
- w.varint(uint64(node.id))
+ w.varint(node.id)
w.varint(uint64(len(frames)))
for _, frame := range frames {
w.varint(uint64(frame.PC))
diff --git a/src/runtime/tracetype.go b/src/runtime/tracetype.go
index f54f812578..613fc88202 100644
--- a/src/runtime/tracetype.go
+++ b/src/runtime/tracetype.go
@@ -64,7 +64,7 @@ func dumpTypesRec(node *traceMapNode, w traceWriter) traceWriter {
}
// Emit type.
- w.varint(uint64(node.id))
+ w.varint(node.id)
w.varint(uint64(uintptr(unsafe.Pointer(typ))))
w.varint(uint64(typ.Size()))
w.varint(uint64(typ.PtrBytes))
diff --git a/src/runtime/type.go b/src/runtime/type.go
index c11c866cd8..9009119464 100644
--- a/src/runtime/type.go
+++ b/src/runtime/type.go
@@ -9,7 +9,6 @@ package runtime
import (
"internal/abi"
"internal/goarch"
- "internal/goexperiment"
"internal/runtime/atomic"
"unsafe"
)
@@ -69,7 +68,7 @@ func (t rtype) pkgpath() string {
if u := t.uncommon(); u != nil {
return t.nameOff(u.PkgPath).Name()
}
- switch t.Kind_ & abi.KindMask {
+ switch t.Kind() {
case abi.Struct:
st := (*structtype)(unsafe.Pointer(t.Type))
return st.PkgPath.Name()
@@ -522,8 +521,8 @@ func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
if t == v {
return true
}
- kind := t.Kind_ & abi.KindMask
- if kind != v.Kind_&abi.KindMask {
+ kind := t.Kind()
+ if kind != v.Kind() {
return false
}
rt, rv := toRType(t), toRType(v)
@@ -605,13 +604,8 @@ func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
}
return true
case abi.Map:
- if goexperiment.SwissMap {
- mt := (*abi.SwissMapType)(unsafe.Pointer(t))
- mv := (*abi.SwissMapType)(unsafe.Pointer(v))
- return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
- }
- mt := (*abi.OldMapType)(unsafe.Pointer(t))
- mv := (*abi.OldMapType)(unsafe.Pointer(v))
+ mt := (*abi.MapType)(unsafe.Pointer(t))
+ mv := (*abi.MapType)(unsafe.Pointer(v))
return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
case abi.Pointer:
pt := (*ptrtype)(unsafe.Pointer(t))
diff --git a/src/runtime/typekind.go b/src/runtime/typekind.go
deleted file mode 100644
index 4920a7cf14..0000000000
--- a/src/runtime/typekind.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "internal/abi"
-
-// isDirectIface reports whether t is stored directly in an interface value.
-func isDirectIface(t *_type) bool {
- return t.Kind_&abi.KindDirectIface != 0
-}
diff --git a/src/runtime/vdso_test.go b/src/runtime/vdso_test.go
index b0f5fbe728..cb70a040d6 100644
--- a/src/runtime/vdso_test.go
+++ b/src/runtime/vdso_test.go
@@ -62,7 +62,7 @@ func TestUsingVDSO(t *testing.T) {
t.Logf("%s", out)
}
if err != nil {
- if err := err.(*exec.ExitError); err != nil && err.Sys().(syscall.WaitStatus).Signaled() {
+ if err, ok := err.(*exec.ExitError); ok && err.Sys().(syscall.WaitStatus).Signaled() {
if !bytes.Contains(out, []byte("+++ killed by")) {
// strace itself occasionally crashes.
// Here, it exited with a signal, but
diff --git a/src/sync/hashtriemap.go b/src/sync/hashtriemap.go
deleted file mode 100644
index ce30f590bb..0000000000
--- a/src/sync/hashtriemap.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.synchashtriemap
-
-package sync
-
-import (
- isync "internal/sync"
-)
-
-// Map is like a Go map[any]any but is safe for concurrent use
-// by multiple goroutines without additional locking or coordination.
-// Loads, stores, and deletes run in amortized constant time.
-//
-// The Map type is specialized. Most code should use a plain Go map instead,
-// with separate locking or coordination, for better type safety and to make it
-// easier to maintain other invariants along with the map content.
-//
-// The Map type is optimized for two common use cases: (1) when the entry for a given
-// key is only ever written once but read many times, as in caches that only grow,
-// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
-// sets of keys. In these two cases, use of a Map may significantly reduce lock
-// contention compared to a Go map paired with a separate [Mutex] or [RWMutex].
-//
-// The zero Map is empty and ready for use. A Map must not be copied after first use.
-//
-// In the terminology of [the Go memory model], Map arranges that a write operation
-// “synchronizes before” any read operation that observes the effect of the write, where
-// read and write operations are defined as follows.
-// [Map.Load], [Map.LoadAndDelete], [Map.LoadOrStore], [Map.Swap], [Map.CompareAndSwap],
-// and [Map.CompareAndDelete] are read operations;
-// [Map.Delete], [Map.LoadAndDelete], [Map.Store], and [Map.Swap] are write operations;
-// [Map.LoadOrStore] is a write operation when it returns loaded set to false;
-// [Map.CompareAndSwap] is a write operation when it returns swapped set to true;
-// and [Map.CompareAndDelete] is a write operation when it returns deleted set to true.
-//
-// [the Go memory model]: https://go.dev/ref/mem
-type Map struct {
- _ noCopy
-
- m isync.HashTrieMap[any, any]
-}
-
-// Load returns the value stored in the map for a key, or nil if no
-// value is present.
-// The ok result indicates whether value was found in the map.
-func (m *Map) Load(key any) (value any, ok bool) {
- return m.m.Load(key)
-}
-
-// Store sets the value for a key.
-func (m *Map) Store(key, value any) {
- m.m.Store(key, value)
-}
-
-// Clear deletes all the entries, resulting in an empty Map.
-func (m *Map) Clear() {
- m.m.Clear()
-}
-
-// LoadOrStore returns the existing value for the key if present.
-// Otherwise, it stores and returns the given value.
-// The loaded result is true if the value was loaded, false if stored.
-func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) {
- return m.m.LoadOrStore(key, value)
-}
-
-// LoadAndDelete deletes the value for a key, returning the previous value if any.
-// The loaded result reports whether the key was present.
-func (m *Map) LoadAndDelete(key any) (value any, loaded bool) {
- return m.m.LoadAndDelete(key)
-}
-
-// Delete deletes the value for a key.
-// If the key is not in the map, Delete does nothing.
-func (m *Map) Delete(key any) {
- m.m.Delete(key)
-}
-
-// Swap swaps the value for a key and returns the previous value if any.
-// The loaded result reports whether the key was present.
-func (m *Map) Swap(key, value any) (previous any, loaded bool) {
- return m.m.Swap(key, value)
-}
-
-// CompareAndSwap swaps the old and new values for key
-// if the value stored in the map is equal to old.
-// The old value must be of a comparable type.
-func (m *Map) CompareAndSwap(key, old, new any) (swapped bool) {
- return m.m.CompareAndSwap(key, old, new)
-}
-
-// CompareAndDelete deletes the entry for key if its value is equal to old.
-// The old value must be of a comparable type.
-//
-// If there is no current value for key in the map, CompareAndDelete
-// returns false (even if the old value is the nil interface value).
-func (m *Map) CompareAndDelete(key, old any) (deleted bool) {
- return m.m.CompareAndDelete(key, old)
-}
-
-// Range calls f sequentially for each key and value present in the map.
-// If f returns false, range stops the iteration.
-//
-// Range does not necessarily correspond to any consistent snapshot of the Map's
-// contents: no key will be visited more than once, but if the value for any key
-// is stored or deleted concurrently (including by f), Range may reflect any
-// mapping for that key from any point during the Range call. Range does not
-// block other methods on the receiver; even f itself may call any method on m.
-//
-// Range may be O(N) with the number of elements in the map even if f returns
-// false after a constant number of calls.
-func (m *Map) Range(f func(key, value any) bool) {
- m.m.Range(f)
-}
diff --git a/src/sync/map.go b/src/sync/map.go
index 25181e0c78..934a651117 100644
--- a/src/sync/map.go
+++ b/src/sync/map.go
@@ -1,13 +1,11 @@
-// Copyright 2016 The Go Authors. All rights reserved.
+// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !goexperiment.synchashtriemap
-
package sync
import (
- "sync/atomic"
+ isync "internal/sync"
)
// Map is like a Go map[any]any but is safe for concurrent use
@@ -40,390 +38,56 @@ import (
type Map struct {
_ noCopy
- mu Mutex
-
- // read contains the portion of the map's contents that are safe for
- // concurrent access (with or without mu held).
- //
- // The read field itself is always safe to load, but must only be stored with
- // mu held.
- //
- // Entries stored in read may be updated concurrently without mu, but updating
- // a previously-expunged entry requires that the entry be copied to the dirty
- // map and unexpunged with mu held.
- read atomic.Pointer[readOnly]
-
- // dirty contains the portion of the map's contents that require mu to be
- // held. To ensure that the dirty map can be promoted to the read map quickly,
- // it also includes all of the non-expunged entries in the read map.
- //
- // Expunged entries are not stored in the dirty map. An expunged entry in the
- // clean map must be unexpunged and added to the dirty map before a new value
- // can be stored to it.
- //
- // If the dirty map is nil, the next write to the map will initialize it by
- // making a shallow copy of the clean map, omitting stale entries.
- dirty map[any]*entry
-
- // misses counts the number of loads since the read map was last updated that
- // needed to lock mu to determine whether the key was present.
- //
- // Once enough misses have occurred to cover the cost of copying the dirty
- // map, the dirty map will be promoted to the read map (in the unamended
- // state) and the next store to the map will make a new dirty copy.
- misses int
-}
-
-// readOnly is an immutable struct stored atomically in the Map.read field.
-type readOnly struct {
- m map[any]*entry
- amended bool // true if the dirty map contains some key not in m.
-}
-
-// expunged is an arbitrary pointer that marks entries which have been deleted
-// from the dirty map.
-var expunged = new(any)
-
-// An entry is a slot in the map corresponding to a particular key.
-type entry struct {
- // p points to the interface{} value stored for the entry.
- //
- // If p == nil, the entry has been deleted, and either m.dirty == nil or
- // m.dirty[key] is e.
- //
- // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
- // is missing from m.dirty.
- //
- // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
- // != nil, in m.dirty[key].
- //
- // An entry can be deleted by atomic replacement with nil: when m.dirty is
- // next created, it will atomically replace nil with expunged and leave
- // m.dirty[key] unset.
- //
- // An entry's associated value can be updated by atomic replacement, provided
- // p != expunged. If p == expunged, an entry's associated value can be updated
- // only after first setting m.dirty[key] = e so that lookups using the dirty
- // map find the entry.
- p atomic.Pointer[any]
-}
-
-func newEntry(i any) *entry {
- e := &entry{}
- e.p.Store(&i)
- return e
-}
-
-func (m *Map) loadReadOnly() readOnly {
- if p := m.read.Load(); p != nil {
- return *p
- }
- return readOnly{}
+ m isync.HashTrieMap[any, any]
}
// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key any) (value any, ok bool) {
- read := m.loadReadOnly()
- e, ok := read.m[key]
- if !ok && read.amended {
- m.mu.Lock()
- // Avoid reporting a spurious miss if m.dirty got promoted while we were
- // blocked on m.mu. (If further loads of the same key will not miss, it's
- // not worth copying the dirty map for this key.)
- read = m.loadReadOnly()
- e, ok = read.m[key]
- if !ok && read.amended {
- e, ok = m.dirty[key]
- // Regardless of whether the entry was present, record a miss: this key
- // will take the slow path until the dirty map is promoted to the read
- // map.
- m.missLocked()
- }
- m.mu.Unlock()
- }
- if !ok {
- return nil, false
- }
- return e.load()
-}
-
-func (e *entry) load() (value any, ok bool) {
- p := e.p.Load()
- if p == nil || p == expunged {
- return nil, false
- }
- return *p, true
+ return m.m.Load(key)
}
// Store sets the value for a key.
func (m *Map) Store(key, value any) {
- _, _ = m.Swap(key, value)
+ m.m.Store(key, value)
}
// Clear deletes all the entries, resulting in an empty Map.
func (m *Map) Clear() {
- read := m.loadReadOnly()
- if len(read.m) == 0 && !read.amended {
- // Avoid allocating a new readOnly when the map is already clear.
- return
- }
-
- m.mu.Lock()
- defer m.mu.Unlock()
-
- read = m.loadReadOnly()
- if len(read.m) > 0 || read.amended {
- m.read.Store(&readOnly{})
- }
-
- clear(m.dirty)
- // Don't immediately promote the newly-cleared dirty map on the next operation.
- m.misses = 0
-}
-
-// tryCompareAndSwap compare the entry with the given old value and swaps
-// it with a new value if the entry is equal to the old value, and the entry
-// has not been expunged.
-//
-// If the entry is expunged, tryCompareAndSwap returns false and leaves
-// the entry unchanged.
-func (e *entry) tryCompareAndSwap(old, new any) bool {
- p := e.p.Load()
- if p == nil || p == expunged || *p != old {
- return false
- }
-
- // Copy the interface after the first load to make this method more amenable
- // to escape analysis: if the comparison fails from the start, we shouldn't
- // bother heap-allocating an interface value to store.
- nc := new
- for {
- if e.p.CompareAndSwap(p, &nc) {
- return true
- }
- p = e.p.Load()
- if p == nil || p == expunged || *p != old {
- return false
- }
- }
-}
-
-// unexpungeLocked ensures that the entry is not marked as expunged.
-//
-// If the entry was previously expunged, it must be added to the dirty map
-// before m.mu is unlocked.
-func (e *entry) unexpungeLocked() (wasExpunged bool) {
- return e.p.CompareAndSwap(expunged, nil)
-}
-
-// swapLocked unconditionally swaps a value into the entry.
-//
-// The entry must be known not to be expunged.
-func (e *entry) swapLocked(i *any) *any {
- return e.p.Swap(i)
+ m.m.Clear()
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) {
- // Avoid locking if it's a clean hit.
- read := m.loadReadOnly()
- if e, ok := read.m[key]; ok {
- actual, loaded, ok := e.tryLoadOrStore(value)
- if ok {
- return actual, loaded
- }
- }
-
- m.mu.Lock()
- read = m.loadReadOnly()
- if e, ok := read.m[key]; ok {
- if e.unexpungeLocked() {
- m.dirty[key] = e
- }
- actual, loaded, _ = e.tryLoadOrStore(value)
- } else if e, ok := m.dirty[key]; ok {
- actual, loaded, _ = e.tryLoadOrStore(value)
- m.missLocked()
- } else {
- if !read.amended {
- // We're adding the first new key to the dirty map.
- // Make sure it is allocated and mark the read-only map as incomplete.
- m.dirtyLocked()
- m.read.Store(&readOnly{m: read.m, amended: true})
- }
- m.dirty[key] = newEntry(value)
- actual, loaded = value, false
- }
- m.mu.Unlock()
-
- return actual, loaded
-}
-
-// tryLoadOrStore atomically loads or stores a value if the entry is not
-// expunged.
-//
-// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
-// returns with ok==false.
-func (e *entry) tryLoadOrStore(i any) (actual any, loaded, ok bool) {
- p := e.p.Load()
- if p == expunged {
- return nil, false, false
- }
- if p != nil {
- return *p, true, true
- }
-
- // Copy the interface after the first load to make this method more amenable
- // to escape analysis: if we hit the "load" path or the entry is expunged, we
- // shouldn't bother heap-allocating.
- ic := i
- for {
- if e.p.CompareAndSwap(nil, &ic) {
- return i, false, true
- }
- p = e.p.Load()
- if p == expunged {
- return nil, false, false
- }
- if p != nil {
- return *p, true, true
- }
- }
+ return m.m.LoadOrStore(key, value)
}
// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
func (m *Map) LoadAndDelete(key any) (value any, loaded bool) {
- read := m.loadReadOnly()
- e, ok := read.m[key]
- if !ok && read.amended {
- m.mu.Lock()
- read = m.loadReadOnly()
- e, ok = read.m[key]
- if !ok && read.amended {
- e, ok = m.dirty[key]
- delete(m.dirty, key)
- // Regardless of whether the entry was present, record a miss: this key
- // will take the slow path until the dirty map is promoted to the read
- // map.
- m.missLocked()
- }
- m.mu.Unlock()
- }
- if ok {
- return e.delete()
- }
- return nil, false
+ return m.m.LoadAndDelete(key)
}
// Delete deletes the value for a key.
+// If the key is not in the map, Delete does nothing.
func (m *Map) Delete(key any) {
- m.LoadAndDelete(key)
-}
-
-func (e *entry) delete() (value any, ok bool) {
- for {
- p := e.p.Load()
- if p == nil || p == expunged {
- return nil, false
- }
- if e.p.CompareAndSwap(p, nil) {
- return *p, true
- }
- }
-}
-
-// trySwap swaps a value if the entry has not been expunged.
-//
-// If the entry is expunged, trySwap returns false and leaves the entry
-// unchanged.
-func (e *entry) trySwap(i *any) (*any, bool) {
- for {
- p := e.p.Load()
- if p == expunged {
- return nil, false
- }
- if e.p.CompareAndSwap(p, i) {
- return p, true
- }
- }
+ m.m.Delete(key)
}
// Swap swaps the value for a key and returns the previous value if any.
// The loaded result reports whether the key was present.
func (m *Map) Swap(key, value any) (previous any, loaded bool) {
- read := m.loadReadOnly()
- if e, ok := read.m[key]; ok {
- if v, ok := e.trySwap(&value); ok {
- if v == nil {
- return nil, false
- }
- return *v, true
- }
- }
-
- m.mu.Lock()
- read = m.loadReadOnly()
- if e, ok := read.m[key]; ok {
- if e.unexpungeLocked() {
- // The entry was previously expunged, which implies that there is a
- // non-nil dirty map and this entry is not in it.
- m.dirty[key] = e
- }
- if v := e.swapLocked(&value); v != nil {
- loaded = true
- previous = *v
- }
- } else if e, ok := m.dirty[key]; ok {
- if v := e.swapLocked(&value); v != nil {
- loaded = true
- previous = *v
- }
- } else {
- if !read.amended {
- // We're adding the first new key to the dirty map.
- // Make sure it is allocated and mark the read-only map as incomplete.
- m.dirtyLocked()
- m.read.Store(&readOnly{m: read.m, amended: true})
- }
- m.dirty[key] = newEntry(value)
- }
- m.mu.Unlock()
- return previous, loaded
+ return m.m.Swap(key, value)
}
// CompareAndSwap swaps the old and new values for key
// if the value stored in the map is equal to old.
// The old value must be of a comparable type.
func (m *Map) CompareAndSwap(key, old, new any) (swapped bool) {
- read := m.loadReadOnly()
- if e, ok := read.m[key]; ok {
- return e.tryCompareAndSwap(old, new)
- } else if !read.amended {
- return false // No existing value for key.
- }
-
- m.mu.Lock()
- defer m.mu.Unlock()
- read = m.loadReadOnly()
- swapped = false
- if e, ok := read.m[key]; ok {
- swapped = e.tryCompareAndSwap(old, new)
- } else if e, ok := m.dirty[key]; ok {
- swapped = e.tryCompareAndSwap(old, new)
- // We needed to lock mu in order to load the entry for key,
- // and the operation didn't change the set of keys in the map
- // (so it would be made more efficient by promoting the dirty
- // map to read-only).
- // Count it as a miss so that we will eventually switch to the
- // more efficient steady state.
- m.missLocked()
- }
- return swapped
+ return m.m.CompareAndSwap(key, old, new)
}
// CompareAndDelete deletes the entry for key if its value is equal to old.
@@ -432,35 +96,7 @@ func (m *Map) CompareAndSwap(key, old, new any) (swapped bool) {
// If there is no current value for key in the map, CompareAndDelete
// returns false (even if the old value is the nil interface value).
func (m *Map) CompareAndDelete(key, old any) (deleted bool) {
- read := m.loadReadOnly()
- e, ok := read.m[key]
- if !ok && read.amended {
- m.mu.Lock()
- read = m.loadReadOnly()
- e, ok = read.m[key]
- if !ok && read.amended {
- e, ok = m.dirty[key]
- // Don't delete key from m.dirty: we still need to do the “compare” part
- // of the operation. The entry will eventually be expunged when the
- // dirty map is promoted to the read map.
- //
- // Regardless of whether the entry was present, record a miss: this key
- // will take the slow path until the dirty map is promoted to the read
- // map.
- m.missLocked()
- }
- m.mu.Unlock()
- }
- for ok {
- p := e.p.Load()
- if p == nil || p == expunged || *p != old {
- return false
- }
- if e.p.CompareAndSwap(p, nil) {
- return true
- }
- }
- return false
+ return m.m.CompareAndDelete(key, old)
}
// Range calls f sequentially for each key and value present in the map.
@@ -475,70 +111,5 @@ func (m *Map) CompareAndDelete(key, old any) (deleted bool) {
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value any) bool) {
- // We need to be able to iterate over all of the keys that were already
- // present at the start of the call to Range.
- // If read.amended is false, then read.m satisfies that property without
- // requiring us to hold m.mu for a long time.
- read := m.loadReadOnly()
- if read.amended {
- // m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
- // (assuming the caller does not break out early), so a call to Range
- // amortizes an entire copy of the map: we can promote the dirty copy
- // immediately!
- m.mu.Lock()
- read = m.loadReadOnly()
- if read.amended {
- read = readOnly{m: m.dirty}
- copyRead := read
- m.read.Store(&copyRead)
- m.dirty = nil
- m.misses = 0
- }
- m.mu.Unlock()
- }
-
- for k, e := range read.m {
- v, ok := e.load()
- if !ok {
- continue
- }
- if !f(k, v) {
- break
- }
- }
-}
-
-func (m *Map) missLocked() {
- m.misses++
- if m.misses < len(m.dirty) {
- return
- }
- m.read.Store(&readOnly{m: m.dirty})
- m.dirty = nil
- m.misses = 0
-}
-
-func (m *Map) dirtyLocked() {
- if m.dirty != nil {
- return
- }
-
- read := m.loadReadOnly()
- m.dirty = make(map[any]*entry, len(read.m))
- for k, e := range read.m {
- if !e.tryExpungeLocked() {
- m.dirty[k] = e
- }
- }
-}
-
-func (e *entry) tryExpungeLocked() (isExpunged bool) {
- p := e.p.Load()
- for p == nil {
- if e.p.CompareAndSwap(nil, expunged) {
- return true
- }
- p = e.p.Load()
- }
- return p == expunged
+ m.m.Range(f)
}
diff --git a/src/sync/map_test.go b/src/sync/map_test.go
index 05c81354c8..0d6690c746 100644
--- a/src/sync/map_test.go
+++ b/src/sync/map_test.go
@@ -161,7 +161,7 @@ func TestConcurrentRange(t *testing.T) {
m := new(sync.Map)
for n := int64(1); n <= mapSize; n++ {
- m.Store(n, int64(n))
+ m.Store(n, n)
}
done := make(chan struct{})
diff --git a/src/syscall/dirent.go b/src/syscall/dirent.go
index f6e78d9bb5..c12b119335 100644
--- a/src/syscall/dirent.go
+++ b/src/syscall/dirent.go
@@ -33,7 +33,7 @@ func readIntBE(b []byte, size uintptr) uint64 {
case 4:
return uint64(byteorder.BEUint32(b))
case 8:
- return uint64(byteorder.BEUint64(b))
+ return byteorder.BEUint64(b)
default:
panic("syscall: readInt with unsupported size")
}
@@ -48,7 +48,7 @@ func readIntLE(b []byte, size uintptr) uint64 {
case 4:
return uint64(byteorder.LEUint32(b))
case 8:
- return uint64(byteorder.LEUint64(b))
+ return byteorder.LEUint64(b)
default:
panic("syscall: readInt with unsupported size")
}
diff --git a/src/syscall/dll_windows.go b/src/syscall/dll_windows.go
index a7873e6ad8..9d77986953 100644
--- a/src/syscall/dll_windows.go
+++ b/src/syscall/dll_windows.go
@@ -11,6 +11,15 @@ import (
"unsafe"
)
+// Use double underscore to avoid name collision autogenerated functions.
+//go:cgo_import_dynamic syscall.__LoadLibraryExW LoadLibraryExW%3 "kernel32.dll"
+//go:cgo_import_dynamic syscall.__GetProcAddress GetProcAddress%2 "kernel32.dll"
+
+var (
+ __LoadLibraryExW unsafe.Pointer
+ __GetProcAddress unsafe.Pointer
+)
+
// DLLError describes reasons for DLL load failures.
type DLLError struct {
Err error
@@ -22,31 +31,111 @@ func (e *DLLError) Error() string { return e.Msg }
func (e *DLLError) Unwrap() error { return e.Err }
-// Implemented in ../runtime/syscall_windows.go.
+// N.B. For the Syscall functions below:
+//
+// //go:uintptrkeepalive because the uintptr argument may be converted pointers
+// that need to be kept alive in the caller.
+//
+// //go:nosplit because stack copying does not account for uintptrkeepalive, so
+// the stack must not grow. Stack copying cannot blindly assume that all
+// uintptr arguments are pointers, because some values may look like pointers,
+// but not really be pointers, and adjusting their value would break the call.
// Deprecated: Use [SyscallN] instead.
-func Syscall(trap, nargs, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
+//
+//go:nosplit
+//go:uintptrkeepalive
+func Syscall(trap, nargs, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) {
+ return syscalln(trap, nargs, a1, a2, a3)
+}
// Deprecated: Use [SyscallN] instead.
-func Syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
+//
+//go:nosplit
+//go:uintptrkeepalive
+func Syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) {
+ return syscalln(trap, nargs, a1, a2, a3, a4, a5, a6)
+}
// Deprecated: Use [SyscallN] instead.
-func Syscall9(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno)
+//
+//go:nosplit
+//go:uintptrkeepalive
+func Syscall9(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) {
+ return syscalln(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+}
// Deprecated: Use [SyscallN] instead.
-func Syscall12(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2 uintptr, err Errno)
+//
+//go:nosplit
+//go:uintptrkeepalive
+func Syscall12(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2 uintptr, err Errno) {
+ return syscalln(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12)
+}
// Deprecated: Use [SyscallN] instead.
-func Syscall15(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2 uintptr, err Errno)
+//
+//go:nosplit
+//go:uintptrkeepalive
+func Syscall15(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2 uintptr, err Errno) {
+ return syscalln(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15)
+}
// Deprecated: Use [SyscallN] instead.
-func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2 uintptr, err Errno)
+//
+//go:nosplit
+//go:uintptrkeepalive
+func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2 uintptr, err Errno) {
+ return syscalln(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18)
+}
+// SyscallN executes procedure p with arguments args.
+//
+// See [Proc.Call] for more information.
+//
+//go:nosplit
+//go:uintptrkeepalive
+func SyscallN(p uintptr, args ...uintptr) (r1, r2 uintptr, err Errno) {
+ return syscalln(p, uintptr(len(args)), args...)
+}
+
+// syscalln is implemented in runtime/syscall_windows.go.
+//
//go:noescape
-func SyscallN(trap uintptr, args ...uintptr) (r1, r2 uintptr, err Errno)
-func loadlibrary(filename *uint16) (handle uintptr, err Errno)
-func loadsystemlibrary(filename *uint16) (handle uintptr, err Errno)
-func getprocaddress(handle uintptr, procname *uint8) (proc uintptr, err Errno)
+func syscalln(fn, n uintptr, args ...uintptr) (r1, r2 uintptr, err Errno)
+
+// N.B. For the loadlibrary, loadlibrary, and getprocaddress functions below:
+//
+// //go:linkname to act as an allowlist for linker's -checklinkname, as
+// golang.org/x/sys/windows linknames these functions.
+
+//go:linkname loadlibrary
+func loadlibrary(filename *uint16) (uintptr, Errno) {
+ handle, _, err := SyscallN(uintptr(__LoadLibraryExW), uintptr(unsafe.Pointer(filename)), 0, 0)
+ if handle != 0 {
+ err = 0
+ }
+ return handle, err
+}
+
+//go:linkname loadsystemlibrary
+func loadsystemlibrary(filename *uint16) (uintptr, Errno) {
+ const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
+ handle, _, err := SyscallN(uintptr(__LoadLibraryExW), uintptr(unsafe.Pointer(filename)), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32)
+ if handle != 0 {
+ err = 0
+ }
+ return handle, err
+}
+
+//go:linkname getprocaddress
+func getprocaddress(handle uintptr, procname *uint8) (uintptr, Errno) {
+ proc, _, err := SyscallN(uintptr(__GetProcAddress), handle, uintptr(unsafe.Pointer(procname)))
+ if proc != 0 {
+ err = 0
+ }
+ return proc, err
+}
// A DLL implements access to a single DLL.
type DLL struct {
diff --git a/src/syscall/syscall_linux.go b/src/syscall/syscall_linux.go
index d733ca9bf9..ec9f771daa 100644
--- a/src/syscall/syscall_linux.go
+++ b/src/syscall/syscall_linux.go
@@ -13,7 +13,7 @@ package syscall
import (
"internal/itoa"
- runtimesyscall "internal/runtime/syscall"
+ "internal/runtime/syscall/linux"
"runtime"
"slices"
"unsafe"
@@ -62,7 +62,7 @@ func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) {
//go:linkname RawSyscall6
func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) {
var errno uintptr
- r1, r2, errno = runtimesyscall.Syscall6(trap, a1, a2, a3, a4, a5, a6)
+ r1, r2, errno = linux.Syscall6(trap, a1, a2, a3, a4, a5, a6)
err = Errno(errno)
return
}
diff --git a/src/time/example_test.go b/src/time/example_test.go
index eeadcdb1c1..05eac86738 100644
--- a/src/time/example_test.go
+++ b/src/time/example_test.go
@@ -735,8 +735,8 @@ func ExampleTime_String() {
timeWithoutNanoseconds := time.Date(2000, 2, 1, 12, 13, 14, 0, time.UTC)
withoutNanoseconds := timeWithoutNanoseconds.String()
- fmt.Printf("withNanoseconds = %v\n", string(withNanoseconds))
- fmt.Printf("withoutNanoseconds = %v\n", string(withoutNanoseconds))
+ fmt.Printf("withNanoseconds = %v\n", withNanoseconds)
+ fmt.Printf("withoutNanoseconds = %v\n", withoutNanoseconds)
// Output:
// withNanoseconds = 2000-02-01 12:13:14.000000015 +0000 UTC
diff --git a/src/time/sleep.go b/src/time/sleep.go
index e9cd483be5..4b7750eb94 100644
--- a/src/time/sleep.go
+++ b/src/time/sleep.go
@@ -142,7 +142,7 @@ func (t *Timer) Stop() bool {
// in Go 1.27 or later.
func NewTimer(d Duration) *Timer {
c := make(chan Time, 1)
- t := (*Timer)(newTimer(when(d), 0, sendTime, c, syncTimer(c)))
+ t := newTimer(when(d), 0, sendTime, c, syncTimer(c))
t.C = c
return t
}
@@ -208,7 +208,7 @@ func After(d Duration) <-chan Time {
// be used to cancel the call using its Stop method.
// The returned Timer's C field is not used and will be nil.
func AfterFunc(d Duration, f func()) *Timer {
- return (*Timer)(newTimer(when(d), 0, goFunc, f, nil))
+ return newTimer(when(d), 0, goFunc, f, nil)
}
func goFunc(arg any, seq uintptr, delta int64) {
diff --git a/src/time/time.go b/src/time/time.go
index bcaeee407e..cf9abc7196 100644
--- a/src/time/time.go
+++ b/src/time/time.go
@@ -667,7 +667,7 @@ func (days absDays) split() (century absCentury, cyear absCyear, ayday absYday)
// so do that instead, saving a few cycles.
// See Neri and Schneider, section 8.3
// for more about this optimization.
- hi, lo := bits.Mul32(2939745, uint32(cd))
+ hi, lo := bits.Mul32(2939745, cd)
cyear = absCyear(hi)
ayday = absYday(lo / 2939745 / 4)
return
diff --git a/src/unicode/utf8/utf8.go b/src/unicode/utf8/utf8.go
index 82fa7c0d4d..01cad1cc81 100644
--- a/src/unicode/utf8/utf8.go
+++ b/src/unicode/utf8/utf8.go
@@ -263,7 +263,7 @@ func DecodeLastRune(p []byte) (r rune, size int) {
// guard against O(n^2) behavior when traversing
// backwards through strings with long sequences of
// invalid UTF-8.
- lim := max(end - UTFMax, 0)
+ lim := max(end-UTFMax, 0)
for start--; start >= lim; start-- {
if RuneStart(p[start]) {
break
@@ -300,7 +300,7 @@ func DecodeLastRuneInString(s string) (r rune, size int) {
// guard against O(n^2) behavior when traversing
// backwards through strings with long sequences of
// invalid UTF-8.
- lim := max(end - UTFMax, 0)
+ lim := max(end-UTFMax, 0)
for start--; start >= lim; start-- {
if RuneStart(s[start]) {
break
@@ -430,99 +430,111 @@ func RuneCountInString(s string) (n int) {
// bits set to 10.
func RuneStart(b byte) bool { return b&0xC0 != 0x80 }
+const ptrSize = 4 << (^uintptr(0) >> 63)
+const hiBits = 0x8080808080808080 >> (64 - 8*ptrSize)
+
+func word[T string | []byte](s T) uintptr {
+ if ptrSize == 4 {
+ return uintptr(s[0]) | uintptr(s[1])<<8 | uintptr(s[2])<<16 | uintptr(s[3])<<24
+ }
+ return uintptr(uint64(s[0]) | uint64(s[1])<<8 | uint64(s[2])<<16 | uint64(s[3])<<24 | uint64(s[4])<<32 | uint64(s[5])<<40 | uint64(s[6])<<48 | uint64(s[7])<<56)
+}
+
// Valid reports whether p consists entirely of valid UTF-8-encoded runes.
func Valid(p []byte) bool {
// This optimization avoids the need to recompute the capacity
- // when generating code for p[8:], bringing it to parity with
+ // when generating code for slicing p, bringing it to parity with
// ValidString, which was 20% faster on long ASCII strings.
p = p[:len(p):len(p)]
- // Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
- for len(p) >= 8 {
- // Combining two 32 bit loads allows the same code to be used
- // for 32 and 64 bit platforms.
- // The compiler can generate a 32bit load for first32 and second32
- // on many platforms. See test/codegen/memcombine.go.
- first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
- second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24
- if (first32|second32)&0x80808080 != 0 {
- // Found a non ASCII byte (>= RuneSelf).
- break
- }
- p = p[8:]
- }
- n := len(p)
- for i := 0; i < n; {
- pi := p[i]
- if pi < RuneSelf {
- i++
+ for len(p) > 0 {
+ p0 := p[0]
+ if p0 < RuneSelf {
+ p = p[1:]
+ // If there's one ASCII byte, there are probably more.
+ // Advance quickly through ASCII-only data.
+ // Note: using > instead of >= here is intentional. That avoids
+ // needing pointing-past-the-end fixup on the slice operations.
+ if len(p) > ptrSize && word(p)&hiBits == 0 {
+ p = p[ptrSize:]
+ if len(p) > 2*ptrSize && (word(p)|word(p[ptrSize:]))&hiBits == 0 {
+ p = p[2*ptrSize:]
+ for len(p) > 4*ptrSize && ((word(p)|word(p[ptrSize:]))|(word(p[2*ptrSize:])|word(p[3*ptrSize:])))&hiBits == 0 {
+ p = p[4*ptrSize:]
+ }
+ }
+ }
continue
}
- x := first[pi]
- if x == xx {
- return false // Illegal starter byte.
- }
+ x := first[p0]
size := int(x & 7)
- if i+size > n {
- return false // Short or invalid.
- }
accept := acceptRanges[x>>4]
- if c := p[i+1]; c < accept.lo || accept.hi < c {
- return false
- } else if size == 2 {
- } else if c := p[i+2]; c < locb || hicb < c {
- return false
- } else if size == 3 {
- } else if c := p[i+3]; c < locb || hicb < c {
- return false
+ switch size {
+ case 2:
+ if len(p) < 2 || p[1] < accept.lo || accept.hi < p[1] {
+ return false
+ }
+ p = p[2:]
+ case 3:
+ if len(p) < 3 || p[1] < accept.lo || accept.hi < p[1] || p[2] < locb || hicb < p[2] {
+ return false
+ }
+ p = p[3:]
+ case 4:
+ if len(p) < 4 || p[1] < accept.lo || accept.hi < p[1] || p[2] < locb || hicb < p[2] || p[3] < locb || hicb < p[3] {
+ return false
+ }
+ p = p[4:]
+ default:
+ return false // illegal starter byte
}
- i += size
}
return true
}
// ValidString reports whether s consists entirely of valid UTF-8-encoded runes.
func ValidString(s string) bool {
- // Fast path. Check for and skip 8 bytes of ASCII characters per iteration.
- for len(s) >= 8 {
- // Combining two 32 bit loads allows the same code to be used
- // for 32 and 64 bit platforms.
- // The compiler can generate a 32bit load for first32 and second32
- // on many platforms. See test/codegen/memcombine.go.
- first32 := uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 | uint32(s[3])<<24
- second32 := uint32(s[4]) | uint32(s[5])<<8 | uint32(s[6])<<16 | uint32(s[7])<<24
- if (first32|second32)&0x80808080 != 0 {
- // Found a non ASCII byte (>= RuneSelf).
- break
- }
- s = s[8:]
- }
- n := len(s)
- for i := 0; i < n; {
- si := s[i]
- if si < RuneSelf {
- i++
+ for len(s) > 0 {
+ s0 := s[0]
+ if s0 < RuneSelf {
+ s = s[1:]
+ // If there's one ASCII byte, there are probably more.
+ // Advance quickly through ASCII-only data.
+ // Note: using > instead of >= here is intentional. That avoids
+ // needing pointing-past-the-end fixup on the slice operations.
+ if len(s) > ptrSize && word(s)&hiBits == 0 {
+ s = s[ptrSize:]
+ if len(s) > 2*ptrSize && (word(s)|word(s[ptrSize:]))&hiBits == 0 {
+ s = s[2*ptrSize:]
+ for len(s) > 4*ptrSize && ((word(s)|word(s[ptrSize:]))|(word(s[2*ptrSize:])|word(s[3*ptrSize:])))&hiBits == 0 {
+ s = s[4*ptrSize:]
+ }
+ }
+ }
continue
}
- x := first[si]
- if x == xx {
- return false // Illegal starter byte.
- }
+ x := first[s0]
size := int(x & 7)
- if i+size > n {
- return false // Short or invalid.
- }
accept := acceptRanges[x>>4]
- if c := s[i+1]; c < accept.lo || accept.hi < c {
- return false
- } else if size == 2 {
- } else if c := s[i+2]; c < locb || hicb < c {
- return false
- } else if size == 3 {
- } else if c := s[i+3]; c < locb || hicb < c {
- return false
+ switch size {
+ case 2:
+ if len(s) < 2 || s[1] < accept.lo || accept.hi < s[1] {
+ return false
+ }
+ s = s[2:]
+ case 3:
+ if len(s) < 3 || s[1] < accept.lo || accept.hi < s[1] || s[2] < locb || hicb < s[2] {
+ return false
+ }
+ s = s[3:]
+ case 4:
+ if len(s) < 4 || s[1] < accept.lo || accept.hi < s[1] || s[2] < locb || hicb < s[2] || s[3] < locb || hicb < s[3] {
+ return false
+ }
+ s = s[4:]
+ default:
+ return false // illegal starter byte
}
- i += size
}
return true
}
diff --git a/src/unicode/utf8/utf8_test.go b/src/unicode/utf8/utf8_test.go
index 865167731f..aece0fab73 100644
--- a/src/unicode/utf8/utf8_test.go
+++ b/src/unicode/utf8/utf8_test.go
@@ -489,6 +489,16 @@ var validTests = []ValidTest{
{string("\xed\xbf\xbf"), false}, // U+DFFF low surrogate (sic)
}
+func init() {
+ for i := range 100 {
+ validTests = append(validTests, ValidTest{in: strings.Repeat("a", i), out: true})
+ validTests = append(validTests, ValidTest{in: strings.Repeat("a", i) + "Ж", out: true})
+ validTests = append(validTests, ValidTest{in: strings.Repeat("a", i) + "\xe2", out: false})
+ validTests = append(validTests, ValidTest{in: strings.Repeat("a", i) + "Ж" + strings.Repeat("b", i), out: true})
+ validTests = append(validTests, ValidTest{in: strings.Repeat("a", i) + "\xe2" + strings.Repeat("b", i), out: false})
+ }
+}
+
func TestValid(t *testing.T) {
for _, tt := range validTests {
if Valid([]byte(tt.in)) != tt.out {