| field | value |
|---|---|
| author | Cherry Mui <cherryyz@google.com>, 2025-08-04 15:07:05 -0400 |
| committer | Cherry Mui <cherryyz@google.com>, 2025-08-04 15:07:05 -0400 |
| commit | 775fb527458e09258d07a3c92eada92167b3e7d3 (patch) |
| tree | a6ce5d30793ee5797673481fa13d4d4179444bd7 /src/runtime |
| parent | 6b9b59e144a0db697b0e22920ff0b7e0b51c0945 (diff) |
| parent | 7a1679d7ae32dd8a01bd355413ee77ba517f5f43 (diff) |
| download | go-775fb527458e09258d07a3c92eada92167b3e7d3.tar.xz |
[dev.simd] all: merge master (7a1679d) into dev.simd
Conflicts:
- src/cmd/compile/internal/amd64/ssa.go
- src/cmd/compile/internal/ssa/rewriteAMD64.go
- src/internal/buildcfg/exp.go
- src/internal/cpu/cpu.go
- src/internal/cpu/cpu_x86.go
- src/internal/goexperiment/flags.go
Merge List:
+ 2025-08-04 7a1679d7ae cmd/compile: move s390x over to new bounds check strategy
+ 2025-08-04 95693816a5 cmd/compile: move riscv64 over to new bounds check strategy
+ 2025-08-04 d7bd7773eb go/parser: remove safePos
+ 2025-08-04 4b6cbc377f cmd/cgo/internal/test: use (syntactic) constant for C array bound
+ 2025-08-03 b2960e3580 cmd/internal/obj/loong64: add {V,XV}{BITCLR/BITSET/BITREV}[I].{B/H/W/D} instructions support
+ 2025-08-03 abeeef1c08 cmd/compile/internal/test: fix typo in comments
+ 2025-08-03 d44749b65b cmd/internal/obj/loong64: add [X]VLDREPL.{B/H/W/D} instructions support
+ 2025-08-03 d6beda863e runtime: add reference to debugPinnerV1
+ 2025-08-01 4ab1aec007 cmd/go: modload should use a read-write lock to improve concurrency
+ 2025-08-01 e666972a67 runtime: deduplicate Windows stdcall
+ 2025-08-01 ef40549786 runtime,syscall: move loadlibrary and getprocaddress to syscall
+ 2025-08-01 336931a4ca cmd/go: use os.Rename to move files on Windows
+ 2025-08-01 eef5f8d930 cmd/compile: enforce that locals are always accessed with SP base register
+ 2025-08-01 e071617222 cmd/compile: optimize multiplication rules on loong64
+ 2025-07-31 eb7f515c4d cmd/compile: use generated loops instead of DUFFZERO on amd64
+ 2025-07-31 c0ee2fd4e3 cmd/go: explicitly reject module paths "go" and "toolchain"
+ 2025-07-30 a4d99770c0 runtime/metrics: add cleanup and finalizer queue metrics
+ 2025-07-30 70a2ff7648 runtime: add cgo call benchmark
+ 2025-07-30 69338a335a cmd/go/internal/gover: fix ModIsPrerelease for toolchain versions
+ 2025-07-30 cedf63616a cmd/compile: add floating point min/max intrinsics on s390x
+ 2025-07-30 82a1921c3b all: remove redundant Swiss prefixes
+ 2025-07-30 2ae059ccaf all: remove GOEXPERIMENT=swissmap
+ 2025-07-30 cc571dab91 cmd/compile: deduplicate instructions when rewrite func results
+ 2025-07-30 2174a7936c crypto/tls: use standard chacha20-poly1305 cipher suite names
+ 2025-07-30 8330fb48a6 cmd/compile: move mips32 over to new bounds check strategy
+ 2025-07-30 9f9d7b50e8 cmd/compile: move mips64 over to new bounds check strategy
+ 2025-07-30 5216fd570e cmd/compile: move loong64 over to new bounds check strategy
+ 2025-07-30 89a0af86b8 cmd/compile: allow ops to specify clobbering input registers
+ 2025-07-30 5e94d72158 cmd/compile: simplify zerorange on arm64
+ 2025-07-30 8cd85e602a cmd/compile: check domination of loop return in both controls
+ 2025-07-30 cefaed0de0 reflect: fix noswiss builder
+ 2025-07-30 3aa1b00081 regexp: fix compiling alternate patterns of different fold case literals
+ 2025-07-30 b1e933d955 cmd/compile: avoid extending when already sufficiently masked on loong64
+ 2025-07-29 880ca333d7 cmd/compile: removing log2uint32 function
+ 2025-07-29 1513661dc3 cmd/compile: simplify logX implementations
+ 2025-07-29 bd94ae8903 cmd/compile: use unsigned power-of-two detector for unsigned mod
+ 2025-07-29 f3582fc80e cmd/compile: add unsigned power-of-two detector
+ 2025-07-29 f7d167fe71 internal/abi: move direct/indirect flag from Kind to TFlag
+ 2025-07-29 e0b07dc22e os/exec: fix incorrect expansion of "", "." and ".." in LookPath
+ 2025-07-29 25816d401c internal/goexperiment: delete RangeFunc goexperiment
+ 2025-07-29 7961bf71f8 internal/goexperiment: delete CacheProg goexperiment
+ 2025-07-29 e15a14c4dd sync: remove synchashtriemap GOEXPERIMENT
+ 2025-07-29 7dccd6395c cmd/compile: move arm32 over to new bounds check strategy
+ 2025-07-29 d79405a344 runtime: only deduct assist credit for arenas during GC
+ 2025-07-29 19a086f716 cmd/go/internal/telemetrystats: count goexperiments
+ 2025-07-29 aa95ab8215 image: fix formatting of godoc link
+ 2025-07-29 4c854b7a3e crypto/elliptic: change a variable name that have the same name as keywords
+ 2025-07-28 b10eb1d042 cmd/compile: simplify zerorange on amd64
+ 2025-07-28 f8eae7a3c3 os/user: fix tests to pass on non-english Windows
+ 2025-07-28 0984264471 internal/poll: remove msg field from Windows' poll.operation
+ 2025-07-28 d7b4114346 internal/poll: remove rsan field from Windows' poll.operation
+ 2025-07-28 361b1ab41f internal/poll: remove sa field from Windows' poll.operation
+ 2025-07-28 9b6bd64e46 internal/poll: remove qty and flags fields from Windows' poll.operation
+ 2025-07-28 cd3655a824 internal/runtime/maps: fix spelling errors in comments
+ 2025-07-28 d5dc36af45 runtime: remove openbsd/mips64 related code
+ 2025-07-28 64ba72474d errors: omit redundant nil check in type assertion for Join
+ 2025-07-28 e151db3e06 all: omit unnecessary type conversions
+ 2025-07-28 4569255f8c cmd/compile: cleanup SelectN rules by indexing into args
+ 2025-07-28 94645d2413 cmd/compile: rewrite cmov(x, x, cond) into x
+ 2025-07-28 10c5cf68d4 net/http: add proper panic message
+ 2025-07-28 46b5839231 test/codegen: fix failing condmove wasm tests
+ 2025-07-28 98f301cf68 runtime,syscall: move SyscallX implementations from runtime to syscall
+ 2025-07-28 c7ed3a1c5a internal/runtime/syscall/windows: factor out code from runtime
+ 2025-07-28 e81eac19d3 hash/crc32: fix incorrect checksums with avx512+race
+ 2025-07-25 6fbad4be75 cmd/compile: remove no-longer-necessary call to calculateDepths
+ 2025-07-25 5045fdd8ff cmd/compile: fix containsUnavoidableCall computation
+ 2025-07-25 d28b27cd8e go/types, types2: use nil to represent incomplete explicit aliases
+ 2025-07-25 7b53d8d06e cmd/compile/internal/types2: add loaded state between loader calls and constraint expansion
+ 2025-07-25 374e3be2eb os/user: user random name for the test user account
+ 2025-07-25 1aa154621d runtime: rename scanobject to scanObject
+ 2025-07-25 41b429881a runtime: duplicate scanobject in greentea and non-greentea files
+ 2025-07-25 aeb256e98a cmd/compile: remove unused arg from gorecover
+ 2025-07-25 08376e1a9c runtime: iterate through inlinings when processing recover()
+ 2025-07-25 c76c3abc54 encoding/json: fix truncated Token error regression in goexperiment.jsonv2
+ 2025-07-25 ebdbfccd98 encoding/json/jsontext: preserve buffer capacity in Encoder.Reset
+ 2025-07-25 91c4f0ccd5 reflect: avoid a bounds check in stack-constrained code
+ 2025-07-24 3636ced112 encoding/json: fix extra data regression under goexperiment.jsonv2
+ 2025-07-24 a6eec8bdc7 encoding/json: reduce error text regressions under goexperiment.jsonv2
+ 2025-07-24 0fa88dec1e time: remove redundant uint32 conversion in split
+ 2025-07-24 ada30b8248 internal/buildcfg: add ability to get GORISCV64 variable in GOGOARCH
+ 2025-07-24 6f6c6c5782 cmd/internal/obj: rip out argp adjustment for wrapper frames
+ 2025-07-24 7b50024330 runtime: detect successful recovers differently
+ 2025-07-24 7b9de668bd unicode/utf8: skip ahead during ascii runs in Valid/ValidString
+ 2025-07-24 076eae436e cmd/compile: move amd64 and 386 over to new bounds check strategy
+ 2025-07-24 f703dc5bef cmd/compile: add missing StringLen rule in prove
+ 2025-07-24 394d0bee8d cmd/compile: move arm64 over to new bounds check strategy
+ 2025-07-24 3024785b92 cmd/compile,runtime: remember idx+len for bounds check failure with less code
+ 2025-07-24 741a19ab41 runtime: move bounds check constants to internal/abi
+ 2025-07-24 ce05ad448f cmd/compile: rewrite condselects into doublings and halvings
+ 2025-07-24 fcd28070fe cmd/compile: add opt branchelim to rewrite some CondSelect into math
+ 2025-07-24 f32cf8e4b0 cmd/compile: learn transitive proofs for safe unsigned subs
+ 2025-07-24 d574856482 cmd/compile: learn transitive proofs for safe negative signed adds
+ 2025-07-24 1a72920f09 cmd/compile: learn transitive proofs for safe positive signed adds
+ 2025-07-24 e5f202bb60 cmd/compile: learn transitive proofs for safe unsigned adds
+ 2025-07-24 bd80f74bc1 cmd/compile: fold shift through AND for slice operations
+ 2025-07-24 5c45fe1385 internal/runtime/syscall: rename to internal/runtime/syscall/linux
+ 2025-07-24 592c2db868 cmd/compile: improve loopRotate to handle nested loops
+ 2025-07-24 dcb479c2f9 cmd/compile: optimize slice bounds checking with SUB/SUBconst comparisons
+ 2025-07-24 f11599b0b9 internal/poll: remove handle field from Windows' poll.operation
+ 2025-07-24 f7432e0230 internal/poll: remove fd field from Windows' poll.operation
+ 2025-07-24 e84ed38641 runtime: add benchmark for small-size memmory operation
+ 2025-07-24 18dbe5b941 hash/crc32: add AVX512 IEEE CRC32 calculation
+ 2025-07-24 c641900f72 cmd/compile: prefer base.Fatalf to panic in dwarfgen
+ 2025-07-24 d71d8aeafd cmd/internal/obj/s390x: add MVCLE instruction
+ 2025-07-24 b6cf1d94dc runtime: optimize memclr on mips64x
+ 2025-07-24 a8edd99479 runtime: improvement in memclr for s390x
+ 2025-07-24 bd04f65511 internal/runtime/exithook: fix a typo
+ 2025-07-24 5c8624a396 cmd/internal/goobj: make error output clear
+ 2025-07-24 44d73dfb4e cmd/go/internal/doc: clean up after merge with cmd/internal/doc
+ 2025-07-24 bd446662dd cmd/internal/doc: merge with cmd/go/internal/doc
+ 2025-07-24 da8b50c830 cmd/doc: delete
+ 2025-07-24 6669aa3b14 runtime: randomize heap base address
+ 2025-07-24 26338a7f69 cmd/compile: use better fatal message for staticValue1
+ 2025-07-24 8587ba272e cmd/cgo: compare malloc return value to NULL instead of literal 0
+ 2025-07-24 cae45167b7 go/types, types2: better error messages for certain type mismatches
+ 2025-07-24 2ddf542e4c cmd/compile: use ,ok return idiom for sparsemap.get
+ 2025-07-24 6505fcbd0a cmd/compile: use generics for sparse map
+ 2025-07-24 14f5eb7812 cmd/api: rerun updategolden
+ 2025-07-24 52b6d7f67a runtime: drop NetBSD kernel bug sysmon workaround fixed in NetBSD 9.2
+ 2025-07-24 1ebebf1cc1 cmd/go: clean should respect workspaces
+ 2025-07-24 6536a93547 encoding/json/jsontext: preserve buffer capacity in Decoder.Reset
+ 2025-07-24 efc37e97c0 cmd/go: always return the cached path from go tool -n
+ 2025-07-23 98a031193b runtime: check TestUsingVDSO ExitError type assertion
+ 2025-07-23 6bb42997c8 doc/next: initialize
+ 2025-07-23 2696a11a97 internal/goversion: update Version to 1.26
+ 2025-07-23 489868f776 cmd/link: scope test to linux & net.sendFile
+ 2025-07-22 71c2bf5513 cmd/compile: fix loclist for heap return vars without optimizations
+ 2025-07-22 c74399e7f5 net: correct comment for ListenConfig.ListenPacket
+ 2025-07-22 4ed9943b26 all: go fmt
+ 2025-07-22 1aaf7422f1 cmd/internal/objabi: remove redundant word in comment
+ 2025-07-21 d5ec0815e6 runtime: relax TestMemoryLimitNoGCPercent a bit
+ 2025-07-21 f7cc61e7d7 cmd/compile: for arm64 epilog, do SP increment with a single instruction
+ 2025-07-21 5dac42363b runtime: fix asan wrapper for riscv64
+ 2025-07-21 e5502e0959 cmd/go: check subcommand properties
+ 2025-07-19 2363897932 cmd/internal/obj: enable got pcrel itype in fips140 for riscv64
+ 2025-07-19 e32255fcc0 cmd/compile/internal/ssa: restrict architectures for TestDebugLines_74576
+ 2025-07-18 0451816430 os: revert the use of AddCleanup to close files and roots
+ 2025-07-18 34b70684ba go/types: infer correct type for y in append(bytes, y...)
+ 2025-07-17 66536242fc cmd/compile/internal/escape: improve DWARF .debug_line numbering for literal rewriting optimizations
+ 2025-07-16 385000b004 runtime: fix idle time double-counting bug
+ 2025-07-16 f506ad2644 cmd/compile/internal/escape: speed up analyzing some functions with many closures
+ 2025-07-16 9c507e7942 cmd/link, runtime: on Wasm, put only function index in method table and func table
+ 2025-07-16 9782dcfd16 runtime: use 32-bit function index on Wasm
+ 2025-07-16 c876bf9346 cmd/internal/obj/wasm: use 64-bit instructions for indirect calls
+ 2025-07-15 b4309ece66 cmd/internal/doc: upgrade godoc pkgsite to 01b046e
+ 2025-07-15 75a19dbcd7 runtime: use memclrNoHeapPointers to clear inline mark bits
+ 2025-07-15 6d4a91c7a5 runtime: only clear inline mark bits on span alloc if necessary
+ 2025-07-15 0c6296ab12 runtime: have mergeInlineMarkBits also clear the inline mark bits
+ 2025-07-15 397d2117ec runtime: merge inline mark bits with gcmarkBits 8 bytes at a time
+ 2025-07-15 7dceabd3be runtime/maps: fix typo in group.go comment (instrinsified -> intrinsified)
+ 2025-07-15 d826bf4d74 os: remove useless error check
+ 2025-07-14 bb07e55aff runtime: expand GOMAXPROCS documentation
+ 2025-07-14 9159cd4ec6 encoding/json: decompose legacy options
+ 2025-07-14 c6556b8eb3 encoding/json/v2: add security section to doc
+ 2025-07-11 6ebb5f56d9 runtime: gofmt after CL 643897 and CL 662455
+ 2025-07-11 1e48ca7020 encoding/json: remove legacy option to EscapeInvalidUTF8
+ 2025-07-11 a0a99cb22b encoding/json/v2: report wrapped io.ErrUnexpectedEOF
+ 2025-07-11 9d04122d24 crypto/rsa: drop contradictory promise to keep PublicKey modulus secret
+ 2025-07-11 1ca23682dd crypto/rsa: fix documentation formatting
+ 2025-07-11 4bc3373c8e runtime: turn off large memmove tests under asan/msan
Change-Id: I1e32d964eba770b85421efb86b305a2242f24466
Diffstat (limited to 'src/runtime')
123 files changed, 2321 insertions, 7359 deletions
```diff
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index df32bc7941..b956f9d05a 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -144,7 +144,7 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr {
 		// we want to report the struct, not the slice).
 		panic(errorString("hash of unhashable type " + toRType(t).string()))
 	}
-	if isDirectIface(t) {
+	if t.IsDirectIface() {
 		return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
 	} else {
 		return c1 * typehash(t, a.data, h^c0)
@@ -171,7 +171,7 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
 		// See comment in interhash above.
 		panic(errorString("hash of unhashable type " + toRType(t).string()))
 	}
-	if isDirectIface(t) {
+	if t.IsDirectIface() {
 		return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
 	} else {
 		return c1 * typehash(t, a.data, h^c0)
@@ -211,7 +211,7 @@ func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
 			return memhash(p, h, t.Size_)
 		}
 	}
-	switch t.Kind_ & abi.KindMask {
+	switch t.Kind() {
 	case abi.Float32:
 		return f32hash(p, h)
 	case abi.Float64:
@@ -306,7 +306,7 @@ func efaceeq(t *_type, x, y unsafe.Pointer) bool {
 	if eq == nil {
 		panic(errorString("comparing uncomparable type " + toRType(t).string()))
 	}
-	if isDirectIface(t) {
+	if t.IsDirectIface() {
 		// Direct interface types are ptr, chan, map, func, and single-element structs/arrays thereof.
 		// Maps and funcs are not comparable, so they can't reach here.
 		// Ptrs, chans, and single-element items can be compared directly using ==.
@@ -323,7 +323,7 @@ func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
 	if eq == nil {
 		panic(errorString("comparing uncomparable type " + toRType(t).string()))
 	}
-	if isDirectIface(t) {
+	if t.IsDirectIface() {
 		// See comment in efaceeq.
 		return x == y
 	}
diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index e807995810..52a2a99d6c 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -111,7 +111,7 @@ func arena_newArena() unsafe.Pointer {
 //go:linkname arena_arena_New arena.runtime_arena_arena_New
 func arena_arena_New(arena unsafe.Pointer, typ any) any {
 	t := (*_type)(efaceOf(&typ).data)
-	if t.Kind_&abi.KindMask != abi.Pointer {
+	if t.Kind() != abi.Pointer {
 		throw("arena_New: non-pointer type")
 	}
 	te := (*ptrtype)(unsafe.Pointer(t)).Elem
@@ -145,7 +145,7 @@ func arena_heapify(s any) any {
 	var v unsafe.Pointer
 	e := efaceOf(&s)
 	t := e._type
-	switch t.Kind_ & abi.KindMask {
+	switch t.Kind() {
 	case abi.String:
 		v = stringStructOf((*string)(e.data)).str
 	case abi.Slice:
@@ -162,7 +162,7 @@ func arena_heapify(s any) any {
 	}
 	// Heap-allocate storage for a copy.
 	var x any
-	switch t.Kind_ & abi.KindMask {
+	switch t.Kind() {
 	case abi.String:
 		s1 := s.(string)
 		s2, b := rawstring(len(s1))
@@ -293,11 +293,11 @@ func (a *userArena) slice(sl any, cap int) {
 	}
 	i := efaceOf(&sl)
 	typ := i._type
-	if typ.Kind_&abi.KindMask != abi.Pointer {
+	if typ.Kind() != abi.Pointer {
 		panic("slice result of non-ptr type")
 	}
 	typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
-	if typ.Kind_&abi.KindMask != abi.Slice {
+	if typ.Kind() != abi.Slice {
 		panic("slice of non-ptr-to-slice type")
 	}
 	typ = (*slicetype)(unsafe.Pointer(typ)).Elem
@@ -745,7 +745,9 @@ func newUserArenaChunk() (unsafe.Pointer, *mspan) {
 	// does represent additional work for the GC, but we also have no idea
 	// what that looks like until we actually allocate things into the
 	// arena).
-	deductAssistCredit(userArenaChunkBytes)
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(userArenaChunkBytes)
+	}
 
 	// Set mp.mallocing to keep from being preempted by GC.
 	mp := acquirem()
diff --git a/src/runtime/asan_riscv64.s b/src/runtime/asan_riscv64.s
index eb76e61ffb..5a333361dd 100644
--- a/src/runtime/asan_riscv64.s
+++ b/src/runtime/asan_riscv64.s
@@ -81,13 +81,13 @@ TEXT asancall<>(SB), NOSPLIT, $0-0
 	MOV	g_m(g), X21
 
 	// Switch to g0 stack if we aren't already on g0 or gsignal.
-	MOV	m_gsignal(X21), X21
-	BEQ	X21, g, call
+	MOV	m_gsignal(X21), X22
+	BEQ	X22, g, call
 
-	MOV	m_g0(X21), X21
-	BEQ	X21, g, call
+	MOV	m_g0(X21), X22
+	BEQ	X22, g, call
 
-	MOV	(g_sched+gobuf_sp)(X21), X2
+	MOV	(g_sched+gobuf_sp)(X22), X2
 
 call:
	JALR	RA, X14
```
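Most of the Go-file churn in this diff is mechanical: `t.Kind_ & abi.KindMask` comparisons become `t.Kind()` calls, and `t.Kind_&abi.KindDirectIface == 0` becomes `!t.IsDirectIface()` (see the 2025-07-29 "internal/abi: move direct/indirect flag from Kind to TFlag" commit in the merge list). A minimal, self-contained sketch of the accessor pattern being adopted — the names and bit layout here are illustrative assumptions, not the actual `internal/abi` declarations:

```go
package main

import "fmt"

// Hypothetical packed-kind layout; the real internal/abi type differs
// (in particular, the direct-iface bit now lives in TFlag, not Kind).
type Kind uint8

const (
	kindMask        = (1 << 5) - 1 // low bits: the kind itself
	flagDirectIface = 1 << 7       // assumed flag bit, for illustration
)

type rtype struct{ kind uint8 }

// Kind hides the mask, so call sites compare t.Kind() == someKind
// instead of spelling out t.kind & kindMask everywhere.
func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }

// IsDirectIface reports whether a value of this type is stored
// directly in an interface word rather than behind a pointer.
func (t *rtype) IsDirectIface() bool { return t.kind&flagDirectIface != 0 }

func main() {
	t := &rtype{kind: 22 | flagDirectIface} // 22: an arbitrary kind value
	fmt.Println(t.Kind() == 22, t.IsDirectIface()) // true true
}
```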
```diff
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index 62ab83985f..df32e90fda 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -1509,161 +1509,47 @@ TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
 	MOVL	$32, DI
 	JMP	gcWriteBarrier<>(SB)
 
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-8
-	MOVL	AX, x+0(FP)
-	MOVL	CX, y+4(FP)
-	JMP	runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-8
-	MOVL	AX, x+0(FP)
-	MOVL	CX, y+4(FP)
-	JMP	runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-8
-	MOVL	CX, x+0(FP)
-	MOVL	DX, y+4(FP)
-	JMP	runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-8
-	MOVL	CX, x+0(FP)
-	MOVL	DX, y+4(FP)
-	JMP	runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-8
-	MOVL	CX, x+0(FP)
-	MOVL	DX, y+4(FP)
-	JMP	runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-8
-	MOVL	CX, x+0(FP)
-	MOVL	DX, y+4(FP)
-	JMP	runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-8
-	MOVL	AX, x+0(FP)
-	MOVL	CX, y+4(FP)
-	JMP	runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-8
-	MOVL	AX, x+0(FP)
-	MOVL	CX, y+4(FP)
-	JMP	runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-8
-	MOVL	DX, x+0(FP)
-	MOVL	BX, y+4(FP)
-	JMP	runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-8
-	MOVL	DX, x+0(FP)
-	MOVL	BX, y+4(FP)
-	JMP	runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-8
-	MOVL	DX, x+0(FP)
-	MOVL	BX, y+4(FP)
-	JMP	runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-8
-	MOVL	DX, x+0(FP)
-	MOVL	BX, y+4(FP)
-	JMP	runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-8
-	MOVL	CX, x+0(FP)
-	MOVL	DX, y+4(FP)
-	JMP	runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-8
-	MOVL	CX, x+0(FP)
-	MOVL	DX, y+4(FP)
-	JMP	runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-8
-	MOVL	AX, x+0(FP)
-	MOVL	CX, y+4(FP)
-	JMP	runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-8
-	MOVL	AX, x+0(FP)
-	MOVL	CX, y+4(FP)
-	JMP	runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-8
-	MOVL	DX, x+0(FP)
-	MOVL	BX, y+4(FP)
-	JMP	runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$40-0
+	NO_LOCAL_POINTERS
+	// Save all int registers that could have an index in them.
+	// They may be pointers, but if they are they are dead.
+	MOVL	AX, 8(SP)
+	MOVL	CX, 12(SP)
+	MOVL	DX, 16(SP)
+	MOVL	BX, 20(SP)
+	// skip SP @ 24(SP)
+	MOVL	BP, 28(SP)
+	MOVL	SI, 32(SP)
+	MOVL	DI, 36(SP)
+
+	MOVL	SP, AX	// hide SP read from vet
+	MOVL	40(AX), AX	// PC immediately after call to panicBounds
+	MOVL	AX, 0(SP)
+	LEAL	8(SP), AX
+	MOVL	AX, 4(SP)
+	CALL	runtime·panicBounds32<ABIInternal>(SB)
+	RET
 
-// Extended versions for 64-bit indexes.
-TEXT runtime·panicExtendIndex(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	AX, lo+4(FP)
-	MOVL	CX, y+8(FP)
-	JMP	runtime·goPanicExtendIndex(SB)
-TEXT runtime·panicExtendIndexU(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	AX, lo+4(FP)
-	MOVL	CX, y+8(FP)
-	JMP	runtime·goPanicExtendIndexU(SB)
-TEXT runtime·panicExtendSliceAlen(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	CX, lo+4(FP)
-	MOVL	DX, y+8(FP)
-	JMP	runtime·goPanicExtendSliceAlen(SB)
-TEXT runtime·panicExtendSliceAlenU(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	CX, lo+4(FP)
-	MOVL	DX, y+8(FP)
-	JMP	runtime·goPanicExtendSliceAlenU(SB)
-TEXT runtime·panicExtendSliceAcap(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	CX, lo+4(FP)
-	MOVL	DX, y+8(FP)
-	JMP	runtime·goPanicExtendSliceAcap(SB)
-TEXT runtime·panicExtendSliceAcapU(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	CX, lo+4(FP)
-	MOVL	DX, y+8(FP)
-	JMP	runtime·goPanicExtendSliceAcapU(SB)
-TEXT runtime·panicExtendSliceB(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	AX, lo+4(FP)
-	MOVL	CX, y+8(FP)
-	JMP	runtime·goPanicExtendSliceB(SB)
-TEXT runtime·panicExtendSliceBU(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	AX, lo+4(FP)
-	MOVL	CX, y+8(FP)
-	JMP	runtime·goPanicExtendSliceBU(SB)
-TEXT runtime·panicExtendSlice3Alen(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	DX, lo+4(FP)
-	MOVL	BX, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3Alen(SB)
-TEXT runtime·panicExtendSlice3AlenU(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	DX, lo+4(FP)
-	MOVL	BX, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3AlenU(SB)
-TEXT runtime·panicExtendSlice3Acap(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	DX, lo+4(FP)
-	MOVL	BX, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3Acap(SB)
-TEXT runtime·panicExtendSlice3AcapU(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	DX, lo+4(FP)
-	MOVL	BX, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3AcapU(SB)
-TEXT runtime·panicExtendSlice3B(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	CX, lo+4(FP)
-	MOVL	DX, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3B(SB)
-TEXT runtime·panicExtendSlice3BU(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	CX, lo+4(FP)
-	MOVL	DX, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3BU(SB)
-TEXT runtime·panicExtendSlice3C(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	AX, lo+4(FP)
-	MOVL	CX, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3C(SB)
-TEXT runtime·panicExtendSlice3CU(SB),NOSPLIT,$0-12
-	MOVL	SI, hi+0(FP)
-	MOVL	AX, lo+4(FP)
-	MOVL	CX, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3CU(SB)
+TEXT runtime·panicExtend<ABIInternal>(SB),NOSPLIT,$40-0
+	NO_LOCAL_POINTERS
+	// Save all int registers that could have an index in them.
+	// They may be pointers, but if they are they are dead.
+	MOVL	AX, 8(SP)
+	MOVL	CX, 12(SP)
+	MOVL	DX, 16(SP)
+	MOVL	BX, 20(SP)
+	// skip SP @ 24(SP)
+	MOVL	BP, 28(SP)
+	MOVL	SI, 32(SP)
+	MOVL	DI, 36(SP)
+
+	MOVL	SP, AX	// hide SP read from vet
+	MOVL	40(AX), AX	// PC immediately after call to panicExtend
+	MOVL	AX, 0(SP)
+	LEAL	8(SP), AX
+	MOVL	AX, 4(SP)
+	CALL	runtime·panicBounds32X<ABIInternal>(SB)
+	RET
 
 #ifdef GOOS_android
 // Use the free TLS_SLOT_APP slot #2 on Android Q.
```
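The per-architecture rewrites in this diff all follow the pattern above: sixteen-plus specialized panic stubs are replaced by a single `panicBounds` (plus `panicExtend` for 64-bit indexes on 32-bit targets) that dumps the integer registers and lets the runtime recover the failing index and length from metadata keyed to the call site's PC. That is the "new bounds check strategy" named throughout the merge list. From Go code the observable behavior is unchanged:

```go
package main

import "fmt"

// Observable behavior is unchanged by the new strategy: an
// out-of-range access raises the same runtime error, it is just
// routed through the single panicBounds entry point now.
func main() {
	defer func() { fmt.Println("recovered:", recover()) }()
	s := []int{1, 2, 3}
	i := 5
	_ = s[i] // bounds check fails here
}
// Prints: recovered: runtime error: index out of range [5] with length 3
```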
```diff
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 8983eeafcb..cf1d49a4ad 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -2024,69 +2024,32 @@ TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
 	BYTE	$0xcc
 	RET
 
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-// Defined as ABIInternal since they do not use the stack-based Go ABI.
-TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	CX, BX
-	JMP	runtime·goPanicIndex<ABIInternal>(SB)
-TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	CX, BX
-	JMP	runtime·goPanicIndexU<ABIInternal>(SB)
-TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	CX, AX
-	MOVQ	DX, BX
-	JMP	runtime·goPanicSliceAlen<ABIInternal>(SB)
-TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	CX, AX
-	MOVQ	DX, BX
-	JMP	runtime·goPanicSliceAlenU<ABIInternal>(SB)
-TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	CX, AX
-	MOVQ	DX, BX
-	JMP	runtime·goPanicSliceAcap<ABIInternal>(SB)
-TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	CX, AX
-	MOVQ	DX, BX
-	JMP	runtime·goPanicSliceAcapU<ABIInternal>(SB)
-TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	CX, BX
-	JMP	runtime·goPanicSliceB<ABIInternal>(SB)
-TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	CX, BX
-	JMP	runtime·goPanicSliceBU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	DX, AX
-	JMP	runtime·goPanicSlice3Alen<ABIInternal>(SB)
-TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	DX, AX
-	JMP	runtime·goPanicSlice3AlenU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	DX, AX
-	JMP	runtime·goPanicSlice3Acap<ABIInternal>(SB)
-TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	DX, AX
-	JMP	runtime·goPanicSlice3AcapU<ABIInternal>(SB)
-TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	CX, AX
-	MOVQ	DX, BX
-	JMP	runtime·goPanicSlice3B<ABIInternal>(SB)
-TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	CX, AX
-	MOVQ	DX, BX
-	JMP	runtime·goPanicSlice3BU<ABIInternal>(SB)
-TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	CX, BX
-	JMP	runtime·goPanicSlice3C<ABIInternal>(SB)
-TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	CX, BX
-	JMP	runtime·goPanicSlice3CU<ABIInternal>(SB)
-TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVQ	DX, AX
-	JMP	runtime·goPanicSliceConvert<ABIInternal>(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$144-0
+	NO_LOCAL_POINTERS
+	// Save all 14 int registers that could have an index in them.
+	// They may be pointers, but if they are they are dead.
+	MOVQ	AX, 16(SP)
+	MOVQ	CX, 24(SP)
+	MOVQ	DX, 32(SP)
+	MOVQ	BX, 40(SP)
+	// skip SP @ 48(SP)
+	MOVQ	BP, 56(SP)
+	MOVQ	SI, 64(SP)
+	MOVQ	DI, 72(SP)
+	MOVQ	R8, 80(SP)
+	MOVQ	R9, 88(SP)
+	MOVQ	R10, 96(SP)
+	MOVQ	R11, 104(SP)
+	MOVQ	R12, 112(SP)
+	MOVQ	R13, 120(SP)
+	// skip R14 @ 128(SP) (aka G)
+	MOVQ	R15, 136(SP)
+
+	MOVQ	SP, AX	// hide SP read from vet
+	MOVQ	152(AX), AX	// PC immediately after call to panicBounds
+	LEAQ	16(SP), BX
+	CALL	runtime·panicBounds64<ABIInternal>(SB)
+	RET
 
 #ifdef GOOS_android
 // Use the free TLS_SLOT_APP slot #2 on Android Q.
```
```diff
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index ca9f0ced03..742b97f888 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -991,158 +991,56 @@ TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
 	MOVW	$32, R8
 	JMP	gcWriteBarrier<>(SB)
 
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-8
-	MOVW	R0, x+0(FP)
-	MOVW	R1, y+4(FP)
-	JMP	runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-8
-	MOVW	R0, x+0(FP)
-	MOVW	R1, y+4(FP)
-	JMP	runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-8
-	MOVW	R1, x+0(FP)
-	MOVW	R2, y+4(FP)
-	JMP	runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-8
-	MOVW	R1, x+0(FP)
-	MOVW	R2, y+4(FP)
-	JMP	runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-8
-	MOVW	R1, x+0(FP)
-	MOVW	R2, y+4(FP)
-	JMP	runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-8
-	MOVW	R1, x+0(FP)
-	MOVW	R2, y+4(FP)
-	JMP	runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-8
-	MOVW	R0, x+0(FP)
-	MOVW	R1, y+4(FP)
-	JMP	runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-8
-	MOVW	R0, x+0(FP)
-	MOVW	R1, y+4(FP)
-	JMP	runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-8
-	MOVW	R2, x+0(FP)
-	MOVW	R3, y+4(FP)
-	JMP	runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-8
-	MOVW	R2, x+0(FP)
-	MOVW	R3, y+4(FP)
-	JMP	runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-8
-	MOVW	R2, x+0(FP)
-	MOVW	R3, y+4(FP)
-	JMP	runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-8
-	MOVW	R2, x+0(FP)
-	MOVW	R3, y+4(FP)
-	JMP	runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-8
-	MOVW	R1, x+0(FP)
-	MOVW	R2, y+4(FP)
-	JMP	runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-8
-	MOVW	R1, x+0(FP)
-	MOVW	R2, y+4(FP)
-	JMP	runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-8
-	MOVW	R0, x+0(FP)
-	MOVW	R1, y+4(FP)
-	JMP	runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-8
-	MOVW	R0, x+0(FP)
-	MOVW	R1, y+4(FP)
-	JMP	runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-8
-	MOVW	R2, x+0(FP)
-	MOVW	R3, y+4(FP)
-	JMP	runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$72-0
+	NO_LOCAL_POINTERS
+	// Save all int registers that could have an index in them.
+	// They may be pointers, but if they are they are dead.
+	MOVW	R0, 12(R13)
+	MOVW	R1, 16(R13)
+	MOVW	R2, 20(R13)
+	MOVW	R3, 24(R13)
+	MOVW	R4, 28(R13)
+	MOVW	R5, 32(R13)
+	MOVW	R6, 36(R13)
+	MOVW	R7, 40(R13)
+	MOVW	R8, 44(R13)
+	MOVW	R9, 48(R13)
+	// skip R10 aka G @ 52(R13)
+	// skip R11 aka tmp @ 56(R13)
+	MOVW	R12, 60(R13)
+	// skip R13 aka SP @ 64(R13)
+	MOVW	R14, 68(R13)
+	// skip R15 aka PC @ 72(R13)
+
+	MOVW	R14, 4(R13)	// PC immediately after call to panicBounds
+	ADD	$12, R13, R0	// pointer to save area
+	MOVW	R0, 8(R13)
+	CALL	runtime·panicBounds32<ABIInternal>(SB)
+	RET
 
-// Extended versions for 64-bit indexes.
-TEXT runtime·panicExtendIndex(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R0, lo+4(FP)
-	MOVW	R1, y+8(FP)
-	JMP	runtime·goPanicExtendIndex(SB)
-TEXT runtime·panicExtendIndexU(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R0, lo+4(FP)
-	MOVW	R1, y+8(FP)
-	JMP	runtime·goPanicExtendIndexU(SB)
-TEXT runtime·panicExtendSliceAlen(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R1, lo+4(FP)
-	MOVW	R2, y+8(FP)
-	JMP	runtime·goPanicExtendSliceAlen(SB)
-TEXT runtime·panicExtendSliceAlenU(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R1, lo+4(FP)
-	MOVW	R2, y+8(FP)
-	JMP	runtime·goPanicExtendSliceAlenU(SB)
-TEXT runtime·panicExtendSliceAcap(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R1, lo+4(FP)
-	MOVW	R2, y+8(FP)
-	JMP	runtime·goPanicExtendSliceAcap(SB)
-TEXT runtime·panicExtendSliceAcapU(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R1, lo+4(FP)
-	MOVW	R2, y+8(FP)
-	JMP	runtime·goPanicExtendSliceAcapU(SB)
-TEXT runtime·panicExtendSliceB(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R0, lo+4(FP)
-	MOVW	R1, y+8(FP)
-	JMP	runtime·goPanicExtendSliceB(SB)
-TEXT runtime·panicExtendSliceBU(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R0, lo+4(FP)
-	MOVW	R1, y+8(FP)
-	JMP	runtime·goPanicExtendSliceBU(SB)
-TEXT runtime·panicExtendSlice3Alen(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R2, lo+4(FP)
-	MOVW	R3, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3Alen(SB)
-TEXT runtime·panicExtendSlice3AlenU(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R2, lo+4(FP)
-	MOVW	R3, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3AlenU(SB)
-TEXT runtime·panicExtendSlice3Acap(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R2, lo+4(FP)
-	MOVW	R3, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3Acap(SB)
-TEXT runtime·panicExtendSlice3AcapU(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R2, lo+4(FP)
-	MOVW	R3, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3AcapU(SB)
-TEXT runtime·panicExtendSlice3B(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R1, lo+4(FP)
-	MOVW	R2, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3B(SB)
-TEXT runtime·panicExtendSlice3BU(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R1, lo+4(FP)
-	MOVW	R2, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3BU(SB)
-TEXT runtime·panicExtendSlice3C(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R0, lo+4(FP)
-	MOVW	R1, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3C(SB)
-TEXT runtime·panicExtendSlice3CU(SB),NOSPLIT,$0-12
-	MOVW	R4, hi+0(FP)
-	MOVW	R0, lo+4(FP)
-	MOVW	R1, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3CU(SB)
+TEXT runtime·panicExtend<ABIInternal>(SB),NOSPLIT,$72-0
+	NO_LOCAL_POINTERS
+	// Save all int registers that could have an index in them.
+	// They may be pointers, but if they are they are dead.
+	MOVW	R0, 12(R13)
+	MOVW	R1, 16(R13)
+	MOVW	R2, 20(R13)
+	MOVW	R3, 24(R13)
+	MOVW	R4, 28(R13)
+	MOVW	R5, 32(R13)
+	MOVW	R6, 36(R13)
+	MOVW	R7, 40(R13)
+	MOVW	R8, 44(R13)
+	MOVW	R9, 48(R13)
+	// skip R10 aka G @ 52(R13)
+	// skip R11 aka tmp @ 56(R13)
+	MOVW	R12, 60(R13)
+	// skip R13 aka SP @ 64(R13)
+	// skip R14 aka LR @ 68(R13)
+	// skip R15 aka PC @ 72(R13)
+
+	MOVW	R14, 4(R13)	// PC immediately after call to panicExtend
+	ADD	$12, R13, R0	// pointer to save area
+	MOVW	R0, 8(R13)
+	CALL	runtime·panicBounds32X<ABIInternal>(SB)
+	RET
```
```diff
diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index d2261c5160..a0072a3931 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -1574,70 +1574,22 @@ TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
 	BREAK
 	RET
 
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-//
-// Defined as ABIInternal since the compiler generates ABIInternal
-// calls to it directly and it does not use the stack-based Go ABI.
-TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
-	JMP	runtime·goPanicIndex<ABIInternal>(SB)
-TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
-	JMP	runtime·goPanicIndexU<ABIInternal>(SB)
-TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVD	R1, R0
-	MOVD	R2, R1
-	JMP	runtime·goPanicSliceAlen<ABIInternal>(SB)
-TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVD	R1, R0
-	MOVD	R2, R1
-	JMP	runtime·goPanicSliceAlenU<ABIInternal>(SB)
-TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVD	R1, R0
-	MOVD	R2, R1
-	JMP	runtime·goPanicSliceAcap<ABIInternal>(SB)
-TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVD	R1, R0
-	MOVD	R2, R1
-	JMP	runtime·goPanicSliceAcapU<ABIInternal>(SB)
-TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
-	JMP	runtime·goPanicSliceB<ABIInternal>(SB)
-TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
-	JMP	runtime·goPanicSliceBU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVD	R2, R0
-	MOVD	R3, R1
-	JMP	runtime·goPanicSlice3Alen<ABIInternal>(SB)
-TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVD	R2, R0
-	MOVD	R3, R1
-	JMP	runtime·goPanicSlice3AlenU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVD	R2, R0
-	MOVD	R3, R1
-	JMP	runtime·goPanicSlice3Acap<ABIInternal>(SB)
-TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVD	R2, R0
-	MOVD	R3, R1
-	JMP	runtime·goPanicSlice3AcapU<ABIInternal>(SB)
-TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVD	R1, R0
-	MOVD	R2, R1
-	JMP	runtime·goPanicSlice3B<ABIInternal>(SB)
-TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVD	R1, R0
-	MOVD	R2, R1
-	JMP	runtime·goPanicSlice3BU<ABIInternal>(SB)
-TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
-	JMP	runtime·goPanicSlice3C<ABIInternal>(SB)
-TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
-	JMP	runtime·goPanicSlice3CU<ABIInternal>(SB)
-TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVD	R2, R0
-	MOVD	R3, R1
-	JMP	runtime·goPanicSliceConvert<ABIInternal>(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$144-0
+	NO_LOCAL_POINTERS
+	// Save all 16 int registers that could have an index in them.
+	// They may be pointers, but if they are they are dead.
+	STP	(R0, R1), 24(RSP)
+	STP	(R2, R3), 40(RSP)
+	STP	(R4, R5), 56(RSP)
+	STP	(R6, R7), 72(RSP)
+	STP	(R8, R9), 88(RSP)
+	STP	(R10, R11), 104(RSP)
+	STP	(R12, R13), 120(RSP)
+	STP	(R14, R15), 136(RSP)
+	MOVD	LR, R0	// PC immediately after call to panicBounds
+	ADD	$24, RSP, R1	// pointer to save area
+	CALL	runtime·panicBounds64<ABIInternal>(SB)
+	RET
 
 TEXT ·getfp<ABIInternal>(SB),NOSPLIT|NOFRAME,$0
 	MOVD R29, R0
```
```diff
diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s
index e3b593961a..ee7f825e1f 100644
--- a/src/runtime/asm_loong64.s
+++ b/src/runtime/asm_loong64.s
@@ -70,8 +70,9 @@ nocgo:
 	// start this M
 	JAL	runtime·mstart(SB)
 
-	// Prevent dead-code elimination of debugCallV2, which is
+	// Prevent dead-code elimination of debugCallV2 and debugPinnerV1, which are
 	// intended to be called by debuggers.
+	MOVV	$runtime·debugPinnerV1<ABIInternal>(SB), R0
 	MOVV	$runtime·debugCallV2<ABIInternal>(SB), R0
 	MOVV	R0, 1(R0)
 
@@ -1135,76 +1136,29 @@ TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
 	BREAK
 	RET
 
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R20, R4
-	MOVV	R21, R5
-	JMP	runtime·goPanicIndex<ABIInternal>(SB)
-TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R20, R4
-	MOVV	R21, R5
-	JMP	runtime·goPanicIndexU<ABIInternal>(SB)
-TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R21, R4
-	MOVV	R23, R5
-	JMP	runtime·goPanicSliceAlen<ABIInternal>(SB)
-TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R21, R4
-	MOVV	R23, R5
-	JMP	runtime·goPanicSliceAlenU<ABIInternal>(SB)
-TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R21, R4
-	MOVV	R23, R5
-	JMP	runtime·goPanicSliceAcap<ABIInternal>(SB)
-TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R21, R4
-	MOVV	R23, R5
-	JMP	runtime·goPanicSliceAcapU<ABIInternal>(SB)
-TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R20, R4
-	MOVV	R21, R5
-	JMP	runtime·goPanicSliceB<ABIInternal>(SB)
-TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R20, R4
-	MOVV	R21, R5
-	JMP	runtime·goPanicSliceBU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R23, R4
-	MOVV	R24, R5
-	JMP	runtime·goPanicSlice3Alen<ABIInternal>(SB)
-TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R23, R4
-	MOVV	R24, R5
-	JMP	runtime·goPanicSlice3AlenU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R23, R4
-	MOVV	R24, R5
-	JMP	runtime·goPanicSlice3Acap<ABIInternal>(SB)
-TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R23, R4
-	MOVV	R24, R5
-	JMP	runtime·goPanicSlice3AcapU<ABIInternal>(SB)
-TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R21, R4
-	MOVV	R23, R5
-	JMP	runtime·goPanicSlice3B<ABIInternal>(SB)
-TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R21, R4
-	MOVV	R23, R5
-	JMP	runtime·goPanicSlice3BU<ABIInternal>(SB)
-TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R20, R4
-	MOVV	R21, R5
-	JMP	runtime·goPanicSlice3C<ABIInternal>(SB)
-TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R20, R4
-	MOVV	R21, R5
-	JMP	runtime·goPanicSlice3CU<ABIInternal>(SB)
-TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
-	MOVV	R23, R4
-	MOVV	R24, R5
-	JMP	runtime·goPanicSliceConvert<ABIInternal>(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$144-0
+	NO_LOCAL_POINTERS
+	// Save all 16 int registers that could have an index in them.
+	// They may be pointers, but if they are they are dead.
+	// Skip R0 aka ZERO, R1 aka LR, R2 aka thread pointer, R3 aka SP.
+	MOVV	R4, 24(R3)
+	MOVV	R5, 32(R3)
+	MOVV	R6, 40(R3)
+	MOVV	R7, 48(R3)
+	MOVV	R8, 56(R3)
+	MOVV	R9, 64(R3)
+	MOVV	R10, 72(R3)
+	MOVV	R11, 80(R3)
+	MOVV	R12, 88(R3)
+	MOVV	R13, 96(R3)
+	MOVV	R14, 104(R3)
+	MOVV	R15, 112(R3)
+	MOVV	R16, 120(R3)
+	MOVV	R17, 128(R3)
+	MOVV	R18, 136(R3)
+	MOVV	R19, 144(R3)
+
+	MOVV	R1, R4	// PC immediately after call to panicBounds
+	ADDV	$24, R3, R5	// pointer to save area
+	CALL	runtime·panicBounds64<ABIInternal>(SB)
+	RET
```
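Besides the bounds-check rewrite, the loong64 hunk above keeps `debugPinnerV1` out of dead-code elimination so debuggers can call it (the 2025-08-03 "runtime: add reference to debugPinnerV1" commit in the merge list). The related user-facing API is `runtime.Pinner`; a brief usage sketch:

```go
package main

import "runtime"

// runtime.Pinner is the user-facing side of the pinning machinery
// that debugPinnerV1 exposes to debuggers.
func main() {
	var p runtime.Pinner
	buf := new([64]byte)
	p.Pin(buf) // buf won't be moved or collected until Unpin
	// ... safe to hand &buf[0] to non-Go code here ...
	p.Unpin()
}
```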
```diff
diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s
index 9509d5ba77..d4523b4a74 100644
--- a/src/runtime/asm_mips64x.s
+++ b/src/runtime/asm_mips64x.s
@@ -791,76 +791,30 @@ TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
 	MOVV	$64, R25
 	JMP	gcWriteBarrier<>(SB)
 
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
-	MOVV	R1, x+0(FP)
-	MOVV	R2, y+8(FP)
-	JMP	runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
-	MOVV	R1, x+0(FP)
-	MOVV	R2, y+8(FP)
-	JMP	runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
-	MOVV	R2, x+0(FP)
-	MOVV	R3, y+8(FP)
-	JMP	runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
-	MOVV	R2, x+0(FP)
-	MOVV	R3, y+8(FP)
-	JMP	runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
-	MOVV	R2, x+0(FP)
-	MOVV	R3, y+8(FP)
-	JMP	runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
-	MOVV	R2, x+0(FP)
-	MOVV	R3, y+8(FP)
-	JMP	runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
-	MOVV	R1, x+0(FP)
-	MOVV	R2, y+8(FP)
-	JMP	runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
-	MOVV	R1, x+0(FP)
-	MOVV	R2, y+8(FP)
-	JMP	runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
-	MOVV	R3, x+0(FP)
-	MOVV	R4, y+8(FP)
-	JMP	runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
-	MOVV	R3, x+0(FP)
-	MOVV	R4, y+8(FP)
-	JMP	runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
-	MOVV	R3, x+0(FP)
-	MOVV	R4, y+8(FP)
-	JMP	runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
-	MOVV	R3, x+0(FP)
-	MOVV	R4, y+8(FP)
-	JMP	runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
-	MOVV	R2, x+0(FP)
-	MOVV	R3, y+8(FP)
-	JMP	runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
-	MOVV	R2, x+0(FP)
-	MOVV	R3, y+8(FP)
-	JMP	runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
-	MOVV	R1, x+0(FP)
-	MOVV	R2, y+8(FP)
-	JMP	runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
-	MOVV	R1, x+0(FP)
-	MOVV	R2, y+8(FP)
-	JMP	runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16
-	MOVV	R3, x+0(FP)
-	MOVV	R4, y+8(FP)
-	JMP	runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$144-0
+	NO_LOCAL_POINTERS
+	// Save all 16 int registers that could have an index in them.
+	// They may be pointers, but if they are they are dead.
+	// Skip R0 aka ZERO.
+	MOVV	R1, 24(R29)
+	MOVV	R2, 32(R29)
+	MOVV	R3, 40(R29)
+	MOVV	R4, 48(R29)
+	MOVV	R5, 56(R29)
+	MOVV	R6, 64(R29)
+	MOVV	R7, 72(R29)
+	MOVV	R8, 80(R29)
+	MOVV	R9, 88(R29)
+	MOVV	R10, 96(R29)
+	MOVV	R11, 104(R29)
+	MOVV	R12, 112(R29)
+	MOVV	R13, 120(R29)
+	MOVV	R14, 128(R29)
+	MOVV	R15, 136(R29)
+	MOVV	R16, 144(R29)
+
+	MOVV	R31, 8(R29)	// PC immediately after call to panicBounds
+	ADDV	$24, R29, R1	// pointer to save area
+	MOVV	R1, 16(R29)
+	CALL	runtime·panicBounds64<ABIInternal>(SB)
+	RET
```
```diff
diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s
index 7245e8ac49..ec352f5828 100644
--- a/src/runtime/asm_mipsx.s
+++ b/src/runtime/asm_mipsx.s
@@ -787,158 +787,58 @@ TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
 	MOVW	$32, R25
 	JMP	gcWriteBarrier<>(SB)
 
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex(SB),NOSPLIT,$0-8
-	MOVW	R1, x+0(FP)
-	MOVW	R2, y+4(FP)
-	JMP	runtime·goPanicIndex(SB)
-TEXT runtime·panicIndexU(SB),NOSPLIT,$0-8
-	MOVW	R1, x+0(FP)
-	MOVW	R2, y+4(FP)
-	JMP	runtime·goPanicIndexU(SB)
-TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-8
-	MOVW	R2, x+0(FP)
-	MOVW	R3, y+4(FP)
-	JMP	runtime·goPanicSliceAlen(SB)
-TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-8
-	MOVW	R2, x+0(FP)
-	MOVW	R3, y+4(FP)
-	JMP	runtime·goPanicSliceAlenU(SB)
-TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-8
-	MOVW	R2, x+0(FP)
-	MOVW	R3, y+4(FP)
-	JMP	runtime·goPanicSliceAcap(SB)
-TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-8
-	MOVW	R2, x+0(FP)
-	MOVW	R3, y+4(FP)
-	JMP	runtime·goPanicSliceAcapU(SB)
-TEXT runtime·panicSliceB(SB),NOSPLIT,$0-8
-	MOVW	R1, x+0(FP)
-	MOVW	R2, y+4(FP)
-	JMP	runtime·goPanicSliceB(SB)
-TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-8
-	MOVW	R1, x+0(FP)
-	MOVW	R2, y+4(FP)
-	JMP	runtime·goPanicSliceBU(SB)
-TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-8
-	MOVW	R3, x+0(FP)
-	MOVW	R4, y+4(FP)
-	JMP	runtime·goPanicSlice3Alen(SB)
-TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-8
-	MOVW	R3, x+0(FP)
-	MOVW	R4, y+4(FP)
-	JMP	runtime·goPanicSlice3AlenU(SB)
-TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-8
-	MOVW	R3, x+0(FP)
-	MOVW	R4, y+4(FP)
-	JMP	runtime·goPanicSlice3Acap(SB)
-TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-8
-	MOVW	R3, x+0(FP)
-	MOVW	R4, y+4(FP)
-	JMP	runtime·goPanicSlice3AcapU(SB)
-TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-8
-	MOVW	R2, x+0(FP)
-	MOVW	R3, y+4(FP)
-	JMP	runtime·goPanicSlice3B(SB)
-TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-8
-	MOVW	R2, x+0(FP)
-	MOVW	R3, y+4(FP)
-	JMP	runtime·goPanicSlice3BU(SB)
-TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-8
-	MOVW	R1, x+0(FP)
-	MOVW	R2, y+4(FP)
-	JMP	runtime·goPanicSlice3C(SB)
-TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-8
-	MOVW	R1, x+0(FP)
-	MOVW	R2, y+4(FP)
-	JMP	runtime·goPanicSlice3CU(SB)
-TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-8
-	MOVW	R3, x+0(FP)
-	MOVW	R4, y+4(FP)
-	JMP	runtime·goPanicSliceConvert(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$72-0
+	NO_LOCAL_POINTERS
+	// Save all 16 int registers that could have an index in them.
+	// They may be pointers, but if they are they are dead.
+	// Skip R0 aka ZERO.
+	MOVW	R1, 12(R29)
+	MOVW	R2, 16(R29)
+	MOVW	R3, 20(R29)
+	MOVW	R4, 24(R29)
+	MOVW	R5, 28(R29)
+	MOVW	R6, 32(R29)
+	MOVW	R7, 36(R29)
+	MOVW	R8, 40(R29)
+	MOVW	R9, 44(R29)
+	MOVW	R10, 48(R29)
+	MOVW	R11, 52(R29)
+	MOVW	R12, 56(R29)
+	MOVW	R13, 60(R29)
+	MOVW	R14, 64(R29)
+	MOVW	R15, 68(R29)
+	MOVW	R16, 72(R29)
+
+	MOVW	R31, 4(R29)	// PC immediately after call to panicBounds
+	ADD	$12, R29, R1	// pointer to save area
+	MOVW	R1, 8(R29)
+	CALL	runtime·panicBounds32<ABIInternal>(SB)
+	RET
 
-// Extended versions for 64-bit indexes.
-TEXT runtime·panicExtendIndex(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R1, lo+4(FP)
-	MOVW	R2, y+8(FP)
-	JMP	runtime·goPanicExtendIndex(SB)
-TEXT runtime·panicExtendIndexU(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R1, lo+4(FP)
-	MOVW	R2, y+8(FP)
-	JMP	runtime·goPanicExtendIndexU(SB)
-TEXT runtime·panicExtendSliceAlen(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R2, lo+4(FP)
-	MOVW	R3, y+8(FP)
-	JMP	runtime·goPanicExtendSliceAlen(SB)
-TEXT runtime·panicExtendSliceAlenU(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R2, lo+4(FP)
-	MOVW	R3, y+8(FP)
-	JMP	runtime·goPanicExtendSliceAlenU(SB)
-TEXT runtime·panicExtendSliceAcap(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R2, lo+4(FP)
-	MOVW	R3, y+8(FP)
-	JMP	runtime·goPanicExtendSliceAcap(SB)
-TEXT runtime·panicExtendSliceAcapU(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R2, lo+4(FP)
-	MOVW	R3, y+8(FP)
-	JMP	runtime·goPanicExtendSliceAcapU(SB)
-TEXT runtime·panicExtendSliceB(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R1, lo+4(FP)
-	MOVW	R2, y+8(FP)
-	JMP	runtime·goPanicExtendSliceB(SB)
-TEXT runtime·panicExtendSliceBU(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R1, lo+4(FP)
-	MOVW	R2, y+8(FP)
-	JMP	runtime·goPanicExtendSliceBU(SB)
-TEXT runtime·panicExtendSlice3Alen(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R3, lo+4(FP)
-	MOVW	R4, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3Alen(SB)
-TEXT runtime·panicExtendSlice3AlenU(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R3, lo+4(FP)
-	MOVW	R4, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3AlenU(SB)
-TEXT runtime·panicExtendSlice3Acap(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R3, lo+4(FP)
-	MOVW	R4, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3Acap(SB)
-TEXT runtime·panicExtendSlice3AcapU(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R3, lo+4(FP)
-	MOVW	R4, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3AcapU(SB)
-TEXT runtime·panicExtendSlice3B(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R2, lo+4(FP)
-	MOVW	R3, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3B(SB)
-TEXT runtime·panicExtendSlice3BU(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R2, lo+4(FP)
-	MOVW	R3, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3BU(SB)
-TEXT runtime·panicExtendSlice3C(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R1, lo+4(FP)
-	MOVW	R2, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3C(SB)
-TEXT runtime·panicExtendSlice3CU(SB),NOSPLIT,$0-12
-	MOVW	R5, hi+0(FP)
-	MOVW	R1, lo+4(FP)
-	MOVW	R2, y+8(FP)
-	JMP	runtime·goPanicExtendSlice3CU(SB)
+TEXT runtime·panicExtend<ABIInternal>(SB),NOSPLIT,$72-0
+	NO_LOCAL_POINTERS
+	// Save all 16 int registers that could have an index in them.
+	// They may be pointers, but if they are they are dead.
+	// Skip R0 aka ZERO.
+	MOVW	R1, 12(R29)
+	MOVW	R2, 16(R29)
+	MOVW	R3, 20(R29)
+	MOVW	R4, 24(R29)
+	MOVW	R5, 28(R29)
+	MOVW	R6, 32(R29)
+	MOVW	R7, 36(R29)
+	MOVW	R8, 40(R29)
+	MOVW	R9, 44(R29)
+	MOVW	R10, 48(R29)
+	MOVW	R11, 52(R29)
+	MOVW	R12, 56(R29)
+	MOVW	R13, 60(R29)
+	MOVW	R14, 64(R29)
+	MOVW	R15, 68(R29)
+	MOVW	R16, 72(R29)
+
+	MOVW	R31, 4(R29)	// PC immediately after call to panicExtend
+	ADD	$12, R29, R1	// pointer to save area
+	MOVW	R1, 8(R29)
+	CALL	runtime·panicBounds32X<ABIInternal>(SB)
+	RET
```
```diff
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
index 4031cdde9e..6b16d03c9a 100644
--- a/src/runtime/asm_riscv64.s
+++ b/src/runtime/asm_riscv64.s
@@ -884,80 +884,32 @@ TEXT runtime·gcWriteBarrier8<ABIInternal>(SB),NOSPLIT,$0
 	MOV	$64, X24
 	JMP	gcWriteBarrier<>(SB)
 
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers (ssa/gen/RISCV64Ops.go), but the space for those
-// arguments are allocated in the caller's stack frame.
-// These stubs write the args into that stack space and then tail call to the
-// corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T0, X10
-	MOV	T1, X11
-	JMP	runtime·goPanicIndex<ABIInternal>(SB)
-TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T0, X10
-	MOV	T1, X11
-	JMP	runtime·goPanicIndexU<ABIInternal>(SB)
-TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T1, X10
-	MOV	T2, X11
-	JMP	runtime·goPanicSliceAlen<ABIInternal>(SB)
-TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T1, X10
-	MOV	T2, X11
-	JMP	runtime·goPanicSliceAlenU<ABIInternal>(SB)
-TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T1, X10
-	MOV	T2, X11
-	JMP	runtime·goPanicSliceAcap<ABIInternal>(SB)
-TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T1, X10
-	MOV	T2, X11
-	JMP	runtime·goPanicSliceAcapU<ABIInternal>(SB)
-TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T0, X10
-	MOV	T1, X11
-	JMP	runtime·goPanicSliceB<ABIInternal>(SB)
-TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T0, X10
-	MOV	T1, X11
-	JMP	runtime·goPanicSliceBU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T2, X10
-	MOV	T3, X11
-	JMP	runtime·goPanicSlice3Alen<ABIInternal>(SB)
-TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T2, X10
-	MOV	T3, X11
-	JMP	runtime·goPanicSlice3AlenU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T2, X10
-	MOV	T3, X11
-	JMP	runtime·goPanicSlice3Acap<ABIInternal>(SB)
-TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T2, X10
-	MOV	T3, X11
-	JMP	runtime·goPanicSlice3AcapU<ABIInternal>(SB)
-TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T1, X10
-	MOV	T2, X11
-	JMP	runtime·goPanicSlice3B<ABIInternal>(SB)
-TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T1, X10
-	MOV	T2, X11
-	JMP	runtime·goPanicSlice3BU<ABIInternal>(SB)
-TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T0, X10
-	MOV	T1, X11
-	JMP	runtime·goPanicSlice3C<ABIInternal>(SB)
-TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T0, X10
-	MOV	T1, X11
-	JMP	runtime·goPanicSlice3CU<ABIInternal>(SB)
-TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
-	MOV	T2, X10
-	MOV	T3, X11
-	JMP	runtime·goPanicSliceConvert<ABIInternal>(SB)
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$144-0
+	NO_LOCAL_POINTERS
+	// Save all 16 int registers that could have an index in them.
+	// They may be pointers, but if they are they are dead.
+	// Skip X0 aka ZERO, X1 aka LR, X2 aka SP, X3 aka GP, X4 aka TP.
+	MOV	X5, 24(X2)
+	MOV	X6, 32(X2)
+	MOV	X7, 40(X2)
+	MOV	X8, 48(X2)
+	MOV	X9, 56(X2)
+	MOV	X10, 64(X2)
+	MOV	X11, 72(X2)
+	MOV	X12, 80(X2)
+	MOV	X13, 88(X2)
+	MOV	X14, 96(X2)
+	MOV	X15, 104(X2)
+	MOV	X16, 112(X2)
+	MOV	X17, 120(X2)
+	MOV	X18, 128(X2)
+	MOV	X19, 136(X2)
+	MOV	X20, 144(X2)
+
+	MOV	X1, X10	// PC immediately after call to panicBounds
+	ADD	$24, X2, X11	// pointer to save area
+	CALL	runtime·panicBounds64<ABIInternal>(SB)
+	RET
 
 DATA	runtime·mainPC+0(SB)/8,$runtime·main<ABIInternal>(SB)
 GLOBL	runtime·mainPC(SB),RODATA,$8
```
an index in them. + // They may be pointers, but if they are they are dead. + STMG R0, R12, 24(R15) + // Note that R10 @ 104 is not needed, it is an assembler temp + // skip R13 aka G @ 128 + // skip R14 aka LR @ 136 + // skip R15 aka SP @ 144 + + MOVD R14, 8(R15) // PC immediately after call to panicBounds + ADD $24, R15, R0 // pointer to save area + MOVD R0, 16(R15) + CALL runtime·panicBounds64<ABIInternal>(SB) + RET diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s index 751bf0aec7..85aa52e0f7 100644 --- a/src/runtime/asm_wasm.s +++ b/src/runtime/asm_wasm.s @@ -532,7 +532,7 @@ TEXT wasm_pc_f_loop(SB),NOSPLIT,$0 Get SP I32Const $8 I32Sub - I32Load16U $2 // PC_F + I32Load $2 // PC_F CallIndirect $0 Drop @@ -568,7 +568,7 @@ outer: Get SP I32Const $8 I32Sub - I32Load16U $2 // PC_F + I32Load $2 // PC_F Tee R2 Get R0 diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index b046ab960f..18e1dc8baf 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -191,8 +191,8 @@ func cgocall(fn, arg unsafe.Pointer) int32 { osPreemptExtExit(mp) - // Save current syscall parameters, so m.winsyscall can be - // used again if callback decide to make syscall. + // After exitsyscall we can be rescheduled on a different M, + // so we need to restore the original M's winsyscall. winsyscall := mp.winsyscall exitsyscall() @@ -543,18 +543,18 @@ func cgoCheckPointer(ptr any, arg any) { t := ep._type top := true - if arg != nil && (t.Kind_&abi.KindMask == abi.Pointer || t.Kind_&abi.KindMask == abi.UnsafePointer) { + if arg != nil && (t.Kind() == abi.Pointer || t.Kind() == abi.UnsafePointer) { p := ep.data - if t.Kind_&abi.KindDirectIface == 0 { + if !t.IsDirectIface() { p = *(*unsafe.Pointer)(p) } if p == nil || !cgoIsGoPointer(p) { return } aep := efaceOf(&arg) - switch aep._type.Kind_ & abi.KindMask { + switch aep._type.Kind() { case abi.Bool: - if t.Kind_&abi.KindMask == abi.UnsafePointer { + if t.Kind() == abi.UnsafePointer { // We don't know the type of the element. break } @@ -578,7 +578,7 @@ func cgoCheckPointer(ptr any, arg any) { // Check the array rather than the pointer. 
pt := (*abi.PtrType)(unsafe.Pointer(aep._type)) t = pt.Elem - if t.Kind_&abi.KindMask != abi.Array { + if t.Kind() != abi.Array { throw("can't happen") } ep = aep @@ -588,7 +588,7 @@ func cgoCheckPointer(ptr any, arg any) { } } - cgoCheckArg(t, ep.data, t.Kind_&abi.KindDirectIface == 0, top, cgoCheckPointerFail) + cgoCheckArg(t, ep.data, !t.IsDirectIface(), top, cgoCheckPointerFail) } const cgoCheckPointerFail = "cgo argument has Go pointer to unpinned Go pointer" @@ -605,7 +605,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { return } - switch t.Kind_ & abi.KindMask { + switch t.Kind() { default: throw("can't happen") case abi.Array: @@ -614,7 +614,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { if at.Len != 1 { throw("can't happen") } - cgoCheckArg(at.Elem, p, at.Elem.Kind_&abi.KindDirectIface == 0, top, msg) + cgoCheckArg(at.Elem, p, !at.Elem.IsDirectIface(), top, msg) return } for i := uintptr(0); i < at.Len; i++ { @@ -652,7 +652,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { if !top && !isPinned(p) { panic(errorString(msg)) } - cgoCheckArg(it, p, it.Kind_&abi.KindDirectIface == 0, false, msg) + cgoCheckArg(it, p, !it.IsDirectIface(), false, msg) case abi.Slice: st := (*slicetype)(unsafe.Pointer(t)) s := (*slice)(p) @@ -684,7 +684,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { if len(st.Fields) != 1 { throw("can't happen") } - cgoCheckArg(st.Fields[0].Typ, p, st.Fields[0].Typ.Kind_&abi.KindDirectIface == 0, top, msg) + cgoCheckArg(st.Fields[0].Typ, p, !st.Fields[0].Typ.IsDirectIface(), top, msg) return } for _, f := range st.Fields { @@ -792,5 +792,5 @@ func cgoCheckResult(val any) { ep := efaceOf(&val) t := ep._type - cgoCheckArg(t, ep.data, t.Kind_&abi.KindDirectIface == 0, false, cgoResultFail) + cgoCheckArg(t, ep.data, !t.IsDirectIface(), false, cgoResultFail) } diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go index 8696672065..2db86e0562 100644 --- a/src/runtime/crash_test.go +++ b/src/runtime/crash_test.go @@ -186,21 +186,6 @@ func buildTestProg(t *testing.T, binary string, flags ...string) (string, error) t.Logf("running %v", cmd) cmd.Dir = "testdata/" + binary cmd = testenv.CleanCmdEnv(cmd) - - // Add the rangefunc GOEXPERIMENT unconditionally since some tests depend on it. - // TODO(61405): Remove this once it's enabled by default. - edited := false - for i := range cmd.Env { - e := cmd.Env[i] - if _, vars, ok := strings.Cut(e, "GOEXPERIMENT="); ok { - cmd.Env[i] = "GOEXPERIMENT=" + vars + ",rangefunc" - edited = true - } - } - if !edited { - cmd.Env = append(cmd.Env, "GOEXPERIMENT=rangefunc") - } - out, err := cmd.CombinedOutput() if err != nil { target.err = fmt.Errorf("building %s %v: %v\n%s", binary, flags, err, out) diff --git a/src/runtime/crash_unix_test.go b/src/runtime/crash_unix_test.go index 101107d2f7..f9c12d5404 100644 --- a/src/runtime/crash_unix_test.go +++ b/src/runtime/crash_unix_test.go @@ -65,7 +65,7 @@ func TestCrashDumpsAllThreads(t *testing.T) { t.Skipf("skipping; not supported on %v", runtime.GOOS) } - if runtime.GOOS == "openbsd" && (runtime.GOARCH == "arm" || runtime.GOARCH == "mips64" || runtime.GOARCH == "ppc64") { + if runtime.GOOS == "openbsd" && (runtime.GOARCH == "arm" || runtime.GOARCH == "ppc64") { // This may be ncpu < 2 related... 
t.Skipf("skipping; test fails on %s/%s - see issue #42464", runtime.GOOS, runtime.GOARCH) } diff --git a/src/runtime/debug.go b/src/runtime/debug.go index c7592d3329..dacadd2721 100644 --- a/src/runtime/debug.go +++ b/src/runtime/debug.go @@ -13,13 +13,23 @@ import ( // simultaneously and returns the previous setting. If n < 1, it does not change // the current setting. // +// # Default +// // If the GOMAXPROCS environment variable is set to a positive whole number, // GOMAXPROCS defaults to that value. // -// Otherwise, the Go runtime selects an appropriate default value based on the -// number of logical CPUs on the machine, the process’s CPU affinity mask, and, -// on Linux, the process’s average CPU throughput limit based on cgroup CPU -// quota, if any. +// Otherwise, the Go runtime selects an appropriate default value from a combination of +// - the number of logical CPUs on the machine, +// - the process’s CPU affinity mask, +// - and, on Linux, the process’s average CPU throughput limit based on cgroup CPU +// quota, if any. +// +// If GODEBUG=containermaxprocs=0 is set and GOMAXPROCS is not set by the +// environment variable, then GOMAXPROCS instead defaults to the value of +// [runtime.NumCPU]. Note that GODEBUG=containermaxprocs=0 is [default] for +// language version 1.24 and below. +// +// # Updates // // The Go runtime periodically updates the default value based on changes to // the total logical CPU count, the CPU affinity mask, or cgroup quota. Setting @@ -27,11 +37,36 @@ import ( // GOMAXPROCS disables automatic updates. The default value and automatic // updates can be restored by calling [SetDefaultGOMAXPROCS]. // -// If GODEBUG=containermaxprocs=0 is set, GOMAXPROCS defaults to the value of -// [runtime.NumCPU]. If GODEBUG=updatemaxprocs=0 is set, the Go runtime does -// not perform automatic GOMAXPROCS updating. +// If GODEBUG=updatemaxprocs=0 is set, the Go runtime does not perform +// automatic GOMAXPROCS updating. Note that GODEBUG=updatemaxprocs=0 is +// [default] for language version 1.24 and below. +// +// # Compatibility +// +// Note that the default GOMAXPROCS behavior may change as the scheduler +// improves, especially the implementation detail below. +// +// # Implementation details +// +// When computing default GOMAXPROCS via cgroups, the Go runtime computes the +// "average CPU throughput limit" as the cgroup CPU quota / period. In cgroup +// v2, these values come from the cpu.max file. In cgroup v1, they come from +// cpu.cfs_quota_us and cpu.cfs_period_us, respectively. In container runtimes +// that allow configuring CPU limits, this value usually corresponds to the +// "CPU limit" option, not "CPU request". +// +// The Go runtime typically selects the default GOMAXPROCS as the minimum of +// the logical CPU count, the CPU affinity mask count, or the cgroup CPU +// throughput limit. However, it will never set GOMAXPROCS less than 2 unless +// the logical CPU count or CPU affinity mask count are below 2. +// +// If the cgroup CPU throughput limit is not a whole number, the Go runtime +// rounds up to the next whole number. +// +// GOMAXPROCS updates are performed up to once per second, or less if the +// application is idle. // -// The default GOMAXPROCS behavior may change as the scheduler improves. +// [default]: https://go.dev/doc/godebug#default func GOMAXPROCS(n int) int { if GOARCH == "wasm" && n > 1 { n = 1 // WebAssembly has no threads yet, so only one CPU is possible. 
diff --git a/src/runtime/debuglog.go b/src/runtime/debuglog.go index 50fba3568d..e993e396c1 100644 --- a/src/runtime/debuglog.go +++ b/src/runtime/debuglog.go @@ -327,7 +327,7 @@ func (l *dloggerImpl) p(x any) *dloggerImpl { l.w.uvarint(0) } else { v := efaceOf(&x) - switch v._type.Kind_ & abi.KindMask { + switch v._type.Kind() { case abi.Chan, abi.Func, abi.Map, abi.Pointer, abi.UnsafePointer: l.w.uvarint(uint64(uintptr(v.data))) default: diff --git a/src/runtime/defs_openbsd.go b/src/runtime/defs_openbsd.go index d93c087a81..9564a3354c 100644 --- a/src/runtime/defs_openbsd.go +++ b/src/runtime/defs_openbsd.go @@ -11,7 +11,6 @@ GOARCH=amd64 go tool cgo -godefs defs_openbsd.go GOARCH=386 go tool cgo -godefs defs_openbsd.go GOARCH=arm go tool cgo -godefs defs_openbsd.go GOARCH=arm64 go tool cgo -godefs defs_openbsd.go -GOARCH=mips64 go tool cgo -godefs defs_openbsd.go */ package runtime diff --git a/src/runtime/defs_openbsd_mips64.go b/src/runtime/defs_openbsd_mips64.go deleted file mode 100644 index 7958044d04..0000000000 --- a/src/runtime/defs_openbsd_mips64.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Generated from: -// -// GOARCH=mips64 go tool cgo -godefs defs_openbsd.go -// -// Then converted to the form used by the runtime. - -package runtime - -import "unsafe" - -const ( - _EINTR = 0x4 - _EFAULT = 0xe - _EAGAIN = 0x23 - _ETIMEDOUT = 0x3c - - _O_WRONLY = 0x1 - _O_NONBLOCK = 0x4 - _O_CREAT = 0x200 - _O_TRUNC = 0x400 - _O_CLOEXEC = 0x10000 - - _PROT_NONE = 0x0 - _PROT_READ = 0x1 - _PROT_WRITE = 0x2 - _PROT_EXEC = 0x4 - - _MAP_ANON = 0x1000 - _MAP_PRIVATE = 0x2 - _MAP_FIXED = 0x10 - _MAP_STACK = 0x4000 - - _MADV_DONTNEED = 0x4 - _MADV_FREE = 0x6 - - _SA_SIGINFO = 0x40 - _SA_RESTART = 0x2 - _SA_ONSTACK = 0x1 - - _SIGHUP = 0x1 - _SIGINT = 0x2 - _SIGQUIT = 0x3 - _SIGILL = 0x4 - _SIGTRAP = 0x5 - _SIGABRT = 0x6 - _SIGEMT = 0x7 - _SIGFPE = 0x8 - _SIGKILL = 0x9 - _SIGBUS = 0xa - _SIGSEGV = 0xb - _SIGSYS = 0xc - _SIGPIPE = 0xd - _SIGALRM = 0xe - _SIGTERM = 0xf - _SIGURG = 0x10 - _SIGSTOP = 0x11 - _SIGTSTP = 0x12 - _SIGCONT = 0x13 - _SIGCHLD = 0x14 - _SIGTTIN = 0x15 - _SIGTTOU = 0x16 - _SIGIO = 0x17 - _SIGXCPU = 0x18 - _SIGXFSZ = 0x19 - _SIGVTALRM = 0x1a - _SIGPROF = 0x1b - _SIGWINCH = 0x1c - _SIGINFO = 0x1d - _SIGUSR1 = 0x1e - _SIGUSR2 = 0x1f - - _FPE_INTDIV = 0x1 - _FPE_INTOVF = 0x2 - _FPE_FLTDIV = 0x3 - _FPE_FLTOVF = 0x4 - _FPE_FLTUND = 0x5 - _FPE_FLTRES = 0x6 - _FPE_FLTINV = 0x7 - _FPE_FLTSUB = 0x8 - - _BUS_ADRALN = 0x1 - _BUS_ADRERR = 0x2 - _BUS_OBJERR = 0x3 - - _SEGV_MAPERR = 0x1 - _SEGV_ACCERR = 0x2 - - _ITIMER_REAL = 0x0 - _ITIMER_VIRTUAL = 0x1 - _ITIMER_PROF = 0x2 - - _EV_ADD = 0x1 - _EV_DELETE = 0x2 - _EV_CLEAR = 0x20 - _EV_ERROR = 0x4000 - _EV_EOF = 0x8000 - _EVFILT_READ = -0x1 - _EVFILT_WRITE = -0x2 -) - -type tforkt struct { - tf_tcb unsafe.Pointer - tf_tid *int32 - tf_stack uintptr -} - -type sigcontext struct { - sc_cookie uint64 - sc_mask uint64 - sc_pc uint64 - sc_regs [32]uint64 - mullo uint64 - mulhi uint64 - sc_fpregs [33]uint64 - sc_fpused uint64 - sc_fpc_eir uint64 - _xxx [8]int64 -} - -type siginfo struct { - si_signo int32 - si_code int32 - si_errno int32 - pad_cgo_0 [4]byte - _data [120]byte -} - -type stackt struct { - ss_sp uintptr - ss_size uintptr - ss_flags int32 - pad_cgo_0 [4]byte -} - -type timespec struct { - tv_sec int64 - tv_nsec int64 -} - -//go:nosplit -func (ts *timespec) setNsec(ns int64) { - 
ts.tv_sec = ns / 1e9 - ts.tv_nsec = ns % 1e9 -} - -type timeval struct { - tv_sec int64 - tv_usec int64 -} - -func (tv *timeval) set_usec(x int32) { - tv.tv_usec = int64(x) -} - -type itimerval struct { - it_interval timeval - it_value timeval -} - -type keventt struct { - ident uint64 - filter int16 - flags uint16 - fflags uint32 - data int64 - udata *byte -} diff --git a/src/runtime/ehooks_test.go b/src/runtime/ehooks_test.go index c7f51740fb..380d709876 100644 --- a/src/runtime/ehooks_test.go +++ b/src/runtime/ehooks_test.go @@ -63,12 +63,12 @@ func TestExitHooks(t *testing.T) { outs = strings.TrimSpace(outs) if s.expected != "" && s.expected != outs { t.Fatalf("failed %s: wanted %q\noutput:\n%s", - s.mode, s.expected, outs) + s.mode, s.expected, outs) } for _, need := range s.musthave { if !strings.Contains(outs, need) { t.Fatalf("failed mode %s: output does not contain %q\noutput:\n%s", - s.mode, need, outs) + s.mode, need, outs) } } if s.expected == "" && s.musthave == nil && outs != "" { diff --git a/src/runtime/error.go b/src/runtime/error.go index 8e50c0fea4..f95b14d780 100644 --- a/src/runtime/error.go +++ b/src/runtime/error.go @@ -132,52 +132,34 @@ type boundsError struct { // Instead, we keep track of whether x should be interpreted as signed or unsigned. // y is known to be nonnegative and to fit in an int. signed bool - code boundsErrorCode + code abi.BoundsErrorCode } -type boundsErrorCode uint8 - -const ( - boundsIndex boundsErrorCode = iota // s[x], 0 <= x < len(s) failed - - boundsSliceAlen // s[?:x], 0 <= x <= len(s) failed - boundsSliceAcap // s[?:x], 0 <= x <= cap(s) failed - boundsSliceB // s[x:y], 0 <= x <= y failed (but boundsSliceA didn't happen) - - boundsSlice3Alen // s[?:?:x], 0 <= x <= len(s) failed - boundsSlice3Acap // s[?:?:x], 0 <= x <= cap(s) failed - boundsSlice3B // s[?:x:y], 0 <= x <= y failed (but boundsSlice3A didn't happen) - boundsSlice3C // s[x:y:?], 0 <= x <= y failed (but boundsSlice3A/B didn't happen) - - boundsConvert // (*[x]T)(s), 0 <= x <= len(s) failed - // Note: in the above, len(s) and cap(s) are stored in y -) - // boundsErrorFmts provide error text for various out-of-bounds panics. // Note: if you change these strings, you should adjust the size of the buffer // in boundsError.Error below as well. 
var boundsErrorFmts = [...]string{ - boundsIndex: "index out of range [%x] with length %y", - boundsSliceAlen: "slice bounds out of range [:%x] with length %y", - boundsSliceAcap: "slice bounds out of range [:%x] with capacity %y", - boundsSliceB: "slice bounds out of range [%x:%y]", - boundsSlice3Alen: "slice bounds out of range [::%x] with length %y", - boundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y", - boundsSlice3B: "slice bounds out of range [:%x:%y]", - boundsSlice3C: "slice bounds out of range [%x:%y:]", - boundsConvert: "cannot convert slice with length %y to array or pointer to array with length %x", + abi.BoundsIndex: "index out of range [%x] with length %y", + abi.BoundsSliceAlen: "slice bounds out of range [:%x] with length %y", + abi.BoundsSliceAcap: "slice bounds out of range [:%x] with capacity %y", + abi.BoundsSliceB: "slice bounds out of range [%x:%y]", + abi.BoundsSlice3Alen: "slice bounds out of range [::%x] with length %y", + abi.BoundsSlice3Acap: "slice bounds out of range [::%x] with capacity %y", + abi.BoundsSlice3B: "slice bounds out of range [:%x:%y]", + abi.BoundsSlice3C: "slice bounds out of range [%x:%y:]", + abi.BoundsConvert: "cannot convert slice with length %y to array or pointer to array with length %x", } // boundsNegErrorFmts are overriding formats if x is negative. In this case there's no need to report y. var boundsNegErrorFmts = [...]string{ - boundsIndex: "index out of range [%x]", - boundsSliceAlen: "slice bounds out of range [:%x]", - boundsSliceAcap: "slice bounds out of range [:%x]", - boundsSliceB: "slice bounds out of range [%x:]", - boundsSlice3Alen: "slice bounds out of range [::%x]", - boundsSlice3Acap: "slice bounds out of range [::%x]", - boundsSlice3B: "slice bounds out of range [:%x:]", - boundsSlice3C: "slice bounds out of range [%x::]", + abi.BoundsIndex: "index out of range [%x]", + abi.BoundsSliceAlen: "slice bounds out of range [:%x]", + abi.BoundsSliceAcap: "slice bounds out of range [:%x]", + abi.BoundsSliceB: "slice bounds out of range [%x:]", + abi.BoundsSlice3Alen: "slice bounds out of range [::%x]", + abi.BoundsSlice3Acap: "slice bounds out of range [::%x]", + abi.BoundsSlice3B: "slice bounds out of range [:%x:]", + abi.BoundsSlice3C: "slice bounds out of range [%x::]", } func (e boundsError) RuntimeError() {} @@ -278,7 +260,7 @@ func printanycustomtype(i any) { eface := efaceOf(&i) typestring := toRType(eface._type).string() - switch eface._type.Kind_ { + switch eface._type.Kind() { case abi.String: print(typestring, `("`) printindented(*(*string)(eface.data)) diff --git a/src/runtime/export_debug_test.go b/src/runtime/export_debug_test.go index 96f6fd9eea..94dc974804 100644 --- a/src/runtime/export_debug_test.go +++ b/src/runtime/export_debug_test.go @@ -33,13 +33,13 @@ func InjectDebugCall(gp *g, fn any, regArgs *abi.RegArgs, stackArgs any, tkill f } f := efaceOf(&fn) - if f._type == nil || f._type.Kind_&abi.KindMask != abi.Func { + if f._type == nil || f._type.Kind() != abi.Func { return nil, plainError("fn must be a function") } fv := (*funcval)(f.data) a := efaceOf(&stackArgs) - if a._type != nil && a._type.Kind_&abi.KindMask != abi.Pointer { + if a._type != nil && a._type.Kind() != abi.Pointer { return nil, plainError("args must be a pointer or nil") } argp := a.data diff --git a/src/runtime/export_map_noswiss_test.go b/src/runtime/export_map_noswiss_test.go deleted file mode 100644 index 4638afa6b8..0000000000 --- a/src/runtime/export_map_noswiss_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 
2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package runtime - -import ( - "internal/abi" - "unsafe" -) - -const RuntimeHmapSize = unsafe.Sizeof(hmap{}) - -func OverLoadFactor(count int, B uint8) bool { - return overLoadFactor(count, B) -} - -func MapBucketsCount(m map[int]int) int { - h := *(**hmap)(unsafe.Pointer(&m)) - return 1 << h.B -} - -func MapBucketsPointerIsNil(m map[int]int) bool { - h := *(**hmap)(unsafe.Pointer(&m)) - return h.buckets == nil -} - -func MapTombstoneCheck(m map[int]int) { - // Make sure emptyOne and emptyRest are distributed correctly. - // We should have a series of filled and emptyOne cells, followed by - // a series of emptyRest cells. - h := *(**hmap)(unsafe.Pointer(&m)) - i := any(m) - t := *(**maptype)(unsafe.Pointer(&i)) - - for x := 0; x < 1<<h.B; x++ { - b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize))) - n := 0 - for b := b0; b != nil; b = b.overflow(t) { - for i := 0; i < abi.OldMapBucketCount; i++ { - if b.tophash[i] != emptyRest { - n++ - } - } - } - k := 0 - for b := b0; b != nil; b = b.overflow(t) { - for i := 0; i < abi.OldMapBucketCount; i++ { - if k < n && b.tophash[i] == emptyRest { - panic("early emptyRest") - } - if k >= n && b.tophash[i] != emptyRest { - panic("late non-emptyRest") - } - if k == n-1 && b.tophash[i] == emptyOne { - panic("last non-emptyRest entry is emptyOne") - } - k++ - } - } - } -} diff --git a/src/runtime/export_map_swiss_test.go b/src/runtime/export_map_swiss_test.go deleted file mode 100644 index 55a7d6ff04..0000000000 --- a/src/runtime/export_map_swiss_test.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package runtime - -func MapTombstoneCheck(m map[int]int) { - // TODO -} diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 81542deb59..1f55717f0a 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -9,6 +9,7 @@ package runtime import ( "internal/abi" "internal/goarch" + "internal/goexperiment" "internal/goos" "internal/runtime/atomic" "internal/runtime/gc" @@ -58,9 +59,6 @@ const CrashStackImplemented = crashStackImplemented const TracebackInnerFrames = tracebackInnerFrames const TracebackOuterFrames = tracebackOuterFrames -var MapKeys = keys -var MapValues = values - var LockPartialOrder = lockPartialOrder type TimeTimer = timeTimer @@ -417,7 +415,8 @@ func ReadMemStatsSlow() (base, slow MemStats) { slow.HeapReleased += uint64(pg) * pageSize } for _, p := range allp { - pg := sys.OnesCount64(p.pcache.scav) + // Only count scav bits for pages in the cache + pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav) slow.HeapReleased += uint64(pg) * pageSize } @@ -1122,12 +1121,16 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) { // Lock so that we can safely access the bitmap. lock(&mheap_.lock) + + heapBase := mheap_.pages.inUse.ranges[0].base.addr() + secondArenaBase := arenaBase(arenaIndex(heapBase) + 1) chunkLoop: for i := mheap_.pages.start; i < mheap_.pages.end; i++ { chunk := mheap_.pages.tryChunkOf(i) if chunk == nil { continue } + cb := chunkBase(i) for j := 0; j < pallocChunkPages/64; j++ { // Run over each 64-bit bitmap section and ensure // scavenged is being cleared properly on allocation. 
@@ -1137,12 +1140,20 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) { want := chunk.scavenged[j] &^ chunk.pallocBits[j] got := chunk.scavenged[j] if want != got { + // When goexperiment.RandomizedHeapBase64 is set, we use a + // series of padding pages to generate a randomized heap base + // address; those padding pages have both the alloc and scav + // bits set. If we see this for a chunk between the heap base + // address and the base of the second arena, continue. + if goexperiment.RandomizedHeapBase64 && (cb >= heapBase && cb < secondArenaBase) { + continue + } ok = false if n >= len(mismatches) { break chunkLoop } mismatches[n] = BitsMismatch{ - Base: chunkBase(i) + uintptr(j)*64*pageSize, + Base: cb + uintptr(j)*64*pageSize, Got: got, Want: want, } @@ -1761,7 +1772,7 @@ func NewUserArena() *UserArena { func (a *UserArena) New(out *any) { i := efaceOf(out) typ := i._type - if typ.Kind_&abi.KindMask != abi.Pointer { + if typ.Kind() != abi.Pointer { panic("new result of non-ptr type") } typ = (*ptrtype)(unsafe.Pointer(typ)).Elem diff --git a/src/runtime/export_windows_test.go b/src/runtime/export_windows_test.go index 13d30d4bc4..caaf2dae51 100644 --- a/src/runtime/export_windows_test.go +++ b/src/runtime/export_windows_test.go @@ -11,8 +11,6 @@ import ( "unsafe" ) -const MaxArgs = maxArgs - var ( OsYield = osyield TimeBeginPeriodRetValue = &timeBeginPeriodRetValue @@ -20,7 +18,7 @@ var ( func NumberOfProcessors() int32 { var info systeminfo - stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info))) + stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info))) return int32(info.dwnumberofprocessors) } diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go index 5476035b2e..d9474034c2 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -206,7 +206,7 @@ func dumptype(t *_type) { dwritebyte('.') dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name))) } - dumpbool(t.Kind_&abi.KindDirectIface == 0 || t.Pointers()) + dumpbool(!t.IsDirectIface() || t.Pointers()) } // dump an object. @@ -460,7 +460,7 @@ func dumproots() { continue } spf := (*specialfinalizer)(unsafe.Pointer(sp)) - p := unsafe.Pointer(s.base() + uintptr(spf.special.offset)) + p := unsafe.Pointer(s.base() + spf.special.offset) dumpfinalizer(p, spf.fn, spf.fint, spf.ot) } } @@ -659,7 +659,7 @@ func dumpmemprof() { continue } spp := (*specialprofile)(unsafe.Pointer(sp)) - p := s.base() + uintptr(spp.special.offset) + p := s.base() + spp.special.offset dumpint(tagAllocSample) dumpint(uint64(p)) dumpint(uint64(uintptr(unsafe.Pointer(spp.b)))) diff --git a/src/runtime/linkname_swiss.go b/src/runtime/linkname_shim.go index 1be724477e..4ba3d1fb78 100644 --- a/src/runtime/linkname_swiss.go +++ b/src/runtime/linkname_shim.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build goexperiment.swissmap - package runtime import ( @@ -16,8 +14,7 @@ import ( // Legacy //go:linkname compatibility shims // // The functions below are unused by the toolchain, and exist only for -// compatibility with existing //go:linkname use in the ecosystem (and in -// map_noswiss.go for normal use via GOEXPERIMENT=noswissmap). +// compatibility with existing //go:linkname use in the ecosystem. // linknameIter is the it argument to mapiterinit and mapiternext. 
// @@ -27,7 +24,7 @@ import ( // type hiter struct { // key unsafe.Pointer // elem unsafe.Pointer -// t *maptype +// t *maptype // old map abi.Type // h *hmap // buckets unsafe.Pointer // bptr *bmap @@ -64,7 +61,7 @@ type linknameIter struct { // Fields from hiter. key unsafe.Pointer elem unsafe.Pointer - typ *abi.SwissMapType + typ *abi.MapType // The real iterator. it *maps.Iter @@ -88,7 +85,7 @@ type linknameIter struct { // See go.dev/issue/67401. // //go:linkname mapiterinit -func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) { +func mapiterinit(t *abi.MapType, m *maps.Map, it *linknameIter) { if raceenabled && m != nil { callerpc := sys.GetCallerPC() racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapiterinit)) @@ -120,7 +117,7 @@ func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) { // See go.dev/issue/67401. // //go:linkname reflect_mapiterinit reflect.mapiterinit -func reflect_mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) { +func reflect_mapiterinit(t *abi.MapType, m *maps.Map, it *linknameIter) { mapiterinit(t, m, it) } diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index bc7dab9d20..d21b2c49b5 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -102,6 +102,7 @@ package runtime import ( "internal/goarch" + "internal/goexperiment" "internal/goos" "internal/runtime/atomic" "internal/runtime/gc" @@ -345,6 +346,14 @@ const ( // metadata mappings back to the OS. That would be quite complex to do in general // as the heap is likely fragmented after a reduction in heap size. minHeapForMetadataHugePages = 1 << 30 + + // randomizeHeapBase indicates if the heap base address should be randomized. + // See comment in mallocinit for how the randomization is performed. + randomizeHeapBase = goexperiment.RandomizedHeapBase64 && goarch.PtrSize == 8 && !isSbrkPlatform + + // randHeapBasePrefixMask is used to extract the top byte of the randomized + // heap base address. + randHeapBasePrefixMask = ^uintptr(0xff << (heapAddrBits - 8)) ) // physPageSize is the size in bytes of the OS's physical pages. @@ -372,6 +381,24 @@ var ( physHugePageShift uint ) +var ( + // heapRandSeed is a random value that is populated in mallocinit if + // randomizeHeapBase is set. It is used in mallocinit and mheap.grow to + // randomize the base heap address. + heapRandSeed uintptr + heapRandSeedBitsRemaining int +) + +func nextHeapRandBits(bits int) uintptr { + if bits > heapRandSeedBitsRemaining { + throw("not enough heapRandSeed bits remaining") + } + r := heapRandSeed >> (64 - bits) + heapRandSeed <<= bits + heapRandSeedBitsRemaining -= bits + return r +} + func mallocinit() { if gc.SizeClassToSize[tinySizeClass] != maxTinySize { throw("bad TinySizeClass") @@ -517,6 +544,42 @@ func mallocinit() { // // In race mode we have no choice but to just use the same hints because // the race detector requires that the heap be mapped contiguously. + // + // If randomizeHeapBase is set, we attempt to randomize the base address + // as much as possible. We do this by generating a random uint64 via + // bootstrapRand and using its bits to randomize portions of the base + // address as follows: + // * We first generate a random heapArenaBytes aligned address that we use for + // generating the hints. + // * On the first call to mheap.grow, we then generate a random PallocChunkBytes + // aligned offset into the mmap'd heap region, which we use as the base for + // the heap region. 
+ // * We then select a page offset in that PallocChunkBytes region to start the + // heap at, and mark all the pages up to that offset as allocated. + // + // Our final randomized "heap base address" becomes the first byte of + // the first available page returned by the page allocator. This results + // in an address with at least heapAddrBits-gc.PageShift-2-(1*goarch.IsAmd64) + // bits of entropy. + + var randHeapBase uintptr + var randHeapBasePrefix byte + // heapAddrBits is 48 on most platforms, but we only use 47 of those + // bits in order to provide a good amount of room for the heap to grow + // contiguously. On amd64, there are 48 bits, but the top bit is sign + // extended, so we throw away another bit, just to be safe. + randHeapAddrBits := heapAddrBits - 1 - (goarch.IsAmd64 * 1) + if randomizeHeapBase { + // Generate a random value, and take the bottom heapAddrBits-logHeapArenaBytes + // bits, using them as the top bits for randHeapBase. + heapRandSeed, heapRandSeedBitsRemaining = uintptr(bootstrapRand()), 64 + + topBits := (randHeapAddrBits - logHeapArenaBytes) + randHeapBase = nextHeapRandBits(topBits) << (randHeapAddrBits - topBits) + randHeapBase = alignUp(randHeapBase, heapArenaBytes) + randHeapBasePrefix = byte(randHeapBase >> (randHeapAddrBits - 8)) + } + for i := 0x7f; i >= 0; i-- { var p uintptr switch { @@ -528,6 +591,9 @@ func mallocinit() { if p >= uintptrMask&0x00e000000000 { continue } + case randomizeHeapBase: + prefix := uintptr(randHeapBasePrefix+byte(i)) << (randHeapAddrBits - 8) + p = prefix | (randHeapBase & randHeapBasePrefixMask) case GOARCH == "arm64" && GOOS == "ios": p = uintptr(i)<<40 | uintptrMask&(0x0013<<28) case GOARCH == "arm64": diff --git a/src/runtime/map_swiss.go b/src/runtime/map.go index c2cf08fcaa..4a0713cfc4 100644 --- a/src/runtime/map_swiss.go +++ b/src/runtime/map.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build goexperiment.swissmap - package runtime import ( @@ -19,12 +17,10 @@ const ( loadFactorDen = 8 ) -type maptype = abi.SwissMapType - //go:linkname maps_errNilAssign internal/runtime/maps.errNilAssign var maps_errNilAssign error = plainError("assignment to entry in nil map") -func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map { +func makemap64(t *abi.MapType, hint int64, m *maps.Map) *maps.Map { if int64(int(hint)) != hint { hint = 0 } @@ -32,7 +28,7 @@ func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map { } // makemap_small implements Go map creation for make(map[k]v) and -// make(map[k]v, hint) when hint is known to be at most abi.SwissMapGroupSlots +// make(map[k]v, hint) when hint is known to be at most abi.MapGroupSlots // at compile time and the map needs to be allocated on the heap. // // makemap_small should be an internal detail, @@ -63,7 +59,7 @@ func makemap_small() *maps.Map { // See go.dev/issue/67401. // //go:linkname makemap -func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map { +func makemap(t *abi.MapType, hint int, m *maps.Map) *maps.Map { if hint < 0 { hint = 0 } @@ -81,7 +77,7 @@ func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map { // we want to avoid one layer of call. // //go:linkname mapaccess1 -func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer +func mapaccess1(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer // mapaccess2 should be an internal detail, // but widely used packages access it using linkname. 
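
The map.go hunks above are a mechanical rename of abi.SwissMapType to abi.MapType; the entry points keep their existing contracts. For example, the hint clamp visible at the top of makemap64 in the diff can be exercised standalone. A small sketch, assuming a hypothetical clampHint helper that mirrors that guard (it is not runtime code):

package main

import "fmt"

// clampHint mirrors the makemap64 guard: a 64-bit hint that does not
// round-trip through int (possible on 32-bit targets) is treated as 0
// rather than being truncated into a bogus size.
func clampHint(hint int64) int {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return int(hint)
}

func main() {
	fmt.Println(clampHint(10))      // 10
	fmt.Println(clampHint(1 << 40)) // 0 on 32-bit targets; 1<<40 on 64-bit
}
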
@@ -92,9 +88,9 @@ func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Poi // See go.dev/issue/67401. // //go:linkname mapaccess2 -func mapaccess2(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool) +func mapaccess2(t *abi.MapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool) -func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer { +func mapaccess1_fat(t *abi.MapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer { e := mapaccess1(t, m, key) if e == unsafe.Pointer(&zeroVal[0]) { return zero @@ -102,7 +98,7 @@ func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) return e } -func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) { +func mapaccess2_fat(t *abi.MapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) { e := mapaccess1(t, m, key) if e == unsafe.Pointer(&zeroVal[0]) { return zero, false @@ -125,7 +121,7 @@ func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) // See go.dev/issue/67401. // //go:linkname mapassign -func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer +func mapassign(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer // mapdelete should be an internal detail, // but widely used packages access it using linkname. @@ -136,7 +132,7 @@ func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Poin // See go.dev/issue/67401. // //go:linkname mapdelete -func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) { +func mapdelete(t *abi.MapType, m *maps.Map, key unsafe.Pointer) { if raceenabled && m != nil { callerpc := sys.GetCallerPC() pc := abi.FuncPCABIInternal(mapdelete) @@ -157,7 +153,7 @@ func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) { // performs the first step of iteration. The Iter struct pointed to by 'it' is // allocated on the stack by the compilers order pass or on the heap by // reflect. Both need to have zeroed it since the struct contains pointers. -func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) { +func mapIterStart(t *abi.MapType, m *maps.Map, it *maps.Iter) { if raceenabled && m != nil { callerpc := sys.GetCallerPC() racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapIterStart)) @@ -179,7 +175,7 @@ func mapIterNext(it *maps.Iter) { } // mapclear deletes all keys from a map. -func mapclear(t *abi.SwissMapType, m *maps.Map) { +func mapclear(t *abi.MapType, m *maps.Map) { if raceenabled && m != nil { callerpc := sys.GetCallerPC() pc := abi.FuncPCABIInternal(mapclear) @@ -205,7 +201,7 @@ func mapclear(t *abi.SwissMapType, m *maps.Map) { // See go.dev/issue/67401. // //go:linkname reflect_makemap reflect.makemap -func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map { +func reflect_makemap(t *abi.MapType, cap int) *maps.Map { // Check invariants and reflects math. if t.Key.Equal == nil { throw("runtime.reflect_makemap: unsupported map key type") @@ -226,7 +222,7 @@ func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map { // See go.dev/issue/67401. 
// //go:linkname reflect_mapaccess reflect.mapaccess -func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer { +func reflect_mapaccess(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer { elem, ok := mapaccess2(t, m, key) if !ok { // reflect wants nil for a missing element @@ -236,7 +232,7 @@ func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) uns } //go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr -func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) unsafe.Pointer { +func reflect_mapaccess_faststr(t *abi.MapType, m *maps.Map, key string) unsafe.Pointer { elem, ok := mapaccess2_faststr(t, m, key) if !ok { // reflect wants nil for a missing element @@ -254,24 +250,24 @@ func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) uns // Do not remove or change the type signature. // //go:linkname reflect_mapassign reflect.mapassign0 -func reflect_mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) { +func reflect_mapassign(t *abi.MapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) { p := mapassign(t, m, key) typedmemmove(t.Elem, p, elem) } //go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0 -func reflect_mapassign_faststr(t *abi.SwissMapType, m *maps.Map, key string, elem unsafe.Pointer) { +func reflect_mapassign_faststr(t *abi.MapType, m *maps.Map, key string, elem unsafe.Pointer) { p := mapassign_faststr(t, m, key) typedmemmove(t.Elem, p, elem) } //go:linkname reflect_mapdelete reflect.mapdelete -func reflect_mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) { +func reflect_mapdelete(t *abi.MapType, m *maps.Map, key unsafe.Pointer) { mapdelete(t, m, key) } //go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr -func reflect_mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, key string) { +func reflect_mapdelete_faststr(t *abi.MapType, m *maps.Map, key string) { mapdelete_faststr(t, m, key) } @@ -297,7 +293,7 @@ func reflect_maplen(m *maps.Map) int { } //go:linkname reflect_mapclear reflect.mapclear -func reflect_mapclear(t *abi.SwissMapType, m *maps.Map) { +func reflect_mapclear(t *abi.MapType, m *maps.Map) { mapclear(t, m) } @@ -325,25 +321,9 @@ func mapinitnoop() //go:linkname mapclone maps.clone func mapclone(m any) any { e := efaceOf(&m) - typ := (*abi.SwissMapType)(unsafe.Pointer(e._type)) + typ := (*abi.MapType)(unsafe.Pointer(e._type)) map_ := (*maps.Map)(e.data) map_ = map_.Clone(typ) e.data = (unsafe.Pointer)(map_) return m } - -// keys for implementing maps.keys -// -//go:linkname keys maps.keys -func keys(m any, p unsafe.Pointer) { - // Currently unused in the maps package. - panic("unimplemented") -} - -// values for implementing maps.values -// -//go:linkname values maps.values -func values(m any, p unsafe.Pointer) { - // Currently unused in the maps package. - panic("unimplemented") -} diff --git a/src/runtime/map_fast32_swiss.go b/src/runtime/map_fast32.go index 0a241d3793..17b4c31d02 100644 --- a/src/runtime/map_fast32_swiss.go +++ b/src/runtime/map_fast32.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build goexperiment.swissmap - package runtime import ( @@ -15,7 +13,7 @@ import ( // Functions below pushed from internal/runtime/maps. 
//go:linkname mapaccess1_fast32 -func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer +func mapaccess1_fast32(t *abi.MapType, m *maps.Map, key uint32) unsafe.Pointer // mapaccess2_fast32 should be an internal detail, // but widely used packages access it using linkname. @@ -26,7 +24,7 @@ func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Poin // See go.dev/issue/67401. // //go:linkname mapaccess2_fast32 -func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Pointer, bool) +func mapaccess2_fast32(t *abi.MapType, m *maps.Map, key uint32) (unsafe.Pointer, bool) // mapassign_fast32 should be an internal detail, // but widely used packages access it using linkname. @@ -38,7 +36,7 @@ func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Poi // See go.dev/issue/67401. // //go:linkname mapassign_fast32 -func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer +func mapassign_fast32(t *abi.MapType, m *maps.Map, key uint32) unsafe.Pointer // mapassign_fast32ptr should be an internal detail, // but widely used packages access it using linkname. @@ -49,7 +47,7 @@ func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Point // See go.dev/issue/67401. // //go:linkname mapassign_fast32ptr -func mapassign_fast32ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer +func mapassign_fast32ptr(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer //go:linkname mapdelete_fast32 -func mapdelete_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) +func mapdelete_fast32(t *abi.MapType, m *maps.Map, key uint32) diff --git a/src/runtime/map_fast32_noswiss.go b/src/runtime/map_fast32_noswiss.go deleted file mode 100644 index 751717b6cd..0000000000 --- a/src/runtime/map_fast32_noswiss.go +++ /dev/null @@ -1,493 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package runtime - -import ( - "internal/abi" - "internal/goarch" - "internal/runtime/sys" - "unsafe" -) - -func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32)) - } - if h == nil || h.count == 0 { - return unsafe.Pointer(&zeroVal[0]) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - var b *bmap - if h.B == 0 { - // One-bucket table. No need to hash. - b = (*bmap)(h.buckets) - } else { - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - m := bucketMask(h.B) - b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) - if c := h.oldbuckets; c != nil { - if !h.sameSizeGrow() { - // There used to be half as many buckets; mask down one more power of two. - m >>= 1 - } - oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) - if !evacuated(oldb) { - b = oldb - } - } - } - for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) { - if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize)) - } - } - } - return unsafe.Pointer(&zeroVal[0]) -} - -// mapaccess2_fast32 should be an internal detail, -// but widely used packages access it using linkname. 
-// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapaccess2_fast32 -func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32)) - } - if h == nil || h.count == 0 { - return unsafe.Pointer(&zeroVal[0]), false - } - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - var b *bmap - if h.B == 0 { - // One-bucket table. No need to hash. - b = (*bmap)(h.buckets) - } else { - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - m := bucketMask(h.B) - b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) - if c := h.oldbuckets; c != nil { - if !h.sameSizeGrow() { - // There used to be half as many buckets; mask down one more power of two. - m >>= 1 - } - oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) - if !evacuated(oldb) { - b = oldb - } - } - } - for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) { - if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize)), true - } - } - } - return unsafe.Pointer(&zeroVal[0]), false -} - -// mapassign_fast32 should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapassign_fast32 -func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { - if h == nil { - panic(plainError("assignment to entry in nil map")) - } - if raceenabled { - callerpc := sys.GetCallerPC() - racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32)) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher for consistency with mapassign. - h.flags ^= hashWriting - - if h.buckets == nil { - h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1) - } - -again: - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork_fast32(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - - var insertb *bmap - var inserti uintptr - var insertk unsafe.Pointer - -bucketloop: - for { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if isEmpty(b.tophash[i]) { - if insertb == nil { - inserti = i - insertb = b - } - if b.tophash[i] == emptyRest { - break bucketloop - } - continue - } - k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))) - if k != key { - continue - } - inserti = i - insertb = b - goto done - } - ovf := b.overflow(t) - if ovf == nil { - break - } - b = ovf - } - - // Did not find mapping for key. Allocate new cell & add entry. - - // If we hit the max load factor or we have too many overflow buckets, - // and we're not already in the middle of growing, start growing. 
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { - hashGrow(t, h) - goto again // Growing the table invalidates everything, so try again - } - - if insertb == nil { - // The current bucket and all the overflow buckets connected to it are full, allocate a new one. - insertb = h.newoverflow(t, b) - inserti = 0 // not necessary, but avoids needlessly spilling inserti - } - insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks - - insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4) - // store new key at insert position - *(*uint32)(insertk) = key - - h.count++ - -done: - elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize)) - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting - return elem -} - -// mapassign_fast32ptr should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapassign_fast32ptr -func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { - if h == nil { - panic(plainError("assignment to entry in nil map")) - } - if raceenabled { - callerpc := sys.GetCallerPC() - racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32)) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher for consistency with mapassign. - h.flags ^= hashWriting - - if h.buckets == nil { - h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1) - } - -again: - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork_fast32(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - - var insertb *bmap - var inserti uintptr - var insertk unsafe.Pointer - -bucketloop: - for { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if isEmpty(b.tophash[i]) { - if insertb == nil { - inserti = i - insertb = b - } - if b.tophash[i] == emptyRest { - break bucketloop - } - continue - } - k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4))) - if k != key { - continue - } - inserti = i - insertb = b - goto done - } - ovf := b.overflow(t) - if ovf == nil { - break - } - b = ovf - } - - // Did not find mapping for key. Allocate new cell & add entry. - - // If we hit the max load factor or we have too many overflow buckets, - // and we're not already in the middle of growing, start growing. - if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { - hashGrow(t, h) - goto again // Growing the table invalidates everything, so try again - } - - if insertb == nil { - // The current bucket and all the overflow buckets connected to it are full, allocate a new one. 
- insertb = h.newoverflow(t, b) - inserti = 0 // not necessary, but avoids needlessly spilling inserti - } - insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks - - insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4) - // store new key at insert position - *(*unsafe.Pointer)(insertk) = key - - h.count++ - -done: - elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize)) - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting - return elem -} - -func mapdelete_fast32(t *maptype, h *hmap, key uint32) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32)) - } - if h == nil || h.count == 0 { - return - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher for consistency with mapdelete - h.flags ^= hashWriting - - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork_fast32(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - bOrig := b -search: - for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) { - if key != *(*uint32)(k) || isEmpty(b.tophash[i]) { - continue - } - // Only clear key if there are pointers in it. - // This can only happen if pointers are 32 bit - // wide as 64 bit pointers do not fit into a 32 bit key. - if goarch.PtrSize == 4 && t.Key.Pointers() { - // The key must be a pointer as we checked pointers are - // 32 bits wide and the key is 32 bits wide also. - *(*unsafe.Pointer)(k) = nil - } - e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize)) - if t.Elem.Pointers() { - memclrHasPointers(e, t.Elem.Size_) - } else { - memclrNoHeapPointers(e, t.Elem.Size_) - } - b.tophash[i] = emptyOne - // If the bucket now ends in a bunch of emptyOne states, - // change those to emptyRest states. - if i == abi.OldMapBucketCount-1 { - if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { - goto notLast - } - } else { - if b.tophash[i+1] != emptyRest { - goto notLast - } - } - for { - b.tophash[i] = emptyRest - if i == 0 { - if b == bOrig { - break // beginning of initial bucket, we're done. - } - // Find previous bucket, continue at its last entry. - c := b - for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { - } - i = abi.OldMapBucketCount - 1 - } else { - i-- - } - if b.tophash[i] != emptyOne { - break - } - } - notLast: - h.count-- - // Reset the hash seed to make it more difficult for attackers to - // repeatedly trigger hash collisions. See issue 25237. 
- if h.count == 0 { - h.hash0 = uint32(rand()) - } - break search - } - } - - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting -} - -func growWork_fast32(t *maptype, h *hmap, bucket uintptr) { - // make sure we evacuate the oldbucket corresponding - // to the bucket we're about to use - evacuate_fast32(t, h, bucket&h.oldbucketmask()) - - // evacuate one more oldbucket to make progress on growing - if h.growing() { - evacuate_fast32(t, h, h.nevacuate) - } -} - -func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) { - b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))) - newbit := h.noldbuckets() - if !evacuated(b) { - // TODO: reuse overflow buckets instead of using new ones, if there - // is no iterator using the old buckets. (If !oldIterator.) - - // xy contains the x and y (low and high) evacuation destinations. - var xy [2]evacDst - x := &xy[0] - x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize))) - x.k = add(unsafe.Pointer(x.b), dataOffset) - x.e = add(x.k, abi.OldMapBucketCount*4) - - if !h.sameSizeGrow() { - // Only calculate y pointers if we're growing bigger. - // Otherwise GC can see bad pointers. - y := &xy[1] - y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) - y.k = add(unsafe.Pointer(y.b), dataOffset) - y.e = add(y.k, abi.OldMapBucketCount*4) - } - - for ; b != nil; b = b.overflow(t) { - k := add(unsafe.Pointer(b), dataOffset) - e := add(k, abi.OldMapBucketCount*4) - for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) { - top := b.tophash[i] - if isEmpty(top) { - b.tophash[i] = evacuatedEmpty - continue - } - if top < minTopHash { - throw("bad map state") - } - var useY uint8 - if !h.sameSizeGrow() { - // Compute hash to make our evacuation decision (whether we need - // to send this key/elem to bucket x or bucket y). - hash := t.Hasher(k, uintptr(h.hash0)) - if hash&newbit != 0 { - useY = 1 - } - } - - b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap - dst := &xy[useY] // evacuation destination - - if dst.i == abi.OldMapBucketCount { - dst.b = h.newoverflow(t, dst.b) - dst.i = 0 - dst.k = add(unsafe.Pointer(dst.b), dataOffset) - dst.e = add(dst.k, abi.OldMapBucketCount*4) - } - dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check - - // Copy key. - if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled { - // Write with a write barrier. - *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k) - } else { - *(*uint32)(dst.k) = *(*uint32)(k) - } - - typedmemmove(t.Elem, dst.e, e) - dst.i++ - // These updates might push these pointers past the end of the - // key or elem arrays. That's ok, as we have the overflow pointer - // at the end of the bucket to protect against pointing past the - // end of the bucket. - dst.k = add(dst.k, 4) - dst.e = add(dst.e, uintptr(t.ValueSize)) - } - } - // Unlink the overflow buckets & clear key/elem to help GC. - if h.flags&oldIterator == 0 && t.Bucket.Pointers() { - b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)) - // Preserve b.tophash because the evacuation - // state is maintained there. 
- ptr := add(b, dataOffset) - n := uintptr(t.BucketSize) - dataOffset - memclrHasPointers(ptr, n) - } - } - - if oldbucket == h.nevacuate { - advanceEvacuationMark(h, t, newbit) - } -} diff --git a/src/runtime/map_fast64_swiss.go b/src/runtime/map_fast64.go index 8b7fcf88e8..8640acf6a6 100644 --- a/src/runtime/map_fast64_swiss.go +++ b/src/runtime/map_fast64.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build goexperiment.swissmap - package runtime import ( @@ -15,7 +13,7 @@ import ( // Functions below pushed from internal/runtime/maps. //go:linkname mapaccess1_fast64 -func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer +func mapaccess1_fast64(t *abi.MapType, m *maps.Map, key uint64) unsafe.Pointer // mapaccess2_fast64 should be an internal detail, // but widely used packages access it using linkname. @@ -26,7 +24,7 @@ func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Poin // See go.dev/issue/67401. // //go:linkname mapaccess2_fast64 -func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Pointer, bool) +func mapaccess2_fast64(t *abi.MapType, m *maps.Map, key uint64) (unsafe.Pointer, bool) // mapassign_fast64 should be an internal detail, // but widely used packages access it using linkname. @@ -38,7 +36,7 @@ func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Poi // See go.dev/issue/67401. // //go:linkname mapassign_fast64 -func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer +func mapassign_fast64(t *abi.MapType, m *maps.Map, key uint64) unsafe.Pointer // mapassign_fast64ptr should be an internal detail, // but widely used packages access it using linkname. @@ -50,7 +48,7 @@ func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Point // See go.dev/issue/67401. // //go:linkname mapassign_fast64ptr -func mapassign_fast64ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer +func mapassign_fast64ptr(t *abi.MapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer //go:linkname mapdelete_fast64 -func mapdelete_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) +func mapdelete_fast64(t *abi.MapType, m *maps.Map, key uint64) diff --git a/src/runtime/map_fast64_noswiss.go b/src/runtime/map_fast64_noswiss.go deleted file mode 100644 index abb272d2b6..0000000000 --- a/src/runtime/map_fast64_noswiss.go +++ /dev/null @@ -1,502 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package runtime - -import ( - "internal/abi" - "internal/goarch" - "internal/runtime/sys" - "unsafe" -) - -func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64)) - } - if h == nil || h.count == 0 { - return unsafe.Pointer(&zeroVal[0]) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - var b *bmap - if h.B == 0 { - // One-bucket table. No need to hash. 
diff --git a/src/runtime/map_fast64_noswiss.go b/src/runtime/map_fast64_noswiss.go
deleted file mode 100644
index abb272d2b6..0000000000
--- a/src/runtime/map_fast64_noswiss.go
+++ /dev/null
@@ -1,502 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-import (
-	"internal/abi"
-	"internal/goarch"
-	"internal/runtime/sys"
-	"unsafe"
-)
-
-func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
-	if raceenabled && h != nil {
-		callerpc := sys.GetCallerPC()
-		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
-	}
-	if h == nil || h.count == 0 {
-		return unsafe.Pointer(&zeroVal[0])
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map read and map write")
-	}
-	var b *bmap
-	if h.B == 0 {
-		// One-bucket table. No need to hash.
-		b = (*bmap)(h.buckets)
-	} else {
-		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-		m := bucketMask(h.B)
-		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-		if c := h.oldbuckets; c != nil {
-			if !h.sameSizeGrow() {
-				// There used to be half as many buckets; mask down one more power of two.
-				m >>= 1
-			}
-			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-			if !evacuated(oldb) {
-				b = oldb
-			}
-		}
-	}
-	for ; b != nil; b = b.overflow(t) {
-		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
-			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
-				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
-			}
-		}
-	}
-	return unsafe.Pointer(&zeroVal[0])
-}
-
-// mapaccess2_fast64 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapaccess2_fast64
-func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
-	if raceenabled && h != nil {
-		callerpc := sys.GetCallerPC()
-		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
-	}
-	if h == nil || h.count == 0 {
-		return unsafe.Pointer(&zeroVal[0]), false
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map read and map write")
-	}
-	var b *bmap
-	if h.B == 0 {
-		// One-bucket table. No need to hash.
-		b = (*bmap)(h.buckets)
-	} else {
-		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-		m := bucketMask(h.B)
-		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-		if c := h.oldbuckets; c != nil {
-			if !h.sameSizeGrow() {
-				// There used to be half as many buckets; mask down one more power of two.
-				m >>= 1
-			}
-			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-			if !evacuated(oldb) {
-				b = oldb
-			}
-		}
-	}
-	for ; b != nil; b = b.overflow(t) {
-		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
-			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
-				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize)), true
-			}
-		}
-	}
-	return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// mapassign_fast64 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_fast64
-func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
-	if h == nil {
-		panic(plainError("assignment to entry in nil map"))
-	}
-	if raceenabled {
-		callerpc := sys.GetCallerPC()
-		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map writes")
-	}
-	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
-	// Set hashWriting after calling t.hasher for consistency with mapassign.
-	h.flags ^= hashWriting
-
-	if h.buckets == nil {
-		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
-	}
-
-again:
-	bucket := hash & bucketMask(h.B)
-	if h.growing() {
-		growWork_fast64(t, h, bucket)
-	}
-	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-
-	var insertb *bmap
-	var inserti uintptr
-	var insertk unsafe.Pointer
-
-bucketloop:
-	for {
-		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-			if isEmpty(b.tophash[i]) {
-				if insertb == nil {
-					insertb = b
-					inserti = i
-				}
-				if b.tophash[i] == emptyRest {
-					break bucketloop
-				}
-				continue
-			}
-			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
-			if k != key {
-				continue
-			}
-			insertb = b
-			inserti = i
-			goto done
-		}
-		ovf := b.overflow(t)
-		if ovf == nil {
-			break
-		}
-		b = ovf
-	}
-
-	// Did not find mapping for key. Allocate new cell & add entry.
-
-	// If we hit the max load factor or we have too many overflow buckets,
-	// and we're not already in the middle of growing, start growing.
-	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
-		hashGrow(t, h)
-		goto again // Growing the table invalidates everything, so try again
-	}
-
-	if insertb == nil {
-		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
-		insertb = h.newoverflow(t, b)
-		inserti = 0 // not necessary, but avoids needlessly spilling inserti
-	}
-	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
-	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
-	// store new key at insert position
-	*(*uint64)(insertk) = key
-
-	h.count++
-
-done:
-	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
-	if h.flags&hashWriting == 0 {
-		fatal("concurrent map writes")
-	}
-	h.flags &^= hashWriting
-	return elem
-}
-
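mapassign_fast64 above grows on two conditions. The helpers it calls, overLoadFactor and tooManyOverflowBuckets, are defined in a part of map_noswiss.go outside this hunk; the sketch below reconstructs their logic from the constants deleted later in this patch (bucketCnt = 8, loadFactorNum/loadFactorDen = 13/2, i.e. an average load of 6.5):

```go
// Reconstruction of the two growth predicates, not the verbatim
// deleted code. Doubling growth: average bucket load exceeds 6.5.
func overLoadFactor(count int, B uint8) bool {
	return count > 8 && uintptr(count) > 13*(uintptr(1)<<B)/2
}

// Same-size growth: roughly as many overflow buckets as regular
// buckets, with the threshold capped at 1<<15 (see incrnoverflow
// further down for why the overflow count is approximate).
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	if B > 15 {
		B = 15
	}
	return noverflow >= uint16(1)<<(B&15)
}
```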
-// mapassign_fast64ptr should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_fast64ptr
-func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-	if h == nil {
-		panic(plainError("assignment to entry in nil map"))
-	}
-	if raceenabled {
-		callerpc := sys.GetCallerPC()
-		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map writes")
-	}
-	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
-	// Set hashWriting after calling t.hasher for consistency with mapassign.
-	h.flags ^= hashWriting
-
-	if h.buckets == nil {
-		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
-	}
-
-again:
-	bucket := hash & bucketMask(h.B)
-	if h.growing() {
-		growWork_fast64(t, h, bucket)
-	}
-	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-
-	var insertb *bmap
-	var inserti uintptr
-	var insertk unsafe.Pointer
-
-bucketloop:
-	for {
-		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-			if isEmpty(b.tophash[i]) {
-				if insertb == nil {
-					insertb = b
-					inserti = i
-				}
-				if b.tophash[i] == emptyRest {
-					break bucketloop
-				}
-				continue
-			}
-			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
-			if k != key {
-				continue
-			}
-			insertb = b
-			inserti = i
-			goto done
-		}
-		ovf := b.overflow(t)
-		if ovf == nil {
-			break
-		}
-		b = ovf
-	}
-
-	// Did not find mapping for key. Allocate new cell & add entry.
-
-	// If we hit the max load factor or we have too many overflow buckets,
-	// and we're not already in the middle of growing, start growing.
-	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
-		hashGrow(t, h)
-		goto again // Growing the table invalidates everything, so try again
-	}
-
-	if insertb == nil {
-		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
-		insertb = h.newoverflow(t, b)
-		inserti = 0 // not necessary, but avoids needlessly spilling inserti
-	}
-	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks
-
-	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
-	// store new key at insert position
-	*(*unsafe.Pointer)(insertk) = key
-
-	h.count++
-
-done:
-	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize))
-	if h.flags&hashWriting == 0 {
-		fatal("concurrent map writes")
-	}
-	h.flags &^= hashWriting
-	return elem
-}
-
-func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
-	if raceenabled && h != nil {
-		callerpc := sys.GetCallerPC()
-		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
-	}
-	if h == nil || h.count == 0 {
-		return
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map writes")
-	}
-
-	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
-
-	// Set hashWriting after calling t.hasher for consistency with mapdelete
-	h.flags ^= hashWriting
-
-	bucket := hash & bucketMask(h.B)
-	if h.growing() {
-		growWork_fast64(t, h, bucket)
-	}
-	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-	bOrig := b
-search:
-	for ; b != nil; b = b.overflow(t) {
-		for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) {
-			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
-				continue
-			}
-			// Only clear key if there are pointers in it.
-			if t.Key.Pointers() {
-				if goarch.PtrSize == 8 {
-					*(*unsafe.Pointer)(k) = nil
-				} else {
-					// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
-					// Just call memclrHasPointers instead of trying to handle all cases here.
-					memclrHasPointers(k, 8)
-				}
-			}
-			e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize))
-			if t.Elem.Pointers() {
-				memclrHasPointers(e, t.Elem.Size_)
-			} else {
-				memclrNoHeapPointers(e, t.Elem.Size_)
-			}
-			b.tophash[i] = emptyOne
-			// If the bucket now ends in a bunch of emptyOne states,
-			// change those to emptyRest states.
-			if i == abi.OldMapBucketCount-1 {
-				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
-					goto notLast
-				}
-			} else {
-				if b.tophash[i+1] != emptyRest {
-					goto notLast
-				}
-			}
-			for {
-				b.tophash[i] = emptyRest
-				if i == 0 {
-					if b == bOrig {
-						break // beginning of initial bucket, we're done.
-					}
-					// Find previous bucket, continue at its last entry.
-					c := b
-					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
-					}
-					i = abi.OldMapBucketCount - 1
-				} else {
-					i--
-				}
-				if b.tophash[i] != emptyOne {
-					break
-				}
-			}
-		notLast:
-			h.count--
-			// Reset the hash seed to make it more difficult for attackers to
-			// repeatedly trigger hash collisions. See issue 25237.
-			if h.count == 0 {
-				h.hash0 = uint32(rand())
-			}
-			break search
-		}
-	}
-
-	if h.flags&hashWriting == 0 {
-		fatal("concurrent map writes")
-	}
-	h.flags &^= hashWriting
-}
-
-func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
-	// make sure we evacuate the oldbucket corresponding
-	// to the bucket we're about to use
-	evacuate_fast64(t, h, bucket&h.oldbucketmask())
-
-	// evacuate one more oldbucket to make progress on growing
-	if h.growing() {
-		evacuate_fast64(t, h, h.nevacuate)
-	}
-}
-
-func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
-	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
-	newbit := h.noldbuckets()
-	if !evacuated(b) {
-		// TODO: reuse overflow buckets instead of using new ones, if there
-		// is no iterator using the old buckets. (If !oldIterator.)
-
-		// xy contains the x and y (low and high) evacuation destinations.
-		var xy [2]evacDst
-		x := &xy[0]
-		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
-		x.k = add(unsafe.Pointer(x.b), dataOffset)
-		x.e = add(x.k, abi.OldMapBucketCount*8)
-
-		if !h.sameSizeGrow() {
-			// Only calculate y pointers if we're growing bigger.
-			// Otherwise GC can see bad pointers.
-			y := &xy[1]
-			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
-			y.k = add(unsafe.Pointer(y.b), dataOffset)
-			y.e = add(y.k, abi.OldMapBucketCount*8)
-		}
-
-		for ; b != nil; b = b.overflow(t) {
-			k := add(unsafe.Pointer(b), dataOffset)
-			e := add(k, abi.OldMapBucketCount*8)
-			for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
-				top := b.tophash[i]
-				if isEmpty(top) {
-					b.tophash[i] = evacuatedEmpty
-					continue
-				}
-				if top < minTopHash {
-					throw("bad map state")
-				}
-				var useY uint8
-				if !h.sameSizeGrow() {
-					// Compute hash to make our evacuation decision (whether we need
-					// to send this key/elem to bucket x or bucket y).
-					hash := t.Hasher(k, uintptr(h.hash0))
-					if hash&newbit != 0 {
-						useY = 1
-					}
-				}
-
-				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
-				dst := &xy[useY]                 // evacuation destination
-
-				if dst.i == abi.OldMapBucketCount {
-					dst.b = h.newoverflow(t, dst.b)
-					dst.i = 0
-					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-					dst.e = add(dst.k, abi.OldMapBucketCount*8)
-				}
-				dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
-				// Copy key.
-				if t.Key.Pointers() && writeBarrier.enabled {
-					if goarch.PtrSize == 8 {
-						// Write with a write barrier.
-						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
-					} else {
-						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
-						// Give up and call typedmemmove.
-						typedmemmove(t.Key, dst.k, k)
-					}
-				} else {
-					*(*uint64)(dst.k) = *(*uint64)(k)
-				}
-
-				typedmemmove(t.Elem, dst.e, e)
-				dst.i++
-				// These updates might push these pointers past the end of the
-				// key or elem arrays. That's ok, as we have the overflow pointer
-				// at the end of the bucket to protect against pointing past the
-				// end of the bucket.
-				dst.k = add(dst.k, 8)
-				dst.e = add(dst.e, uintptr(t.ValueSize))
-			}
-		}
-		// Unlink the overflow buckets & clear key/elem to help GC.
-		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
-			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
-			// Preserve b.tophash because the evacuation
-			// state is maintained there.
-			ptr := add(b, dataOffset)
-			n := uintptr(t.BucketSize) - dataOffset
-			memclrHasPointers(ptr, n)
-		}
-	}
-
-	if oldbucket == h.nevacuate {
-		advanceEvacuationMark(h, t, newbit)
-	}
-}
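evacuate_fast64 splits each old bucket between two destinations using a single fresh hash bit. The same decision, restated as a hypothetical standalone helper with the arithmetic from the useY computation above:

```go
// When the table doubles, an entry of old bucket b is rehashed to
// either new bucket b ("x") or b+newbit ("y"), where newbit is the
// old bucket count.
func evacDest(hash, oldbucket, newbit uintptr) uintptr {
	if hash&newbit != 0 {
		return oldbucket + newbit // the y half of the new table
	}
	return oldbucket // the x half keeps the old index
}

// Example: growing 4 -> 8 buckets (newbit = 4), an entry of old
// bucket 1 lands in new bucket 1 or 5 depending on bit 2 of its hash.
```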
diff --git a/src/runtime/map_faststr_swiss.go b/src/runtime/map_faststr.go
index 23f6c1e810..5a7b52d037 100644
--- a/src/runtime/map_faststr_swiss.go
+++ b/src/runtime/map_faststr.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.swissmap
-
 package runtime
 
 import (
@@ -15,7 +13,7 @@ import (
 // Functions below pushed from internal/runtime/maps.
 
 //go:linkname mapaccess1_faststr
-func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Pointer
+func mapaccess1_faststr(t *abi.MapType, m *maps.Map, ky string) unsafe.Pointer
 
 // mapaccess2_faststr should be an internal detail,
 // but widely used packages access it using linkname.
@@ -26,7 +24,7 @@ func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Poin
 // See go.dev/issue/67401.
 //
 //go:linkname mapaccess2_faststr
-func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Pointer, bool)
+func mapaccess2_faststr(t *abi.MapType, m *maps.Map, ky string) (unsafe.Pointer, bool)
 
 // mapassign_faststr should be an internal detail,
 // but widely used packages access it using linkname.
@@ -38,7 +36,7 @@ func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Poi
 // See go.dev/issue/67401.
 //
 //go:linkname mapassign_faststr
-func mapassign_faststr(t *abi.SwissMapType, m *maps.Map, s string) unsafe.Pointer
+func mapassign_faststr(t *abi.MapType, m *maps.Map, s string) unsafe.Pointer
 
 //go:linkname mapdelete_faststr
-func mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, ky string)
+func mapdelete_faststr(t *abi.MapType, m *maps.Map, ky string)
diff --git a/src/runtime/map_faststr_noswiss.go b/src/runtime/map_faststr_noswiss.go
deleted file mode 100644
index e8b6a3f1ae..0000000000
--- a/src/runtime/map_faststr_noswiss.go
+++ /dev/null
@@ -1,507 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-import (
-	"internal/abi"
-	"internal/goarch"
-	"internal/runtime/sys"
-	"unsafe"
-)
-
-func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
-	if raceenabled && h != nil {
-		callerpc := sys.GetCallerPC()
-		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
-	}
-	if h == nil || h.count == 0 {
-		return unsafe.Pointer(&zeroVal[0])
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map read and map write")
-	}
-	key := stringStructOf(&ky)
-	if h.B == 0 {
-		// One-bucket table.
-		b := (*bmap)(h.buckets)
-		if key.len < 32 {
-			// short key, doing lots of comparisons is ok
-			for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-				k := (*stringStruct)(kptr)
-				if k.len != key.len || isEmpty(b.tophash[i]) {
-					if b.tophash[i] == emptyRest {
-						break
-					}
-					continue
-				}
-				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
-					return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
-				}
-			}
-			return unsafe.Pointer(&zeroVal[0])
-		}
-		// long key, try not to do more comparisons than necessary
-		keymaybe := uintptr(abi.OldMapBucketCount)
-		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-			k := (*stringStruct)(kptr)
-			if k.len != key.len || isEmpty(b.tophash[i]) {
-				if b.tophash[i] == emptyRest {
-					break
-				}
-				continue
-			}
-			if k.str == key.str {
-				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
-			}
-			// check first 4 bytes
-			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
-				continue
-			}
-			// check last 4 bytes
-			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
-				continue
-			}
-			if keymaybe != abi.OldMapBucketCount {
-				// Two keys are potential matches. Use hash to distinguish them.
-				goto dohash
-			}
-			keymaybe = i
-		}
-		if keymaybe != abi.OldMapBucketCount {
-			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
-			if memequal(k.str, key.str, uintptr(key.len)) {
-				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
-			}
-		}
-		return unsafe.Pointer(&zeroVal[0])
-	}
-dohash:
-	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
-	m := bucketMask(h.B)
-	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-	if c := h.oldbuckets; c != nil {
-		if !h.sameSizeGrow() {
-			// There used to be half as many buckets; mask down one more power of two.
-			m >>= 1
-		}
-		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-		if !evacuated(oldb) {
-			b = oldb
-		}
-	}
-	top := tophash(hash)
-	for ; b != nil; b = b.overflow(t) {
-		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-			k := (*stringStruct)(kptr)
-			if k.len != key.len || b.tophash[i] != top {
-				continue
-			}
-			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
-				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
-			}
-		}
-	}
-	return unsafe.Pointer(&zeroVal[0])
-}
-
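For keys of 32 bytes or more, mapaccess1_faststr above filters candidates with length, pointer identity, and 4-byte prefix/suffix probes before paying for a full memequal, and it hashes only when two slots survive the filter. A condensed sketch of that filter on plain strings (hypothetical helper, not the deleted code itself):

```go
// likelyEqual reports whether k can still be the sought key after the
// cheap probes used for long string keys. A single surviving candidate
// is confirmed with one full compare ("keymaybe"); a second survivor
// forces the hashed lookup path instead.
func likelyEqual(k, key string) bool {
	if len(k) != len(key) || len(key) < 4 {
		return false // short keys take the brute-force path instead
	}
	return k[:4] == key[:4] && k[len(k)-4:] == key[len(key)-4:]
}
```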
-// mapaccess2_faststr should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapaccess2_faststr
-func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
-	if raceenabled && h != nil {
-		callerpc := sys.GetCallerPC()
-		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
-	}
-	if h == nil || h.count == 0 {
-		return unsafe.Pointer(&zeroVal[0]), false
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map read and map write")
-	}
-	key := stringStructOf(&ky)
-	if h.B == 0 {
-		// One-bucket table.
-		b := (*bmap)(h.buckets)
-		if key.len < 32 {
-			// short key, doing lots of comparisons is ok
-			for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-				k := (*stringStruct)(kptr)
-				if k.len != key.len || isEmpty(b.tophash[i]) {
-					if b.tophash[i] == emptyRest {
-						break
-					}
-					continue
-				}
-				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
-					return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
-				}
-			}
-			return unsafe.Pointer(&zeroVal[0]), false
-		}
-		// long key, try not to do more comparisons than necessary
-		keymaybe := uintptr(abi.OldMapBucketCount)
-		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-			k := (*stringStruct)(kptr)
-			if k.len != key.len || isEmpty(b.tophash[i]) {
-				if b.tophash[i] == emptyRest {
-					break
-				}
-				continue
-			}
-			if k.str == key.str {
-				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
-			}
-			// check first 4 bytes
-			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
-				continue
-			}
-			// check last 4 bytes
-			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
-				continue
-			}
-			if keymaybe != abi.OldMapBucketCount {
-				// Two keys are potential matches. Use hash to distinguish them.
-				goto dohash
-			}
-			keymaybe = i
-		}
-		if keymaybe != abi.OldMapBucketCount {
-			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
-			if memequal(k.str, key.str, uintptr(key.len)) {
-				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
-			}
-		}
-		return unsafe.Pointer(&zeroVal[0]), false
-	}
-dohash:
-	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
-	m := bucketMask(h.B)
-	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-	if c := h.oldbuckets; c != nil {
-		if !h.sameSizeGrow() {
-			// There used to be half as many buckets; mask down one more power of two.
-			m >>= 1
-		}
-		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-		if !evacuated(oldb) {
-			b = oldb
-		}
-	}
-	top := tophash(hash)
-	for ; b != nil; b = b.overflow(t) {
-		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-			k := (*stringStruct)(kptr)
-			if k.len != key.len || b.tophash[i] != top {
-				continue
-			}
-			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
-				return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
-			}
-		}
-	}
-	return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// mapassign_faststr should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign_faststr
-func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
-	if h == nil {
-		panic(plainError("assignment to entry in nil map"))
-	}
-	if raceenabled {
-		callerpc := sys.GetCallerPC()
-		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map writes")
-	}
-	key := stringStructOf(&s)
-	hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
-
-	// Set hashWriting after calling t.hasher for consistency with mapassign.
-	h.flags ^= hashWriting
-
-	if h.buckets == nil {
-		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
-	}
-
-again:
-	bucket := hash & bucketMask(h.B)
-	if h.growing() {
-		growWork_faststr(t, h, bucket)
-	}
-	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-	top := tophash(hash)
-
-	var insertb *bmap
-	var inserti uintptr
-	var insertk unsafe.Pointer
-
-bucketloop:
-	for {
-		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-			if b.tophash[i] != top {
-				if isEmpty(b.tophash[i]) && insertb == nil {
-					insertb = b
-					inserti = i
-				}
-				if b.tophash[i] == emptyRest {
-					break bucketloop
-				}
-				continue
-			}
-			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
-			if k.len != key.len {
-				continue
-			}
-			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
-				continue
-			}
-			// already have a mapping for key. Update it.
-			inserti = i
-			insertb = b
-			// Overwrite existing key, so it can be garbage collected.
-			// The size is already guaranteed to be set correctly.
-			k.str = key.str
-			goto done
-		}
-		ovf := b.overflow(t)
-		if ovf == nil {
-			break
-		}
-		b = ovf
-	}
-
-	// Did not find mapping for key. Allocate new cell & add entry.
-
-	// If we hit the max load factor or we have too many overflow buckets,
-	// and we're not already in the middle of growing, start growing.
-	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
-		hashGrow(t, h)
-		goto again // Growing the table invalidates everything, so try again
-	}
-
-	if insertb == nil {
-		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
-		insertb = h.newoverflow(t, b)
-		inserti = 0 // not necessary, but avoids needlessly spilling inserti
-	}
-	insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = top // mask inserti to avoid bounds checks
-
-	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
-	// store new key at insert position
-	*((*stringStruct)(insertk)) = *key
-	h.count++
-
-done:
-	elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
-	if h.flags&hashWriting == 0 {
-		fatal("concurrent map writes")
-	}
-	h.flags &^= hashWriting
-	return elem
-}
-
-func mapdelete_faststr(t *maptype, h *hmap, ky string) {
-	if raceenabled && h != nil {
-		callerpc := sys.GetCallerPC()
-		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
-	}
-	if h == nil || h.count == 0 {
-		return
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map writes")
-	}
-
-	key := stringStructOf(&ky)
-	hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
-
-	// Set hashWriting after calling t.hasher for consistency with mapdelete
-	h.flags ^= hashWriting
-
-	bucket := hash & bucketMask(h.B)
-	if h.growing() {
-		growWork_faststr(t, h, bucket)
-	}
-	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-	bOrig := b
-	top := tophash(hash)
-search:
-	for ; b != nil; b = b.overflow(t) {
-		for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
-			k := (*stringStruct)(kptr)
-			if k.len != key.len || b.tophash[i] != top {
-				continue
-			}
-			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
-				continue
-			}
-			// Clear key's pointer.
-			k.str = nil
-			e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize))
-			if t.Elem.Pointers() {
-				memclrHasPointers(e, t.Elem.Size_)
-			} else {
-				memclrNoHeapPointers(e, t.Elem.Size_)
-			}
-			b.tophash[i] = emptyOne
-			// If the bucket now ends in a bunch of emptyOne states,
-			// change those to emptyRest states.
-			if i == abi.OldMapBucketCount-1 {
-				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
-					goto notLast
-				}
-			} else {
-				if b.tophash[i+1] != emptyRest {
-					goto notLast
-				}
-			}
-			for {
-				b.tophash[i] = emptyRest
-				if i == 0 {
-					if b == bOrig {
-						break // beginning of initial bucket, we're done.
-					}
-					// Find previous bucket, continue at its last entry.
-					c := b
-					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
-					}
-					i = abi.OldMapBucketCount - 1
-				} else {
-					i--
-				}
-				if b.tophash[i] != emptyOne {
-					break
-				}
-			}
-		notLast:
-			h.count--
-			// Reset the hash seed to make it more difficult for attackers to
-			// repeatedly trigger hash collisions. See issue 25237.
-			if h.count == 0 {
-				h.hash0 = uint32(rand())
-			}
-			break search
-		}
-	}
-
-	if h.flags&hashWriting == 0 {
-		fatal("concurrent map writes")
-	}
-	h.flags &^= hashWriting
-}
-
-func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
-	// make sure we evacuate the oldbucket corresponding
-	// to the bucket we're about to use
-	evacuate_faststr(t, h, bucket&h.oldbucketmask())
-
-	// evacuate one more oldbucket to make progress on growing
-	if h.growing() {
-		evacuate_faststr(t, h, h.nevacuate)
-	}
-}
-
-func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
-	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
-	newbit := h.noldbuckets()
-	if !evacuated(b) {
-		// TODO: reuse overflow buckets instead of using new ones, if there
-		// is no iterator using the old buckets. (If !oldIterator.)
-
-		// xy contains the x and y (low and high) evacuation destinations.
-		var xy [2]evacDst
-		x := &xy[0]
-		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
-		x.k = add(unsafe.Pointer(x.b), dataOffset)
-		x.e = add(x.k, abi.OldMapBucketCount*2*goarch.PtrSize)
-
-		if !h.sameSizeGrow() {
-			// Only calculate y pointers if we're growing bigger.
-			// Otherwise GC can see bad pointers.
-			y := &xy[1]
-			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
-			y.k = add(unsafe.Pointer(y.b), dataOffset)
-			y.e = add(y.k, abi.OldMapBucketCount*2*goarch.PtrSize)
-		}
-
-		for ; b != nil; b = b.overflow(t) {
-			k := add(unsafe.Pointer(b), dataOffset)
-			e := add(k, abi.OldMapBucketCount*2*goarch.PtrSize)
-			for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
-				top := b.tophash[i]
-				if isEmpty(top) {
-					b.tophash[i] = evacuatedEmpty
-					continue
-				}
-				if top < minTopHash {
-					throw("bad map state")
-				}
-				var useY uint8
-				if !h.sameSizeGrow() {
-					// Compute hash to make our evacuation decision (whether we need
-					// to send this key/elem to bucket x or bucket y).
-					hash := t.Hasher(k, uintptr(h.hash0))
-					if hash&newbit != 0 {
-						useY = 1
-					}
-				}
-
-				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
-				dst := &xy[useY]                 // evacuation destination
-
-				if dst.i == abi.OldMapBucketCount {
-					dst.b = h.newoverflow(t, dst.b)
-					dst.i = 0
-					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-					dst.e = add(dst.k, abi.OldMapBucketCount*2*goarch.PtrSize)
-				}
-				dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-
-				// Copy key.
-				*(*string)(dst.k) = *(*string)(k)
-
-				typedmemmove(t.Elem, dst.e, e)
-				dst.i++
-				// These updates might push these pointers past the end of the
-				// key or elem arrays. That's ok, as we have the overflow pointer
-				// at the end of the bucket to protect against pointing past the
-				// end of the bucket.
-				dst.k = add(dst.k, 2*goarch.PtrSize)
-				dst.e = add(dst.e, uintptr(t.ValueSize))
-			}
-		}
-		// Unlink the overflow buckets & clear key/elem to help GC.
-		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
-			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
-			// Preserve b.tophash because the evacuation
-			// state is maintained there.
-			ptr := add(b, dataOffset)
-			n := uintptr(t.BucketSize) - dataOffset
-			memclrHasPointers(ptr, n)
-		}
-	}
-
-	if oldbucket == h.nevacuate {
-		advanceEvacuationMark(h, t, newbit)
-	}
-}
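Both delete paths above end with the same cleanup: after marking a slot emptyOne, they convert a trailing run of emptyOne cells into emptyRest so future lookups can stop scanning early. A sketch of that backward scan on a single bucket with no overflow chain (the deleted code additionally walks back across the overflow list):

```go
const (
	emptyRest = 0 // no more entries at higher indexes or overflows
	emptyOne  = 1 // this cell is empty
)

// markEmptyRest upgrades the slot just deleted at index i, plus any
// emptyOne run immediately before it, when nothing non-empty follows.
func markEmptyRest(tophash *[8]uint8, i int) {
	if i < 7 && tophash[i+1] != emptyRest {
		return // a later slot may still hold data; keep emptyOne
	}
	for i >= 0 && tophash[i] == emptyOne {
		tophash[i] = emptyRest
		i--
	}
}
```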
diff --git a/src/runtime/map_noswiss.go b/src/runtime/map_noswiss.go
deleted file mode 100644
index 7b3c98eb88..0000000000
--- a/src/runtime/map_noswiss.go
+++ /dev/null
@@ -1,1891 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-// This file contains the implementation of Go's map type.
-//
-// A map is just a hash table. The data is arranged
-// into an array of buckets. Each bucket contains up to
-// 8 key/elem pairs. The low-order bits of the hash are
-// used to select a bucket. Each bucket contains a few
-// high-order bits of each hash to distinguish the entries
-// within a single bucket.
-//
-// If more than 8 keys hash to a bucket, we chain on
-// extra buckets.
-//
-// When the hashtable grows, we allocate a new array
-// of buckets twice as big. Buckets are incrementally
-// copied from the old bucket array to the new bucket array.
-//
-// Map iterators walk through the array of buckets and
-// return the keys in walk order (bucket #, then overflow
-// chain order, then bucket index). To maintain iteration
-// semantics, we never move keys within their bucket (if
-// we did, keys might be returned 0 or 2 times). When
-// growing the table, iterators remain iterating through the
-// old table and must check the new table if the bucket
-// they are iterating through has been moved ("evacuated")
-// to the new table.
-
-// Picking loadFactor: too large and we have lots of overflow
-// buckets, too small and we waste a lot of space. I wrote
-// a simple program to check some stats for different loads:
-// (64-bit, 8 byte keys and elems)
-//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
-//        4.00         2.13        20.77         3.00         4.00
-//        4.50         4.05        17.30         3.25         4.50
-//        5.00         6.85        14.77         3.50         5.00
-//        5.50        10.55        12.94         3.75         5.50
-//        6.00        15.27        11.67         4.00         6.00
-//        6.50        20.90        10.79         4.25         6.50
-//        7.00        27.14        10.15         4.50         7.00
-//        7.50        34.03         9.73         4.75         7.50
-//        8.00        41.10         9.40         5.00         8.00
-//
-// %overflow   = percentage of buckets which have an overflow bucket
-// bytes/entry = overhead bytes used per key/elem pair
-// hitprobe    = # of entries to check when looking up a present key
-// missprobe   = # of entries to check when looking up an absent key
-//
-// Keep in mind this data is for maximally loaded tables, i.e. just
-// before the table grows. Typical tables will be somewhat less loaded.
-
-import (
-	"internal/abi"
-	"internal/goarch"
-	"internal/runtime/atomic"
-	"internal/runtime/maps"
-	"internal/runtime/math"
-	"internal/runtime/sys"
-	"unsafe"
-)
-
-type maptype = abi.OldMapType
-
-const (
-	// Maximum number of key/elem pairs a bucket can hold.
-	bucketCntBits = abi.OldMapBucketCountBits
-
-	// Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
-	// Because of minimum alignment rules, bucketCnt is known to be at least 8.
-	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
-	loadFactorDen = 2
-	loadFactorNum = loadFactorDen * abi.OldMapBucketCount * 13 / 16
-
-	// data offset should be the size of the bmap struct, but needs to be
-	// aligned correctly. For amd64p32 this means 64-bit alignment
-	// even though pointers are 32 bit.
-	dataOffset = unsafe.Offsetof(struct {
-		b bmap
-		v int64
-	}{}.v)
-
-	// Possible tophash values. We reserve a few possibilities for special marks.
-	// Each bucket (including its overflow buckets, if any) will have either all or none of its
-	// entries in the evacuated* states (except during the evacuate() method, which only happens
-	// during map writes and thus no one else can observe the map during that time).
-	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
-	emptyOne       = 1 // this cell is empty
-	evacuatedX     = 2 // key/elem is valid.  Entry has been evacuated to first half of larger table.
-	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
-	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
-	minTopHash     = 5 // minimum tophash for a normal filled cell.
-
-	// flags
-	iterator     = 1 // there may be an iterator using buckets
-	oldIterator  = 2 // there may be an iterator using oldbuckets
-	hashWriting  = 4 // a goroutine is writing to the map
-	sameSizeGrow = 8 // the current map growth is to a new map of the same size
-
-	// sentinel bucket ID for iterator checks
-	noCheck = 1<<(8*goarch.PtrSize) - 1
-)
-
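The reserved values 0..4 are why tophash computation has to skip past minTopHash: an entry's stored top byte must never collide with the marker states above. A self-contained restatement of the calculation (same logic as the tophash helper deleted below; uintptr is word-sized on all supported Go platforms):

```go
package mapsketch

import "math/bits"

// tophashOf returns the top 8 bits of hash, bumped past the five
// reserved marker values so a live entry never looks like a marker.
func tophashOf(hash uintptr) uint8 {
	top := uint8(hash >> (bits.UintSize - 8))
	if top < 5 { // minTopHash
		top += 5
	}
	return top
}

// With this encoding, "slot is empty" is simply x <= emptyOne,
// which is what isEmpty below checks.
```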
-// isEmpty reports whether the given tophash array entry represents an empty bucket entry.
-func isEmpty(x uint8) bool {
-	return x <= emptyOne
-}
-
-// A header for a Go map.
-type hmap struct {
-	// Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
-	// Make sure this stays in sync with the compiler's definition.
-	count     int // # live cells == size of map.  Must be first (used by len() builtin)
-	flags     uint8
-	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
-	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
-	hash0     uint32 // hash seed
-
-	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
-	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
-	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
-	clearSeq   uint64
-
-	extra *mapextra // optional fields
-}
-
-// mapextra holds fields that are not present on all maps.
-type mapextra struct {
-	// If both key and elem do not contain pointers and are inline, then we mark bucket
-	// type as containing no pointers. This avoids scanning such maps.
-	// However, bmap.overflow is a pointer. In order to keep overflow buckets
-	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
-	// overflow and oldoverflow are only used if key and elem do not contain pointers.
-	// overflow contains overflow buckets for hmap.buckets.
-	// oldoverflow contains overflow buckets for hmap.oldbuckets.
-	// The indirection allows to store a pointer to the slice in hiter.
-	overflow    *[]*bmap
-	oldoverflow *[]*bmap
-
-	// nextOverflow holds a pointer to a free overflow bucket.
-	nextOverflow *bmap
-}
-
-// A bucket for a Go map.
-type bmap struct {
-	// tophash generally contains the top byte of the hash value
-	// for each key in this bucket. If tophash[0] < minTopHash,
-	// tophash[0] is a bucket evacuation state instead.
-	tophash [abi.OldMapBucketCount]uint8
-	// Followed by bucketCnt keys and then bucketCnt elems.
-	// NOTE: packing all the keys together and then all the elems together makes the
-	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
-	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
-	// Followed by an overflow pointer.
-}
-
-// A hash iteration structure.
-// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
-// and reflect/value.go to match the layout of this structure.
-type hiter struct {
-	key  unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
-	elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
-	t           *maptype
-	h           *hmap
-	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
-	bptr        *bmap          // current bucket
-	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
-	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
-	startBucket uintptr        // bucket iteration started at
-	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
-	wrapped     bool           // already wrapped around from end of bucket array to beginning
-	B           uint8
-	i           uint8
-	bucket      uintptr
-	checkBucket uintptr
-	clearSeq    uint64
-}
-
-// bucketShift returns 1<<b, optimized for code generation.
-func bucketShift(b uint8) uintptr {
-	// Masking the shift amount allows overflow checks to be elided.
-	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
-}
-
-// bucketMask returns 1<<b - 1, optimized for code generation.
-func bucketMask(b uint8) uintptr {
-	return bucketShift(b) - 1
-}
-
-// tophash calculates the tophash value for hash.
-func tophash(hash uintptr) uint8 {
-	top := uint8(hash >> (goarch.PtrSize*8 - 8))
-	if top < minTopHash {
-		top += minTopHash
-	}
-	return top
-}
-
-func evacuated(b *bmap) bool {
-	h := b.tophash[0]
-	return h > emptyOne && h < minTopHash
-}
-
-func (b *bmap) overflow(t *maptype) *bmap {
-	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
-}
-
-func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
-	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
-}
-
-func (b *bmap) keys() unsafe.Pointer {
-	return add(unsafe.Pointer(b), dataOffset)
-}
-
-// incrnoverflow increments h.noverflow.
-// noverflow counts the number of overflow buckets.
-// This is used to trigger same-size map growth.
-// See also tooManyOverflowBuckets.
-// To keep hmap small, noverflow is a uint16.
-// When there are few buckets, noverflow is an exact count.
-// When there are many buckets, noverflow is an approximate count.
-func (h *hmap) incrnoverflow() {
-	// We trigger same-size map growth if there are
-	// as many overflow buckets as buckets.
-	// We need to be able to count to 1<<h.B.
-	if h.B < 16 {
-		h.noverflow++
-		return
-	}
-	// Increment with probability 1/(1<<(h.B-15)).
-	// When we reach 1<<15 - 1, we will have approximately
-	// as many overflow buckets as buckets.
-	mask := uint32(1)<<(h.B-15) - 1
-	// Example: if h.B == 18, then mask == 7,
-	// and rand() & 7 == 0 with probability 1/8.
-	if uint32(rand())&mask == 0 {
-		h.noverflow++
-	}
-}
-
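incrnoverflow's sampling deserves a worked example: for B = 18 the mask is 1<<(18-15)-1 = 7, so the counter advances with probability 1/8 and each tick stands for roughly 8 real overflow buckets; a count near 1<<15 therefore approximates 1<<18 overflow buckets, i.e. one per regular bucket. The same arithmetic, isolated as a hypothetical helper that takes the random word as a parameter:

```go
func overflowTick(B uint8, rnd uint32, noverflow *uint16) {
	if B < 16 {
		*noverflow++ // exact count while the total still fits
		return
	}
	mask := uint32(1)<<(B-15) - 1
	if rnd&mask == 0 {
		*noverflow++ // sampled: expected value tracks the true count
	}
}
```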
-func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
-	var ovf *bmap
-	if h.extra != nil && h.extra.nextOverflow != nil {
-		// We have preallocated overflow buckets available.
-		// See makeBucketArray for more details.
-		ovf = h.extra.nextOverflow
-		if ovf.overflow(t) == nil {
-			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
-			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
-		} else {
-			// This is the last preallocated overflow bucket.
-			// Reset the overflow pointer on this bucket,
-			// which was set to a non-nil sentinel value.
-			ovf.setoverflow(t, nil)
-			h.extra.nextOverflow = nil
-		}
-	} else {
-		ovf = (*bmap)(newobject(t.Bucket))
-	}
-	h.incrnoverflow()
-	if !t.Bucket.Pointers() {
-		h.createOverflow()
-		*h.extra.overflow = append(*h.extra.overflow, ovf)
-	}
-	b.setoverflow(t, ovf)
-	return ovf
-}
-
-func (h *hmap) createOverflow() {
-	if h.extra == nil {
-		h.extra = new(mapextra)
-	}
-	if h.extra.overflow == nil {
-		h.extra.overflow = new([]*bmap)
-	}
-}
-
-func makemap64(t *maptype, hint int64, h *hmap) *hmap {
-	if int64(int(hint)) != hint {
-		hint = 0
-	}
-	return makemap(t, int(hint), h)
-}
-
-// makemap_small implements Go map creation for make(map[k]v) and
-// make(map[k]v, hint) when hint is known to be at most bucketCnt
-// at compile time and the map needs to be allocated on the heap.
-//
-// makemap_small should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname makemap_small
-func makemap_small() *hmap {
-	h := new(hmap)
-	h.hash0 = uint32(rand())
-	return h
-}
-
-// makemap implements Go map creation for make(map[k]v, hint).
-// If the compiler has determined that the map or the first bucket
-// can be created on the stack, h and/or bucket may be non-nil.
-// If h != nil, the map can be created directly in h.
-// If h.buckets != nil, bucket pointed to can be used as the first bucket.
-//
-// makemap should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname makemap
-func makemap(t *maptype, hint int, h *hmap) *hmap {
-	mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
-	if overflow || mem > maxAlloc {
-		hint = 0
-	}
-
-	// initialize Hmap
-	if h == nil {
-		h = new(hmap)
-	}
-	h.hash0 = uint32(rand())
-
-	// Find the size parameter B which will hold the requested # of elements.
-	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
-	B := uint8(0)
-	for overLoadFactor(hint, B) {
-		B++
-	}
-	h.B = B
-
-	// allocate initial hash table
-	// if B == 0, the buckets field is allocated lazily later (in mapassign)
-	// If hint is large zeroing this memory could take a while.
-	if h.B != 0 {
-		var nextOverflow *bmap
-		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
-		if nextOverflow != nil {
-			h.extra = new(mapextra)
-			h.extra.nextOverflow = nextOverflow
-		}
-	}
-
-	return h
-}
-
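makemap's sizing loop picks the smallest B whose 2^B buckets keep hint within the 6.5 average load. For hint = 100: 6.5·8 = 52 (B = 3) is too small, 6.5·16 = 104 (B = 4) fits, so the map starts with 16 buckets. The loop, extracted with the load-factor predicate inlined (a sketch, not the deleted code verbatim):

```go
func pickB(hint int) (B uint8) {
	over := func(count int, B uint8) bool {
		return count > 8 && uintptr(count) > 13*(uintptr(1)<<B)/2
	}
	for over(hint, B) {
		B++
	}
	return B // hint = 100 -> 4; hint <= 8 -> 0 (lazy one-bucket map)
}
```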
-// makeBucketArray initializes a backing array for map buckets.
-// 1<<b is the minimum number of buckets to allocate.
-// dirtyalloc should either be nil or a bucket array previously
-// allocated by makeBucketArray with the same t and b parameters.
-// If dirtyalloc is nil a new backing array will be alloced and
-// otherwise dirtyalloc will be cleared and reused as backing array.
-func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
-	base := bucketShift(b)
-	nbuckets := base
-	// For small b, overflow buckets are unlikely.
-	// Avoid the overhead of the calculation.
-	if b >= 4 {
-		// Add on the estimated number of overflow buckets
-		// required to insert the median number of elements
-		// used with this value of b.
-		nbuckets += bucketShift(b - 4)
-		sz := t.Bucket.Size_ * nbuckets
-		up := roundupsize(sz, !t.Bucket.Pointers())
-		if up != sz {
-			nbuckets = up / t.Bucket.Size_
-		}
-	}
-
-	if dirtyalloc == nil {
-		buckets = newarray(t.Bucket, int(nbuckets))
-	} else {
-		// dirtyalloc was previously generated by
-		// the above newarray(t.Bucket, int(nbuckets))
-		// but may not be empty.
-		buckets = dirtyalloc
-		size := t.Bucket.Size_ * nbuckets
-		if t.Bucket.Pointers() {
-			memclrHasPointers(buckets, size)
-		} else {
-			memclrNoHeapPointers(buckets, size)
-		}
-	}
-
-	if base != nbuckets {
-		// We preallocated some overflow buckets.
-		// To keep the overhead of tracking these overflow buckets to a minimum,
-		// we use the convention that if a preallocated overflow bucket's overflow
-		// pointer is nil, then there are more available by bumping the pointer.
-		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
-		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
-		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
-		last.setoverflow(t, (*bmap)(buckets))
-	}
-	return buckets, nextOverflow
-}
-
-// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
-// it will return a reference to the zero object for the elem type if
-// the key is not in the map.
-// NOTE: The returned pointer may keep the whole map live, so don't
-// hold onto it for very long.
-func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-	if raceenabled && h != nil {
-		callerpc := sys.GetCallerPC()
-		pc := abi.FuncPCABIInternal(mapaccess1)
-		racereadpc(unsafe.Pointer(h), callerpc, pc)
-		raceReadObjectPC(t.Key, key, callerpc, pc)
-	}
-	if msanenabled && h != nil {
-		msanread(key, t.Key.Size_)
-	}
-	if asanenabled && h != nil {
-		asanread(key, t.Key.Size_)
-	}
-	if h == nil || h.count == 0 {
-		if err := maps.OldMapKeyError(t, key); err != nil {
-			panic(err) // see issue 23734
-		}
-		return unsafe.Pointer(&zeroVal[0])
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map read and map write")
-	}
-	hash := t.Hasher(key, uintptr(h.hash0))
-	m := bucketMask(h.B)
-	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-	if c := h.oldbuckets; c != nil {
-		if !h.sameSizeGrow() {
-			// There used to be half as many buckets; mask down one more power of two.
-			m >>= 1
-		}
-		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-		if !evacuated(oldb) {
-			b = oldb
-		}
-	}
-	top := tophash(hash)
-bucketloop:
-	for ; b != nil; b = b.overflow(t) {
-		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-			if b.tophash[i] != top {
-				if b.tophash[i] == emptyRest {
-					break bucketloop
-				}
-				continue
-			}
-			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-			if t.IndirectKey() {
-				k = *((*unsafe.Pointer)(k))
-			}
-			if t.Key.Equal(key, k) {
-				e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-				if t.IndirectElem() {
-					e = *((*unsafe.Pointer)(e))
-				}
-				return e
-			}
-		}
-	}
-	return unsafe.Pointer(&zeroVal[0])
-}
-
-// mapaccess2 should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapaccess2
-func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
-	if raceenabled && h != nil {
-		callerpc := sys.GetCallerPC()
-		pc := abi.FuncPCABIInternal(mapaccess2)
-		racereadpc(unsafe.Pointer(h), callerpc, pc)
-		raceReadObjectPC(t.Key, key, callerpc, pc)
-	}
-	if msanenabled && h != nil {
-		msanread(key, t.Key.Size_)
-	}
-	if asanenabled && h != nil {
-		asanread(key, t.Key.Size_)
-	}
-	if h == nil || h.count == 0 {
-		if err := maps.OldMapKeyError(t, key); err != nil {
-			panic(err) // see issue 23734
-		}
-		return unsafe.Pointer(&zeroVal[0]), false
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map read and map write")
-	}
-	hash := t.Hasher(key, uintptr(h.hash0))
-	m := bucketMask(h.B)
-	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-	if c := h.oldbuckets; c != nil {
-		if !h.sameSizeGrow() {
-			// There used to be half as many buckets; mask down one more power of two.
-			m >>= 1
-		}
-		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-		if !evacuated(oldb) {
-			b = oldb
-		}
-	}
-	top := tophash(hash)
-bucketloop:
-	for ; b != nil; b = b.overflow(t) {
-		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-			if b.tophash[i] != top {
-				if b.tophash[i] == emptyRest {
-					break bucketloop
-				}
-				continue
-			}
-			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-			if t.IndirectKey() {
-				k = *((*unsafe.Pointer)(k))
-			}
-			if t.Key.Equal(key, k) {
-				e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-				if t.IndirectElem() {
-					e = *((*unsafe.Pointer)(e))
-				}
-				return e, true
-			}
-		}
-	}
-	return unsafe.Pointer(&zeroVal[0]), false
-}
-
-// returns both key and elem. Used by map iterator.
-func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
-	if h == nil || h.count == 0 {
-		return nil, nil
-	}
-	hash := t.Hasher(key, uintptr(h.hash0))
-	m := bucketMask(h.B)
-	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-	if c := h.oldbuckets; c != nil {
-		if !h.sameSizeGrow() {
-			// There used to be half as many buckets; mask down one more power of two.
-			m >>= 1
-		}
-		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
-		if !evacuated(oldb) {
-			b = oldb
-		}
-	}
-	top := tophash(hash)
-bucketloop:
-	for ; b != nil; b = b.overflow(t) {
-		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-			if b.tophash[i] != top {
-				if b.tophash[i] == emptyRest {
-					break bucketloop
-				}
-				continue
-			}
-			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-			if t.IndirectKey() {
-				k = *((*unsafe.Pointer)(k))
-			}
-			if t.Key.Equal(key, k) {
-				e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-				if t.IndirectElem() {
-					e = *((*unsafe.Pointer)(e))
-				}
-				return k, e
-			}
-		}
-	}
-	return nil, nil
-}
-
-func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
-	e := mapaccess1(t, h, key)
-	if e == unsafe.Pointer(&zeroVal[0]) {
-		return zero
-	}
-	return e
-}
-
-func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
-	e := mapaccess1(t, h, key)
-	if e == unsafe.Pointer(&zeroVal[0]) {
-		return zero, false
-	}
-	return e, true
-}
-
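Every lookup above repeats the same preamble: index the new bucket array, then re-mask the same hash with one fewer bit to find where the key would still sit in a not-yet-evacuated old bucket. Isolated as a hypothetical helper:

```go
// lookupBuckets returns the candidate bucket index in the current
// array and in the old array during a doubling grow, where the old
// array has half as many buckets ("mask down one more power of two").
func lookupBuckets(hash uintptr, B uint8, sameSize bool) (newIdx, oldIdx uintptr) {
	m := uintptr(1)<<B - 1
	newIdx = hash & m
	if !sameSize {
		m >>= 1
	}
	oldIdx = hash & m // the caller prefers this bucket until evacuated
	return
}
```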
-// Like mapaccess, but allocates a slot for the key if it is not present in the map.
-//
-// mapassign should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//   - github.com/RomiChan/protobuf
-//   - github.com/segmentio/encoding
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapassign
-func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-	if h == nil {
-		panic(plainError("assignment to entry in nil map"))
-	}
-	if raceenabled {
-		callerpc := sys.GetCallerPC()
-		pc := abi.FuncPCABIInternal(mapassign)
-		racewritepc(unsafe.Pointer(h), callerpc, pc)
-		raceReadObjectPC(t.Key, key, callerpc, pc)
-	}
-	if msanenabled {
-		msanread(key, t.Key.Size_)
-	}
-	if asanenabled {
-		asanread(key, t.Key.Size_)
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map writes")
-	}
-	hash := t.Hasher(key, uintptr(h.hash0))
-
-	// Set hashWriting after calling t.hasher, since t.hasher may panic,
-	// in which case we have not actually done a write.
-	h.flags ^= hashWriting
-
-	if h.buckets == nil {
-		h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
-	}
-
-again:
-	bucket := hash & bucketMask(h.B)
-	if h.growing() {
-		growWork(t, h, bucket)
-	}
-	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-	top := tophash(hash)
-
-	var inserti *uint8
-	var insertk unsafe.Pointer
-	var elem unsafe.Pointer
-bucketloop:
-	for {
-		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-			if b.tophash[i] != top {
-				if isEmpty(b.tophash[i]) && inserti == nil {
-					inserti = &b.tophash[i]
-					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-					elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-				}
-				if b.tophash[i] == emptyRest {
-					break bucketloop
-				}
-				continue
-			}
-			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-			if t.IndirectKey() {
-				k = *((*unsafe.Pointer)(k))
-			}
-			if !t.Key.Equal(key, k) {
-				continue
-			}
-			// already have a mapping for key. Update it.
-			if t.NeedKeyUpdate() {
-				typedmemmove(t.Key, k, key)
-			}
-			elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-			goto done
-		}
-		ovf := b.overflow(t)
-		if ovf == nil {
-			break
-		}
-		b = ovf
-	}
-
-	// Did not find mapping for key. Allocate new cell & add entry.
-
-	// If we hit the max load factor or we have too many overflow buckets,
-	// and we're not already in the middle of growing, start growing.
-	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
-		hashGrow(t, h)
-		goto again // Growing the table invalidates everything, so try again
-	}
-
-	if inserti == nil {
-		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
-		newb := h.newoverflow(t, b)
-		inserti = &newb.tophash[0]
-		insertk = add(unsafe.Pointer(newb), dataOffset)
-		elem = add(insertk, abi.OldMapBucketCount*uintptr(t.KeySize))
-	}
-
-	// store new key/elem at insert position
-	if t.IndirectKey() {
-		kmem := newobject(t.Key)
-		*(*unsafe.Pointer)(insertk) = kmem
-		insertk = kmem
-	}
-	if t.IndirectElem() {
-		vmem := newobject(t.Elem)
-		*(*unsafe.Pointer)(elem) = vmem
-	}
-	typedmemmove(t.Key, insertk, key)
-	*inserti = top
-	h.count++
-
-done:
-	if h.flags&hashWriting == 0 {
-		fatal("concurrent map writes")
-	}
-	h.flags &^= hashWriting
-	if t.IndirectElem() {
-		elem = *((*unsafe.Pointer)(elem))
-	}
-	return elem
-}
-
-// mapdelete should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname mapdelete
-func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
-	if raceenabled && h != nil {
-		callerpc := sys.GetCallerPC()
-		pc := abi.FuncPCABIInternal(mapdelete)
-		racewritepc(unsafe.Pointer(h), callerpc, pc)
-		raceReadObjectPC(t.Key, key, callerpc, pc)
-	}
-	if msanenabled && h != nil {
-		msanread(key, t.Key.Size_)
-	}
-	if asanenabled && h != nil {
-		asanread(key, t.Key.Size_)
-	}
-	if h == nil || h.count == 0 {
-		if err := maps.OldMapKeyError(t, key); err != nil {
-			panic(err) // see issue 23734
-		}
-		return
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map writes")
-	}
-
-	hash := t.Hasher(key, uintptr(h.hash0))
-
-	// Set hashWriting after calling t.hasher, since t.hasher may panic,
-	// in which case we have not actually done a write (delete).
-	h.flags ^= hashWriting
-
-	bucket := hash & bucketMask(h.B)
-	if h.growing() {
-		growWork(t, h, bucket)
-	}
-	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
-	bOrig := b
-	top := tophash(hash)
-search:
-	for ; b != nil; b = b.overflow(t) {
-		for i := uintptr(0); i < abi.OldMapBucketCount; i++ {
-			if b.tophash[i] != top {
-				if b.tophash[i] == emptyRest {
-					break search
-				}
-				continue
-			}
-			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
-			k2 := k
-			if t.IndirectKey() {
-				k2 = *((*unsafe.Pointer)(k2))
-			}
-			if !t.Key.Equal(key, k2) {
-				continue
-			}
-			// Only clear key if there are pointers in it.
-			if t.IndirectKey() {
-				*(*unsafe.Pointer)(k) = nil
-			} else if t.Key.Pointers() {
-				memclrHasPointers(k, t.Key.Size_)
-			}
-			e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
-			if t.IndirectElem() {
-				*(*unsafe.Pointer)(e) = nil
-			} else if t.Elem.Pointers() {
-				memclrHasPointers(e, t.Elem.Size_)
-			} else {
-				memclrNoHeapPointers(e, t.Elem.Size_)
-			}
-			b.tophash[i] = emptyOne
-			// If the bucket now ends in a bunch of emptyOne states,
-			// change those to emptyRest states.
-			// It would be nice to make this a separate function, but
-			// for loops are not currently inlineable.
-			if i == abi.OldMapBucketCount-1 {
-				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
-					goto notLast
-				}
-			} else {
-				if b.tophash[i+1] != emptyRest {
-					goto notLast
-				}
-			}
-			for {
-				b.tophash[i] = emptyRest
-				if i == 0 {
-					if b == bOrig {
-						break // beginning of initial bucket, we're done.
-					}
-					// Find previous bucket, continue at its last entry.
-					c := b
-					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
-					}
-					i = abi.OldMapBucketCount - 1
-				} else {
-					i--
-				}
-				if b.tophash[i] != emptyOne {
-					break
-				}
-			}
-		notLast:
-			h.count--
-			// Reset the hash seed to make it more difficult for attackers to
-			// repeatedly trigger hash collisions. See issue 25237.
-			if h.count == 0 {
-				h.hash0 = uint32(rand())
-			}
-			break search
-		}
-	}
-
-	if h.flags&hashWriting == 0 {
-		fatal("concurrent map writes")
-	}
-	h.flags &^= hashWriting
-}
-
-// mapiterinit initializes the hiter struct used for ranging over maps.
-// The hiter struct pointed to by 'it' is allocated on the stack
-// by the compilers order pass or on the heap by reflect_mapiterinit.
-// Both need to have zeroed hiter since the struct contains pointers.
-//
-// mapiterinit should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/goccy/go-json -// - github.com/RomiChan/protobuf -// - github.com/segmentio/encoding -// - github.com/ugorji/go/codec -// - github.com/wI2L/jettison -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapiterinit -func mapiterinit(t *maptype, h *hmap, it *hiter) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit)) - } - - it.t = t - if h == nil || h.count == 0 { - return - } - - if unsafe.Sizeof(hiter{}) != 8+12*goarch.PtrSize { - throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go - } - it.h = h - it.clearSeq = h.clearSeq - - // grab snapshot of bucket state - it.B = h.B - it.buckets = h.buckets - if !t.Bucket.Pointers() { - // Allocate the current slice and remember pointers to both current and old. - // This preserves all relevant overflow buckets alive even if - // the table grows and/or overflow buckets are added to the table - // while we are iterating. - h.createOverflow() - it.overflow = h.extra.overflow - it.oldoverflow = h.extra.oldoverflow - } - - // decide where to start - r := uintptr(rand()) - it.startBucket = r & bucketMask(h.B) - it.offset = uint8(r >> h.B & (abi.OldMapBucketCount - 1)) - - // iterator state - it.bucket = it.startBucket - - // Remember we have an iterator. - // Can run concurrently with another mapiterinit(). - if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator { - atomic.Or8(&h.flags, iterator|oldIterator) - } - - mapiternext(it) -} - -// mapiternext should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/RomiChan/protobuf -// - github.com/segmentio/encoding -// - github.com/ugorji/go/codec -// - gonum.org/v1/gonum -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapiternext -func mapiternext(it *hiter) { - h := it.h - if raceenabled { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext)) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map iteration and map write") - } - t := it.t - bucket := it.bucket - b := it.bptr - i := it.i - checkBucket := it.checkBucket - -next: - if b == nil { - if bucket == it.startBucket && it.wrapped { - // end of iteration - it.key = nil - it.elem = nil - return - } - if h.growing() && it.B == h.B { - // Iterator was started in the middle of a grow, and the grow isn't done yet. - // If the bucket we're looking at hasn't been filled in yet (i.e. the old - // bucket hasn't been evacuated) then we need to iterate through the old - // bucket and only return the ones that will be migrated to this bucket. 
- oldbucket := bucket & it.h.oldbucketmask() - b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))) - if !evacuated(b) { - checkBucket = bucket - } else { - b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize))) - checkBucket = noCheck - } - } else { - b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize))) - checkBucket = noCheck - } - bucket++ - if bucket == bucketShift(it.B) { - bucket = 0 - it.wrapped = true - } - i = 0 - } - for ; i < abi.OldMapBucketCount; i++ { - offi := (i + it.offset) & (abi.OldMapBucketCount - 1) - if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty { - // TODO: emptyRest is hard to use here, as we start iterating - // in the middle of a bucket. It's feasible, just tricky. - continue - } - k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize)) - if t.IndirectKey() { - k = *((*unsafe.Pointer)(k)) - } - e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize)) - if checkBucket != noCheck && !h.sameSizeGrow() { - // Special case: iterator was started during a grow to a larger size - // and the grow is not done yet. We're working on a bucket whose - // oldbucket has not been evacuated yet. Or at least, it wasn't - // evacuated when we started the bucket. So we're iterating - // through the oldbucket, skipping any keys that will go - // to the other new bucket (each oldbucket expands to two - // buckets during a grow). - if t.ReflexiveKey() || t.Key.Equal(k, k) { - // If the item in the oldbucket is not destined for - // the current new bucket in the iteration, skip it. - hash := t.Hasher(k, uintptr(h.hash0)) - if hash&bucketMask(it.B) != checkBucket { - continue - } - } else { - // Hash isn't repeatable if k != k (NaNs). We need a - // repeatable and randomish choice of which direction - // to send NaNs during evacuation. We'll use the low - // bit of tophash to decide which way NaNs go. - // NOTE: this case is why we need two evacuate tophash - // values, evacuatedX and evacuatedY, that differ in - // their low bit. - if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) { - continue - } - } - } - if it.clearSeq == h.clearSeq && - ((b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) || - !(t.ReflexiveKey() || t.Key.Equal(k, k))) { - // This is the golden data, we can return it. - // OR - // key!=key, so the entry can't be deleted or updated, so we can just return it. - // That's lucky for us because when key!=key we can't look it up successfully. - it.key = k - if t.IndirectElem() { - e = *((*unsafe.Pointer)(e)) - } - it.elem = e - } else { - // The hash table has grown since the iterator was started. - // The golden data for this key is now somewhere else. - // Check the current hash table for the data. - // This code handles the case where the key - // has been deleted, updated, or deleted and reinserted. - // NOTE: we need to regrab the key as it has potentially been - // updated to an equal() but not identical key (e.g. +0.0 vs -0.0). - rk, re := mapaccessK(t, h, k) - if rk == nil { - continue // key has been deleted - } - it.key = rk - it.elem = re - } - it.bucket = bucket - if it.bptr != b { // avoid unnecessary write barrier; see issue 14921 - it.bptr = b - } - it.i = i + 1 - it.checkBucket = checkBucket - return - } - b = b.overflow(t) - i = 0 - goto next -} - -// mapclear deletes all keys from a map. -// It is called by the compiler. 
-func mapclear(t *maptype, h *hmap) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(mapclear) - racewritepc(unsafe.Pointer(h), callerpc, pc) - } - - if h == nil || h.count == 0 { - return - } - - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - - h.flags ^= hashWriting - h.flags &^= sameSizeGrow - h.oldbuckets = nil - h.nevacuate = 0 - h.noverflow = 0 - h.count = 0 - h.clearSeq++ - - // Reset the hash seed to make it more difficult for attackers to - // repeatedly trigger hash collisions. See issue 25237. - h.hash0 = uint32(rand()) - - // Keep the mapextra allocation but clear any extra information. - if h.extra != nil { - *h.extra = mapextra{} - } - - // makeBucketArray clears the memory pointed to by h.buckets - // and recovers any overflow buckets by generating them - // as if h.buckets was newly alloced. - _, nextOverflow := makeBucketArray(t, h.B, h.buckets) - if nextOverflow != nil { - // If overflow buckets are created then h.extra - // will have been allocated during initial bucket creation. - h.extra.nextOverflow = nextOverflow - } - - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting -} - -func hashGrow(t *maptype, h *hmap) { - // If we've hit the load factor, get bigger. - // Otherwise, there are too many overflow buckets, - // so keep the same number of buckets and "grow" laterally. - bigger := uint8(1) - if !overLoadFactor(h.count+1, h.B) { - bigger = 0 - h.flags |= sameSizeGrow - } - oldbuckets := h.buckets - newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil) - - flags := h.flags &^ (iterator | oldIterator) - if h.flags&iterator != 0 { - flags |= oldIterator - } - // commit the grow (atomic wrt gc) - h.B += bigger - h.flags = flags - h.oldbuckets = oldbuckets - h.buckets = newbuckets - h.nevacuate = 0 - h.noverflow = 0 - - if h.extra != nil && h.extra.overflow != nil { - // Promote current overflow buckets to the old generation. - if h.extra.oldoverflow != nil { - throw("oldoverflow is not nil") - } - h.extra.oldoverflow = h.extra.overflow - h.extra.overflow = nil - } - if nextOverflow != nil { - if h.extra == nil { - h.extra = new(mapextra) - } - h.extra.nextOverflow = nextOverflow - } - - // the actual copying of the hash table data is done incrementally - // by growWork() and evacuate(). -} - -// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor. -func overLoadFactor(count int, B uint8) bool { - return count > abi.OldMapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen) -} - -// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets. -// Note that most of these overflow buckets must be in sparse use; -// if use was dense, then we'd have already triggered regular map growth. -func tooManyOverflowBuckets(noverflow uint16, B uint8) bool { - // If the threshold is too low, we do extraneous work. - // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory. - // "too many" means (approximately) as many overflow buckets as regular buckets. - // See incrnoverflow for more details. - if B > 15 { - B = 15 - } - // The compiler doesn't see here that B < 16; mask B to generate shorter shift code. - return noverflow >= uint16(1)<<(B&15) -} - -// growing reports whether h is growing. The growth may be to the same size or bigger. 
-func (h *hmap) growing() bool { - return h.oldbuckets != nil -} - -// sameSizeGrow reports whether the current growth is to a map of the same size. -func (h *hmap) sameSizeGrow() bool { - return h.flags&sameSizeGrow != 0 -} - -//go:linkname sameSizeGrowForIssue69110Test -func sameSizeGrowForIssue69110Test(h *hmap) bool { - return h.sameSizeGrow() -} - -// noldbuckets calculates the number of buckets prior to the current map growth. -func (h *hmap) noldbuckets() uintptr { - oldB := h.B - if !h.sameSizeGrow() { - oldB-- - } - return bucketShift(oldB) -} - -// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets(). -func (h *hmap) oldbucketmask() uintptr { - return h.noldbuckets() - 1 -} - -func growWork(t *maptype, h *hmap, bucket uintptr) { - // make sure we evacuate the oldbucket corresponding - // to the bucket we're about to use - evacuate(t, h, bucket&h.oldbucketmask()) - - // evacuate one more oldbucket to make progress on growing - if h.growing() { - evacuate(t, h, h.nevacuate) - } -} - -func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool { - b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize))) - return evacuated(b) -} - -// evacDst is an evacuation destination. -type evacDst struct { - b *bmap // current destination bucket - i int // key/elem index into b - k unsafe.Pointer // pointer to current key storage - e unsafe.Pointer // pointer to current elem storage -} - -func evacuate(t *maptype, h *hmap, oldbucket uintptr) { - b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))) - newbit := h.noldbuckets() - if !evacuated(b) { - // TODO: reuse overflow buckets instead of using new ones, if there - // is no iterator using the old buckets. (If !oldIterator.) - - // xy contains the x and y (low and high) evacuation destinations. - var xy [2]evacDst - x := &xy[0] - x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize))) - x.k = add(unsafe.Pointer(x.b), dataOffset) - x.e = add(x.k, abi.OldMapBucketCount*uintptr(t.KeySize)) - - if !h.sameSizeGrow() { - // Only calculate y pointers if we're growing bigger. - // Otherwise GC can see bad pointers. - y := &xy[1] - y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) - y.k = add(unsafe.Pointer(y.b), dataOffset) - y.e = add(y.k, abi.OldMapBucketCount*uintptr(t.KeySize)) - } - - for ; b != nil; b = b.overflow(t) { - k := add(unsafe.Pointer(b), dataOffset) - e := add(k, abi.OldMapBucketCount*uintptr(t.KeySize)) - for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) { - top := b.tophash[i] - if isEmpty(top) { - b.tophash[i] = evacuatedEmpty - continue - } - if top < minTopHash { - throw("bad map state") - } - k2 := k - if t.IndirectKey() { - k2 = *((*unsafe.Pointer)(k2)) - } - var useY uint8 - if !h.sameSizeGrow() { - // Compute hash to make our evacuation decision (whether we need - // to send this key/elem to bucket x or bucket y). - hash := t.Hasher(k2, uintptr(h.hash0)) - if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) { - // If key != key (NaNs), then the hash could be (and probably - // will be) entirely different from the old hash. Moreover, - // it isn't reproducible. Reproducibility is required in the - // presence of iterators, as our evacuation decision must - // match whatever decision the iterator made. - // Fortunately, we have the freedom to send these keys either - // way. Also, tophash is meaningless for these kinds of keys. 
-						// We let the low bit of tophash drive the evacuation decision.
-						// We recompute a new random tophash for the next level so
-						// these keys will get evenly distributed across all buckets
-						// after multiple grows.
-						useY = top & 1
-						top = tophash(hash)
-					} else {
-						if hash&newbit != 0 {
-							useY = 1
-						}
-					}
-				}
-
-				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
-					throw("bad evacuatedN")
-				}
-
-				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
-				dst := &xy[useY]                 // evacuation destination
-
-				if dst.i == abi.OldMapBucketCount {
-					dst.b = h.newoverflow(t, dst.b)
-					dst.i = 0
-					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-					dst.e = add(dst.k, abi.OldMapBucketCount*uintptr(t.KeySize))
-				}
-				dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-				if t.IndirectKey() {
-					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
-				} else {
-					typedmemmove(t.Key, dst.k, k) // copy key
-				}
-				if t.IndirectElem() {
-					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
-				} else {
-					typedmemmove(t.Elem, dst.e, e)
-				}
-				dst.i++
-				// These updates might push these pointers past the end of the
-				// key or elem arrays. That's ok, as we have the overflow pointer
-				// at the end of the bucket to protect against pointing past the
-				// end of the bucket.
-				dst.k = add(dst.k, uintptr(t.KeySize))
-				dst.e = add(dst.e, uintptr(t.ValueSize))
-			}
-		}
-		// Unlink the overflow buckets & clear key/elem to help GC.
-		if h.flags&oldIterator == 0 && t.Bucket.Pointers() {
-			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
-			// Preserve b.tophash because the evacuation
-			// state is maintained there.
-			ptr := add(b, dataOffset)
-			n := uintptr(t.BucketSize) - dataOffset
-			memclrHasPointers(ptr, n)
-		}
-	}
-
-	if oldbucket == h.nevacuate {
-		advanceEvacuationMark(h, t, newbit)
-	}
-}
-
-func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
-	h.nevacuate++
-	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
-	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
-	stop := h.nevacuate + 1024
-	if stop > newbit {
-		stop = newbit
-	}
-	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
-		h.nevacuate++
-	}
-	if h.nevacuate == newbit { // newbit == # of oldbuckets
-		// Growing is all done. Free old main bucket array.
-		h.oldbuckets = nil
-		// Can discard old overflow buckets as well.
-		// If they are still referenced by an iterator,
-		// then the iterator holds a pointer to the slice.
-		if h.extra != nil {
-			h.extra.oldoverflow = nil
-		}
-		h.flags &^= sameSizeGrow
-	}
-}
-
-// Reflect stubs. Called from ../reflect/asm_*.s
-
-// reflect_makemap is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - gitee.com/quant1x/gox
-//   - github.com/modern-go/reflect2
-//   - github.com/goccy/go-json
-//   - github.com/RomiChan/protobuf
-//   - github.com/segmentio/encoding
-//   - github.com/v2pro/plz
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_makemap reflect.makemap
-func reflect_makemap(t *maptype, cap int) *hmap {
-	// Check invariants and reflect's math.
- if t.Key.Equal == nil { - throw("runtime.reflect_makemap: unsupported map key type") - } - if t.Key.Size_ > abi.OldMapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) || - t.Key.Size_ <= abi.OldMapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) { - throw("key size wrong") - } - if t.Elem.Size_ > abi.OldMapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) || - t.Elem.Size_ <= abi.OldMapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) { - throw("elem size wrong") - } - if t.Key.Align_ > abi.OldMapBucketCount { - throw("key align too big") - } - if t.Elem.Align_ > abi.OldMapBucketCount { - throw("elem align too big") - } - if t.Key.Size_%uintptr(t.Key.Align_) != 0 { - throw("key size not a multiple of key align") - } - if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 { - throw("elem size not a multiple of elem align") - } - if abi.OldMapBucketCount < 8 { - throw("bucketsize too small for proper alignment") - } - if dataOffset%uintptr(t.Key.Align_) != 0 { - throw("need padding in bucket (key)") - } - if dataOffset%uintptr(t.Elem.Align_) != 0 { - throw("need padding in bucket (elem)") - } - - return makemap(t, cap, nil) -} - -// reflect_mapaccess is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - gitee.com/quant1x/gox -// - github.com/modern-go/reflect2 -// - github.com/v2pro/plz -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_mapaccess reflect.mapaccess -func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { - elem, ok := mapaccess2(t, h, key) - if !ok { - // reflect wants nil for a missing element - elem = nil - } - return elem -} - -//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr -func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer { - elem, ok := mapaccess2_faststr(t, h, key) - if !ok { - // reflect wants nil for a missing element - elem = nil - } - return elem -} - -// reflect_mapassign is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - gitee.com/quant1x/gox -// - github.com/v2pro/plz -// -// Do not remove or change the type signature. -// -//go:linkname reflect_mapassign reflect.mapassign0 -func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) { - p := mapassign(t, h, key) - typedmemmove(t.Elem, p, elem) -} - -//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0 -func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) { - p := mapassign_faststr(t, h, key) - typedmemmove(t.Elem, p, elem) -} - -//go:linkname reflect_mapdelete reflect.mapdelete -func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { - mapdelete(t, h, key) -} - -//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr -func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) { - mapdelete_faststr(t, h, key) -} - -// reflect_mapiterinit is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/modern-go/reflect2 -// - gitee.com/quant1x/gox -// - github.com/v2pro/plz -// - github.com/wI2L/jettison -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. 
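These stubs are the private half of the public reflect API; reflect.MakeMapWithSize, for instance, bottoms out in reflect_makemap above via the reflect.makemap linkname. A short usage view from ordinary Go, before the remaining iterator stubs below:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	t := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0))
	m := reflect.MakeMapWithSize(t, 16) // the size hint flows through to makemap
	m.SetMapIndex(reflect.ValueOf("a"), reflect.ValueOf(1))
	fmt.Println(m.Interface().(map[string]int)["a"]) // 1
}
```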
-//
-//go:linkname reflect_mapiterinit reflect.mapiterinit
-func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
-	mapiterinit(t, h, it)
-}
-
-// reflect_mapiternext is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - gitee.com/quant1x/gox
-//   - github.com/modern-go/reflect2
-//   - github.com/goccy/go-json
-//   - github.com/v2pro/plz
-//   - github.com/wI2L/jettison
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapiternext reflect.mapiternext
-func reflect_mapiternext(it *hiter) {
-	mapiternext(it)
-}
-
-// reflect_mapiterkey was for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/goccy/go-json
-//   - gonum.org/v1/gonum
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapiterkey reflect.mapiterkey
-func reflect_mapiterkey(it *hiter) unsafe.Pointer {
-	return it.key
-}
-
-// reflect_mapiterelem was for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/goccy/go-json
-//   - gonum.org/v1/gonum
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_mapiterelem reflect.mapiterelem
-func reflect_mapiterelem(it *hiter) unsafe.Pointer {
-	return it.elem
-}
-
-// reflect_maplen is for package reflect,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/goccy/go-json
-//   - github.com/wI2L/jettison
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname reflect_maplen reflect.maplen
-func reflect_maplen(h *hmap) int {
-	if h == nil {
-		return 0
-	}
-	if raceenabled {
-		callerpc := sys.GetCallerPC()
-		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
-	}
-	return h.count
-}
-
-//go:linkname reflect_mapclear reflect.mapclear
-func reflect_mapclear(t *maptype, h *hmap) {
-	mapclear(t, h)
-}
-
-//go:linkname reflectlite_maplen internal/reflectlite.maplen
-func reflectlite_maplen(h *hmap) int {
-	if h == nil {
-		return 0
-	}
-	if raceenabled {
-		callerpc := sys.GetCallerPC()
-		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
-	}
-	return h.count
-}
-
-// mapinitnoop is a no-op function known to the Go linker; if a given global
-// map (of the right size) is determined to be dead, the linker will
-// rewrite the relocation (from the package init func) from the outlined
-// map init function to this symbol. Defined in assembly so as to avoid
-// complications with instrumentation (coverage, etc).
-func mapinitnoop()
-
-// mapclone implements maps.Clone.
-//
-//go:linkname mapclone maps.clone
-func mapclone(m any) any {
-	e := efaceOf(&m)
-	e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
-	return m
-}
-
-// moveToBmap moves the entries of bucket src into dst. It returns the
-// destination bucket (a new one if dst overflows) and the position at which
-// the next key/elem will be written; pos == bucketCnt means the next entry
-// must go into an overflow bucket.
-func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) { - for i := 0; i < abi.OldMapBucketCount; i++ { - if isEmpty(src.tophash[i]) { - continue - } - - for ; pos < abi.OldMapBucketCount; pos++ { - if isEmpty(dst.tophash[pos]) { - break - } - } - - if pos == abi.OldMapBucketCount { - dst = h.newoverflow(t, dst) - pos = 0 - } - - srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize)) - srcEle := add(unsafe.Pointer(src), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize)) - dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize)) - dstEle := add(unsafe.Pointer(dst), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize)) - - dst.tophash[pos] = src.tophash[i] - if t.IndirectKey() { - srcK = *(*unsafe.Pointer)(srcK) - if t.NeedKeyUpdate() { - kStore := newobject(t.Key) - typedmemmove(t.Key, kStore, srcK) - srcK = kStore - } - // Note: if NeedKeyUpdate is false, then the memory - // used to store the key is immutable, so we can share - // it between the original map and its clone. - *(*unsafe.Pointer)(dstK) = srcK - } else { - typedmemmove(t.Key, dstK, srcK) - } - if t.IndirectElem() { - srcEle = *(*unsafe.Pointer)(srcEle) - eStore := newobject(t.Elem) - typedmemmove(t.Elem, eStore, srcEle) - *(*unsafe.Pointer)(dstEle) = eStore - } else { - typedmemmove(t.Elem, dstEle, srcEle) - } - pos++ - h.count++ - } - return dst, pos -} - -func mapclone2(t *maptype, src *hmap) *hmap { - hint := src.count - if overLoadFactor(hint, src.B) { - // Note: in rare cases (e.g. during a same-sized grow) the map - // can be overloaded. Make sure we don't allocate a destination - // bucket array larger than the source bucket array. - // This will cause the cloned map to be overloaded also, - // but that's better than crashing. See issue 69110. - hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen)) - } - dst := makemap(t, hint, nil) - dst.hash0 = src.hash0 - dst.nevacuate = 0 - // flags do not need to be copied here, just like a new map has no flags. - - if src.count == 0 { - return dst - } - - if src.flags&hashWriting != 0 { - fatal("concurrent map clone and map write") - } - - if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() { - // Quick copy for small maps. 
- dst.buckets = newobject(t.Bucket) - dst.count = src.count - typedmemmove(t.Bucket, dst.buckets, src.buckets) - return dst - } - - if dst.B == 0 { - dst.buckets = newobject(t.Bucket) - } - dstArraySize := int(bucketShift(dst.B)) - srcArraySize := int(bucketShift(src.B)) - for i := 0; i < dstArraySize; i++ { - dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize)))) - pos := 0 - for j := 0; j < srcArraySize; j += dstArraySize { - srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize)))) - for srcBmap != nil { - dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap) - srcBmap = srcBmap.overflow(t) - } - } - } - - if src.oldbuckets == nil { - return dst - } - - oldB := src.B - srcOldbuckets := src.oldbuckets - if !src.sameSizeGrow() { - oldB-- - } - oldSrcArraySize := int(bucketShift(oldB)) - - for i := 0; i < oldSrcArraySize; i++ { - srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize)))) - if evacuated(srcBmap) { - continue - } - - if oldB >= dst.B { // main bucket bits in dst is less than oldB bits in src - dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize))) - for dstBmap.overflow(t) != nil { - dstBmap = dstBmap.overflow(t) - } - pos := 0 - for srcBmap != nil { - dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap) - srcBmap = srcBmap.overflow(t) - } - continue - } - - // oldB < dst.B, so a single source bucket may go to multiple destination buckets. - // Process entries one at a time. - for srcBmap != nil { - // move from oldBlucket to new bucket - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if isEmpty(srcBmap.tophash[i]) { - continue - } - - if src.flags&hashWriting != 0 { - fatal("concurrent map clone and map write") - } - - srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize)) - if t.IndirectKey() { - srcK = *((*unsafe.Pointer)(srcK)) - } - - srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) - if t.IndirectElem() { - srcEle = *((*unsafe.Pointer)(srcEle)) - } - dstEle := mapassign(t, dst, srcK) - typedmemmove(t.Elem, dstEle, srcEle) - } - srcBmap = srcBmap.overflow(t) - } - } - return dst -} - -// keys for implementing maps.keys -// -//go:linkname keys maps.keys -func keys(m any, p unsafe.Pointer) { - e := efaceOf(&m) - t := (*maptype)(unsafe.Pointer(e._type)) - h := (*hmap)(e.data) - - if h == nil || h.count == 0 { - return - } - s := (*slice)(p) - r := int(rand()) - offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1)) - if h.B == 0 { - copyKeys(t, h, (*bmap)(h.buckets), s, offset) - return - } - arraySize := int(bucketShift(h.B)) - buckets := h.buckets - for i := 0; i < arraySize; i++ { - bucket := (i + r) & (arraySize - 1) - b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize))) - copyKeys(t, h, b, s, offset) - } - - if h.growing() { - oldArraySize := int(h.noldbuckets()) - for i := 0; i < oldArraySize; i++ { - bucket := (i + r) & (oldArraySize - 1) - b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize))) - if evacuated(b) { - continue - } - copyKeys(t, h, b, s, offset) - } - } - return -} - -func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) { - for b != nil { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1) - if isEmpty(b.tophash[offi]) { - continue - } - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - k := add(unsafe.Pointer(b), 
dataOffset+offi*uintptr(t.KeySize)) - if t.IndirectKey() { - k = *((*unsafe.Pointer)(k)) - } - if s.len >= s.cap { - fatal("concurrent map read and map write") - } - typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k) - s.len++ - } - b = b.overflow(t) - } -} - -// values for implementing maps.values -// -//go:linkname values maps.values -func values(m any, p unsafe.Pointer) { - e := efaceOf(&m) - t := (*maptype)(unsafe.Pointer(e._type)) - h := (*hmap)(e.data) - if h == nil || h.count == 0 { - return - } - s := (*slice)(p) - r := int(rand()) - offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1)) - if h.B == 0 { - copyValues(t, h, (*bmap)(h.buckets), s, offset) - return - } - arraySize := int(bucketShift(h.B)) - buckets := h.buckets - for i := 0; i < arraySize; i++ { - bucket := (i + r) & (arraySize - 1) - b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize))) - copyValues(t, h, b, s, offset) - } - - if h.growing() { - oldArraySize := int(h.noldbuckets()) - for i := 0; i < oldArraySize; i++ { - bucket := (i + r) & (oldArraySize - 1) - b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize))) - if evacuated(b) { - continue - } - copyValues(t, h, b, s, offset) - } - } - return -} - -func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) { - for b != nil { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1) - if isEmpty(b.tophash[offi]) { - continue - } - - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - - ele := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize)) - if t.IndirectElem() { - ele = *((*unsafe.Pointer)(ele)) - } - if s.len >= s.cap { - fatal("concurrent map read and map write") - } - typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele) - s.len++ - } - b = b.overflow(t) - } -} diff --git a/src/runtime/map_noswiss_test.go b/src/runtime/map_noswiss_test.go deleted file mode 100644 index 5af7b7b8c8..0000000000 --- a/src/runtime/map_noswiss_test.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package runtime_test - -import ( - "internal/abi" - "internal/goarch" - "runtime" - "slices" - "testing" -) - -func TestHmapSize(t *testing.T) { - // The structure of hmap is defined in runtime/map.go - // and in cmd/compile/internal/reflectdata/map.go and must be in sync. - // The size of hmap should be 56 bytes on 64 bit and 36 bytes on 32 bit platforms. - var hmapSize = uintptr(2*8 + 5*goarch.PtrSize) - if runtime.RuntimeHmapSize != hmapSize { - t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize) - } -} - -func TestLoadFactor(t *testing.T) { - for b := uint8(0); b < 20; b++ { - count := 13 * (1 << b) / 2 // 6.5 - if b == 0 { - count = 8 - } - if runtime.OverLoadFactor(count, b) { - t.Errorf("OverLoadFactor(%d,%d)=true, want false", count, b) - } - if !runtime.OverLoadFactor(count+1, b) { - t.Errorf("OverLoadFactor(%d,%d)=false, want true", count+1, b) - } - } -} - -func TestMapIterOrder(t *testing.T) { - sizes := []int{3, 7, 9, 15} - if abi.OldMapBucketCountBits >= 5 { - // it gets flaky (often only one iteration order) at size 3 when abi.MapBucketCountBits >=5. 
- t.Fatalf("This test becomes flaky if abi.MapBucketCountBits(=%d) is 5 or larger", abi.OldMapBucketCountBits) - } - for _, n := range sizes { - for i := 0; i < 1000; i++ { - // Make m be {0: true, 1: true, ..., n-1: true}. - m := make(map[int]bool) - for i := 0; i < n; i++ { - m[i] = true - } - // Check that iterating over the map produces at least two different orderings. - ord := func() []int { - var s []int - for key := range m { - s = append(s, key) - } - return s - } - first := ord() - ok := false - for try := 0; try < 100; try++ { - if !slices.Equal(first, ord()) { - ok = true - break - } - } - if !ok { - t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first) - break - } - } - } -} - -const bs = abi.OldMapBucketCount - -// belowOverflow should be a pretty-full pair of buckets; -// atOverflow is 1/8 bs larger = 13/8 buckets or two buckets -// that are 13/16 full each, which is the overflow boundary. -// Adding one to that should ensure overflow to the next higher size. -const ( - belowOverflow = bs * 3 / 2 // 1.5 bs = 2 buckets @ 75% - atOverflow = belowOverflow + bs/8 // 2 buckets at 13/16 fill. -) - -var mapBucketTests = [...]struct { - n int // n is the number of map elements - noescape int // number of expected buckets for non-escaping map - escape int // number of expected buckets for escaping map -}{ - {-(1 << 30), 1, 1}, - {-1, 1, 1}, - {0, 1, 1}, - {1, 1, 1}, - {bs, 1, 1}, - {bs + 1, 2, 2}, - {belowOverflow, 2, 2}, // 1.5 bs = 2 buckets @ 75% - {atOverflow + 1, 4, 4}, // 13/8 bs + 1 == overflow to 4 - - {2 * belowOverflow, 4, 4}, // 3 bs = 4 buckets @75% - {2*atOverflow + 1, 8, 8}, // 13/4 bs + 1 = overflow to 8 - - {4 * belowOverflow, 8, 8}, // 6 bs = 8 buckets @ 75% - {4*atOverflow + 1, 16, 16}, // 13/2 bs + 1 = overflow to 16 -} - -func TestMapBuckets(t *testing.T) { - // Test that maps of different sizes have the right number of buckets. - // Non-escaping maps with small buckets (like map[int]int) never - // have a nil bucket pointer due to starting with preallocated buckets - // on the stack. Escaping maps start with a non-nil bucket pointer if - // hint size is above bucketCnt and thereby have more than one bucket. - // These tests depend on bucketCnt and loadFactor* in map.go. 
- t.Run("mapliteral", func(t *testing.T) { - for _, tt := range mapBucketTests { - localMap := map[int]int{} - if runtime.MapBucketsPointerIsNil(localMap) { - t.Errorf("no escape: buckets pointer is nil for non-escaping map") - } - for i := 0; i < tt.n; i++ { - localMap[i] = i - } - if got := runtime.MapBucketsCount(localMap); got != tt.noescape { - t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got) - } - escapingMap := runtime.Escape(map[int]int{}) - if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) { - t.Errorf("escape: buckets pointer is nil for n=%d buckets", count) - } - for i := 0; i < tt.n; i++ { - escapingMap[i] = i - } - if got := runtime.MapBucketsCount(escapingMap); got != tt.escape { - t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got) - } - } - }) - t.Run("nohint", func(t *testing.T) { - for _, tt := range mapBucketTests { - localMap := make(map[int]int) - if runtime.MapBucketsPointerIsNil(localMap) { - t.Errorf("no escape: buckets pointer is nil for non-escaping map") - } - for i := 0; i < tt.n; i++ { - localMap[i] = i - } - if got := runtime.MapBucketsCount(localMap); got != tt.noescape { - t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got) - } - escapingMap := runtime.Escape(make(map[int]int)) - if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) { - t.Errorf("escape: buckets pointer is nil for n=%d buckets", count) - } - for i := 0; i < tt.n; i++ { - escapingMap[i] = i - } - if got := runtime.MapBucketsCount(escapingMap); got != tt.escape { - t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got) - } - } - }) - t.Run("makemap", func(t *testing.T) { - for _, tt := range mapBucketTests { - localMap := make(map[int]int, tt.n) - if runtime.MapBucketsPointerIsNil(localMap) { - t.Errorf("no escape: buckets pointer is nil for non-escaping map") - } - for i := 0; i < tt.n; i++ { - localMap[i] = i - } - if got := runtime.MapBucketsCount(localMap); got != tt.noescape { - t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got) - } - escapingMap := runtime.Escape(make(map[int]int, tt.n)) - if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) { - t.Errorf("escape: buckets pointer is nil for n=%d buckets", count) - } - for i := 0; i < tt.n; i++ { - escapingMap[i] = i - } - if got := runtime.MapBucketsCount(escapingMap); got != tt.escape { - t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got) - } - } - }) - t.Run("makemap64", func(t *testing.T) { - for _, tt := range mapBucketTests { - localMap := make(map[int]int, int64(tt.n)) - if runtime.MapBucketsPointerIsNil(localMap) { - t.Errorf("no escape: buckets pointer is nil for non-escaping map") - } - for i := 0; i < tt.n; i++ { - localMap[i] = i - } - if got := runtime.MapBucketsCount(localMap); got != tt.noescape { - t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got) - } - escapingMap := runtime.Escape(make(map[int]int, tt.n)) - if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) { - t.Errorf("escape: buckets pointer is nil for n=%d buckets", count) - } - for i := 0; i < tt.n; i++ { - escapingMap[i] = i - } - if got := runtime.MapBucketsCount(escapingMap); got != tt.escape { - t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got) - } - } - }) -} diff --git 
a/src/runtime/map_swiss_test.go b/src/runtime/map_swiss_test.go deleted file mode 100644 index d5c9fdbe46..0000000000 --- a/src/runtime/map_swiss_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package runtime_test - -import ( - "internal/abi" - "internal/goarch" - "internal/runtime/maps" - "slices" - "testing" - "unsafe" -) - -func TestHmapSize(t *testing.T) { - // The structure of Map is defined in internal/runtime/maps/map.go - // and in cmd/compile/internal/reflectdata/map_swiss.go and must be in sync. - // The size of Map should be 48 bytes on 64 bit and 32 bytes on 32 bit platforms. - wantSize := uintptr(2*8 + 4*goarch.PtrSize) - gotSize := unsafe.Sizeof(maps.Map{}) - if gotSize != wantSize { - t.Errorf("sizeof(maps.Map{})==%d, want %d", gotSize, wantSize) - } -} - -// See also reflect_test.TestGroupSizeZero. -func TestGroupSizeZero(t *testing.T) { - var m map[struct{}]struct{} - mTyp := abi.TypeOf(m) - mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp)) - - // internal/runtime/maps when create pointers to slots, even if slots - // are size 0. The compiler should have reserved an extra word to - // ensure that pointers to the zero-size type at the end of group are - // valid. - if mt.Group.Size() <= 8 { - t.Errorf("Group size got %d want >8", mt.Group.Size()) - } -} - -func TestMapIterOrder(t *testing.T) { - sizes := []int{3, 7, 9, 15} - for _, n := range sizes { - for i := 0; i < 1000; i++ { - // Make m be {0: true, 1: true, ..., n-1: true}. - m := make(map[int]bool) - for i := 0; i < n; i++ { - m[i] = true - } - // Check that iterating over the map produces at least two different orderings. - ord := func() []int { - var s []int - for key := range m { - s = append(s, key) - } - return s - } - first := ord() - ok := false - for try := 0; try < 100; try++ { - if !slices.Equal(first, ord()) { - ok = true - break - } - } - if !ok { - t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first) - break - } - } - } -} diff --git a/src/runtime/map_test.go b/src/runtime/map_test.go index b1ff02d851..7fe8399130 100644 --- a/src/runtime/map_test.go +++ b/src/runtime/map_test.go @@ -6,7 +6,9 @@ package runtime_test import ( "fmt" - "internal/goexperiment" + "internal/abi" + "internal/goarch" + "internal/runtime/maps" "internal/testenv" "math" "os" @@ -812,31 +814,6 @@ func TestIncrementAfterBulkClearKeyStringValueInt(t *testing.T) { } } -func TestMapTombstones(t *testing.T) { - m := map[int]int{} - const N = 10000 - // Fill a map. - for i := 0; i < N; i++ { - m[i] = i - } - runtime.MapTombstoneCheck(m) - // Delete half of the entries. - for i := 0; i < N; i += 2 { - delete(m, i) - } - runtime.MapTombstoneCheck(m) - // Add new entries to fill in holes. - for i := N; i < 3*N/2; i++ { - m[i] = i - } - runtime.MapTombstoneCheck(m) - // Delete everything. 
- for i := 0; i < 3*N/2; i++ { - delete(m, i) - } - runtime.MapTombstoneCheck(m) -} - type canString int func (c canString) String() string { @@ -1060,44 +1037,6 @@ func TestEmptyMapWithInterfaceKey(t *testing.T) { }) } -func TestMapKeys(t *testing.T) { - if goexperiment.SwissMap { - t.Skip("mapkeys not implemented for swissmaps") - } - - type key struct { - s string - pad [128]byte // sizeof(key) > abi.MapMaxKeyBytes - } - m := map[key]int{{s: "a"}: 1, {s: "b"}: 2} - keys := make([]key, 0, len(m)) - runtime.MapKeys(m, unsafe.Pointer(&keys)) - for _, k := range keys { - if len(k.s) != 1 { - t.Errorf("len(k.s) == %d, want 1", len(k.s)) - } - } -} - -func TestMapValues(t *testing.T) { - if goexperiment.SwissMap { - t.Skip("mapvalues not implemented for swissmaps") - } - - type val struct { - s string - pad [128]byte // sizeof(val) > abi.MapMaxElemBytes - } - m := map[int]val{1: {s: "a"}, 2: {s: "b"}} - vals := make([]val, 0, len(m)) - runtime.MapValues(m, unsafe.Pointer(&vals)) - for _, v := range vals { - if len(v.s) != 1 { - t.Errorf("len(v.s) == %d, want 1", len(v.s)) - } - } -} - func computeHash() uintptr { var v struct{} return runtime.MemHash(unsafe.Pointer(&v), 0, unsafe.Sizeof(v)) @@ -1202,3 +1141,62 @@ func TestMapIterDeleteReplace(t *testing.T) { }) } } + +func TestHmapSize(t *testing.T) { + // The structure of Map is defined in internal/runtime/maps/map.go + // and in cmd/compile/internal/reflectdata/map.go and must be in sync. + // The size of Map should be 48 bytes on 64 bit and 32 bytes on 32 bit platforms. + wantSize := uintptr(2*8 + 4*goarch.PtrSize) + gotSize := unsafe.Sizeof(maps.Map{}) + if gotSize != wantSize { + t.Errorf("sizeof(maps.Map{})==%d, want %d", gotSize, wantSize) + } +} + +// See also reflect_test.TestGroupSizeZero. +func TestGroupSizeZero(t *testing.T) { + var m map[struct{}]struct{} + mTyp := abi.TypeOf(m) + mt := (*abi.MapType)(unsafe.Pointer(mTyp)) + + // internal/runtime/maps when create pointers to slots, even if slots + // are size 0. The compiler should have reserved an extra word to + // ensure that pointers to the zero-size type at the end of group are + // valid. + if mt.Group.Size() <= 8 { + t.Errorf("Group size got %d want >8", mt.Group.Size()) + } +} + +func TestMapIterOrder(t *testing.T) { + sizes := []int{3, 7, 9, 15} + for _, n := range sizes { + for i := 0; i < 1000; i++ { + // Make m be {0: true, 1: true, ..., n-1: true}. + m := make(map[int]bool) + for i := 0; i < n; i++ { + m[i] = true + } + // Check that iterating over the map produces at least two different orderings. + ord := func() []int { + var s []int + for key := range m { + s = append(s, key) + } + return s + } + first := ord() + ok := false + for try := 0; try < 100; try++ { + if !slices.Equal(first, ord()) { + ok = true + break + } + } + if !ok { + t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first) + break + } + } + } +} diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go index 7331886af2..9872e5297f 100644 --- a/src/runtime/mbitmap.go +++ b/src/runtime/mbitmap.go @@ -692,7 +692,7 @@ func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize // malloc does not call heapSetType* when there are no pointers. // // There can be read-write races between heapSetType* and things -// that read the heap metadata like scanobject. However, since +// that read the heap metadata like scanObject. 
However, since // heapSetType* is only used for objects that have not yet been // made reachable, readers will ignore bits being modified by this // function. This does mean this function cannot transiently modify @@ -947,7 +947,7 @@ func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) { if typ == nil { return } - if typ.Kind_&abi.KindMask == abi.Interface { + if typ.Kind() == abi.Interface { // Interfaces are unfortunately inconsistently handled // when it comes to the type pointer, so it's easy to // produce a lot of false positives here. @@ -1776,7 +1776,7 @@ func pointerMask(ep any) (mask []byte) { t := e._type var et *_type - if t.Kind_&abi.KindMask != abi.Pointer { + if t.Kind() != abi.Pointer { throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried") } et = (*ptrtype)(unsafe.Pointer(t)).Elem diff --git a/src/runtime/mcleanup.go b/src/runtime/mcleanup.go index c368730c57..383217aa05 100644 --- a/src/runtime/mcleanup.go +++ b/src/runtime/mcleanup.go @@ -173,14 +173,14 @@ func (c Cleanup) Stop() { // Reached the end of the linked list. Stop searching at this point. break } - if offset == uintptr(s.offset) && _KindSpecialCleanup == s.kind && + if offset == s.offset && _KindSpecialCleanup == s.kind && (*specialCleanup)(unsafe.Pointer(s)).id == c.id { // The special is a cleanup and contains a matching cleanup id. *iter = s.next found = s break } - if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCleanup < s.kind) { + if offset < s.offset || (offset == s.offset && _KindSpecialCleanup < s.kind) { // The special is outside the region specified for that kind of // special. The specials are sorted by kind. break diff --git a/src/runtime/mem_windows.go b/src/runtime/mem_windows.go index 75860a4c1d..3db6fc2ba4 100644 --- a/src/runtime/mem_windows.go +++ b/src/runtime/mem_windows.go @@ -26,11 +26,11 @@ const ( // //go:nosplit func sysAllocOS(n uintptr, _ string) unsafe.Pointer { - return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE)) + return unsafe.Pointer(stdcall(_VirtualAlloc, 0, n, _MEM_COMMIT|_MEM_RESERVE, _PAGE_READWRITE)) } func sysUnusedOS(v unsafe.Pointer, n uintptr) { - r := stdcall3(_VirtualFree, uintptr(v), n, _MEM_DECOMMIT) + r := stdcall(_VirtualFree, uintptr(v), n, _MEM_DECOMMIT) if r != 0 { return } @@ -46,7 +46,7 @@ func sysUnusedOS(v unsafe.Pointer, n uintptr) { // in the worst case, but that's fast enough. 
for n > 0 { small := n - for small >= 4096 && stdcall3(_VirtualFree, uintptr(v), small, _MEM_DECOMMIT) == 0 { + for small >= 4096 && stdcall(_VirtualFree, uintptr(v), small, _MEM_DECOMMIT) == 0 { small /= 2 small &^= 4096 - 1 } @@ -60,7 +60,7 @@ func sysUnusedOS(v unsafe.Pointer, n uintptr) { } func sysUsedOS(v unsafe.Pointer, n uintptr) { - p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE) + p := stdcall(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE) if p == uintptr(v) { return } @@ -71,7 +71,7 @@ func sysUsedOS(v unsafe.Pointer, n uintptr) { k := n for k > 0 { small := k - for small >= 4096 && stdcall4(_VirtualAlloc, uintptr(v), small, _MEM_COMMIT, _PAGE_READWRITE) == 0 { + for small >= 4096 && stdcall(_VirtualAlloc, uintptr(v), small, _MEM_COMMIT, _PAGE_READWRITE) == 0 { small /= 2 small &^= 4096 - 1 } @@ -105,7 +105,7 @@ func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr) { // //go:nosplit func sysFreeOS(v unsafe.Pointer, n uintptr) { - r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE) + r := stdcall(_VirtualFree, uintptr(v), 0, _MEM_RELEASE) if r == 0 { print("runtime: VirtualFree of ", n, " bytes failed with errno=", getlasterror(), "\n") throw("runtime: failed to release pages") @@ -121,13 +121,13 @@ func sysReserveOS(v unsafe.Pointer, n uintptr, _ string) unsafe.Pointer { // v is just a hint. // First try at v. // This will fail if any of [v, v+n) is already reserved. - v = unsafe.Pointer(stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_RESERVE, _PAGE_READWRITE)) + v = unsafe.Pointer(stdcall(_VirtualAlloc, uintptr(v), n, _MEM_RESERVE, _PAGE_READWRITE)) if v != nil { return v } // Next let the kernel choose the address. - return unsafe.Pointer(stdcall4(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE)) + return unsafe.Pointer(stdcall(_VirtualAlloc, 0, n, _MEM_RESERVE, _PAGE_READWRITE)) } func sysMapOS(v unsafe.Pointer, n uintptr, _ string) { diff --git a/src/runtime/memclr_mips64x.s b/src/runtime/memclr_mips64x.s index cf3a9c4ab4..3df3728146 100644 --- a/src/runtime/memclr_mips64x.s +++ b/src/runtime/memclr_mips64x.s @@ -71,29 +71,93 @@ msa_large_loop: no_msa: // if less than 8 bytes, do one byte at a time SGTU $8, R2, R3 - BNE R3, out + BNE R3, check4 - // do one byte at a time until 8-aligned + // Check alignment AND $7, R1, R3 - BEQ R3, words + BEQ R3, aligned + + // Zero one byte at a time until we reach 8 byte alignment. 
+ MOVV $8, R5 + SUBV R3, R5, R3 + SUBV R3, R2, R2 +align: + SUBV $1, R3 MOVB R0, (R1) ADDV $1, R1 - JMP -4(PC) + BNE R3, align -words: - // do 8 bytes at a time if there is room - ADDV $-7, R4, R2 +aligned: + SGTU $8, R2, R3 + BNE R3, check4 + SGTU $16, R2, R3 + BNE R3, zero8 + SGTU $32, R2, R3 + BNE R3, zero16 + SGTU $64, R2, R3 + BNE R3, zero32 +loop64: + MOVV R0, (R1) + MOVV R0, 8(R1) + MOVV R0, 16(R1) + MOVV R0, 24(R1) + MOVV R0, 32(R1) + MOVV R0, 40(R1) + MOVV R0, 48(R1) + MOVV R0, 56(R1) + ADDV $64, R1 + SUBV $64, R2 + SGTU $64, R2, R3 + BEQ R0, R3, loop64 + BEQ R2, done - SGTU R2, R1, R3 - BEQ R3, out +check32: + SGTU $32, R2, R3 + BNE R3, check16 +zero32: + MOVV R0, (R1) + MOVV R0, 8(R1) + MOVV R0, 16(R1) + MOVV R0, 24(R1) + ADDV $32, R1 + SUBV $32, R2 + BEQ R2, done + +check16: + SGTU $16, R2, R3 + BNE R3, check8 +zero16: + MOVV R0, (R1) + MOVV R0, 8(R1) + ADDV $16, R1 + SUBV $16, R2 + BEQ R2, done + +check8: + SGTU $8, R2, R3 + BNE R3, check4 +zero8: MOVV R0, (R1) ADDV $8, R1 - JMP -4(PC) + SUBV $8, R2 + BEQ R2, done -out: +check4: + SGTU $4, R2, R3 + BNE R3, loop1 +zero4: + MOVB R0, (R1) + MOVB R0, 1(R1) + MOVB R0, 2(R1) + MOVB R0, 3(R1) + ADDV $4, R1 + SUBV $4, R2 + +loop1: BEQ R1, R4, done MOVB R0, (R1) ADDV $1, R1 - JMP -3(PC) + JMP loop1 done: RET + diff --git a/src/runtime/memclr_s390x.s b/src/runtime/memclr_s390x.s index 656e96998c..392057565e 100644 --- a/src/runtime/memclr_s390x.s +++ b/src/runtime/memclr_s390x.s @@ -109,53 +109,23 @@ clearge32: // For size >= 4KB, XC is loop unrolled 16 times (4KB = 256B * 16) clearge4KB: XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 - XC $256, 0(R4), 0(R4) - ADD $256, R4 - ADD $-256, R5 + XC $256, 256(R4), 256(R4) + XC $256, 512(R4), 512(R4) + XC $256, 768(R4), 768(R4) + XC $256, 1024(R4), 1024(R4) + XC $256, 1280(R4), 1280(R4) + XC $256, 1536(R4), 1536(R4) + XC $256, 1792(R4), 1792(R4) + XC $256, 2048(R4), 2048(R4) + XC $256, 2304(R4), 2304(R4) + XC $256, 2560(R4), 2560(R4) + XC $256, 2816(R4), 2816(R4) + XC $256, 3072(R4), 3072(R4) + XC $256, 3328(R4), 3328(R4) + XC $256, 3584(R4), 3584(R4) + XC $256, 3840(R4), 3840(R4) + ADD $4096, R4 + ADD $-4096, R5 CMP R5, $4096 BGE clearge4KB @@ -180,7 +150,7 @@ clear32to255: clear32: VZERO V1 VST V1, 0(R4) - VST V1, 16(R4) + VST V1, 16(R4) RET clear33to64: diff --git a/src/runtime/memmove_test.go b/src/runtime/memmove_test.go index a2c3b72568..22905504d4 100644 --- a/src/runtime/memmove_test.go +++ b/src/runtime/memmove_test.go @@ -8,6 +8,8 @@ import ( "crypto/rand" "encoding/binary" "fmt" + "internal/asan" + "internal/msan" "internal/race" "internal/testenv" . 
"runtime" @@ -102,8 +104,8 @@ func TestMemmoveLarge0x180000(t *testing.T) { } t.Parallel() - if race.Enabled { - t.Skip("skipping large memmove test under race detector") + if race.Enabled || asan.Enabled || msan.Enabled { + t.Skip("skipping large memmove test under sanitizers") } testSize(t, 0x180000) } @@ -114,8 +116,8 @@ func TestMemmoveOverlapLarge0x120000(t *testing.T) { } t.Parallel() - if race.Enabled { - t.Skip("skipping large memmove test under race detector") + if race.Enabled || asan.Enabled || msan.Enabled { + t.Skip("skipping large memmove test under sanitizers") } testOverlap(t, 0x120000) } @@ -518,6 +520,42 @@ func BenchmarkMemclrRange(b *testing.B) { } } +func BenchmarkClearFat3(b *testing.B) { + p := new([3]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [3]byte{} + } +} + +func BenchmarkClearFat4(b *testing.B) { + p := new([4 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [4 / 4]uint32{} + } +} + +func BenchmarkClearFat5(b *testing.B) { + p := new([5]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [5]byte{} + } +} + +func BenchmarkClearFat6(b *testing.B) { + p := new([6]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [6]byte{} + } +} + func BenchmarkClearFat7(b *testing.B) { p := new([7]byte) Escape(p) @@ -536,6 +574,24 @@ func BenchmarkClearFat8(b *testing.B) { } } +func BenchmarkClearFat9(b *testing.B) { + p := new([9]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [9]byte{} + } +} + +func BenchmarkClearFat10(b *testing.B) { + p := new([10]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [10]byte{} + } +} + func BenchmarkClearFat11(b *testing.B) { p := new([11]byte) Escape(p) @@ -590,6 +646,24 @@ func BenchmarkClearFat16(b *testing.B) { } } +func BenchmarkClearFat18(b *testing.B) { + p := new([18]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [18]byte{} + } +} + +func BenchmarkClearFat20(b *testing.B) { + p := new([20 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = [20 / 4]uint32{} + } +} + func BenchmarkClearFat24(b *testing.B) { p := new([24 / 4]uint32) Escape(p) @@ -707,6 +781,46 @@ func BenchmarkClearFat1040(b *testing.B) { } } +func BenchmarkCopyFat3(b *testing.B) { + var x [3]byte + p := new([3]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat4(b *testing.B) { + var x [4 / 4]uint32 + p := new([4 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat5(b *testing.B) { + var x [5]byte + p := new([5]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat6(b *testing.B) { + var x [6]byte + p := new([6]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + func BenchmarkCopyFat7(b *testing.B) { var x [7]byte p := new([7]byte) @@ -727,6 +841,26 @@ func BenchmarkCopyFat8(b *testing.B) { } } +func BenchmarkCopyFat9(b *testing.B) { + var x [9]byte + p := new([9]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat10(b *testing.B) { + var x [10]byte + p := new([10]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + func BenchmarkCopyFat11(b *testing.B) { var x [11]byte p := new([11]byte) @@ -787,6 +921,26 @@ func BenchmarkCopyFat16(b *testing.B) { } } +func BenchmarkCopyFat18(b *testing.B) { + var x [18]byte + p := 
new([18]byte) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + +func BenchmarkCopyFat20(b *testing.B) { + var x [20 / 4]uint32 + p := new([20 / 4]uint32) + Escape(p) + b.ResetTimer() + for i := 0; i < b.N; i++ { + *p = x + } +} + func BenchmarkCopyFat24(b *testing.B) { var x [24 / 4]uint32 p := new([24 / 4]uint32) diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go index 48da745521..ef3782b783 100644 --- a/src/runtime/metrics.go +++ b/src/runtime/metrics.go @@ -169,6 +169,20 @@ func initMetrics() { out.scalar = float64bits(nsToSec(in.cpuStats.UserTime)) }, }, + "/gc/cleanups/executed:cleanups": { + deps: makeStatDepSet(finalStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.finalStats.cleanupsExecuted + }, + }, + "/gc/cleanups/queued:cleanups": { + deps: makeStatDepSet(finalStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.finalStats.cleanupsQueued + }, + }, "/gc/cycles/automatic:gc-cycles": { deps: makeStatDepSet(sysStatsDep), compute: func(in *statAggregate, out *metricValue) { @@ -190,6 +204,20 @@ func initMetrics() { out.scalar = in.sysStats.gcCyclesDone }, }, + "/gc/finalizers/executed:finalizers": { + deps: makeStatDepSet(finalStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.finalStats.finalizersExecuted + }, + }, + "/gc/finalizers/queued:finalizers": { + deps: makeStatDepSet(finalStatsDep), + compute: func(in *statAggregate, out *metricValue) { + out.kind = metricKindUint64 + out.scalar = in.finalStats.finalizersQueued + }, + }, "/gc/scan/globals:bytes": { deps: makeStatDepSet(gcStatsDep), compute: func(in *statAggregate, out *metricValue) { @@ -514,10 +542,11 @@ func godebug_registerMetric(name string, read func() uint64) { type statDep uint const ( - heapStatsDep statDep = iota // corresponds to heapStatsAggregate - sysStatsDep // corresponds to sysStatsAggregate - cpuStatsDep // corresponds to cpuStatsAggregate - gcStatsDep // corresponds to gcStatsAggregate + heapStatsDep statDep = iota // corresponds to heapStatsAggregate + sysStatsDep // corresponds to sysStatsAggregate + cpuStatsDep // corresponds to cpuStatsAggregate + gcStatsDep // corresponds to gcStatsAggregate + finalStatsDep // corresponds to finalStatsAggregate numStatsDeps ) @@ -696,6 +725,21 @@ func (a *gcStatsAggregate) compute() { a.totalScan = a.heapScan + a.stackScan + a.globalsScan } +// finalStatsAggregate represents various finalizer/cleanup stats obtained +// from the runtime acquired together to avoid skew and inconsistencies. +type finalStatsAggregate struct { + finalizersQueued uint64 + finalizersExecuted uint64 + cleanupsQueued uint64 + cleanupsExecuted uint64 +} + +// compute populates the finalStatsAggregate with values from the runtime. +func (a *finalStatsAggregate) compute() { + a.finalizersQueued, a.finalizersExecuted = finReadQueueStats() + a.cleanupsQueued, a.cleanupsExecuted = gcCleanups.readQueueStats() +} + // nsToSec takes a duration in nanoseconds and converts it to seconds as // a float64. func nsToSec(ns int64) float64 { @@ -708,11 +752,12 @@ func nsToSec(ns int64) float64 { // as a set of these aggregates that it has populated. The aggregates // are populated lazily by its ensure method. 
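The new finalStatsAggregate plugs into statAggregate just below; from user code the four counters it backs read like any other metric. The names are exactly those registered in the hunks above:

```go
package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	samples := []metrics.Sample{
		{Name: "/gc/cleanups/queued:cleanups"},
		{Name: "/gc/cleanups/executed:cleanups"},
		{Name: "/gc/finalizers/queued:finalizers"},
		{Name: "/gc/finalizers/executed:finalizers"},
	}
	metrics.Read(samples) // one call: all four are sampled together, avoiding skew
	for _, s := range samples {
		fmt.Printf("%-40s %d\n", s.Name, s.Value.Uint64())
	}
}
```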
type statAggregate struct { - ensured statDepSet - heapStats heapStatsAggregate - sysStats sysStatsAggregate - cpuStats cpuStatsAggregate - gcStats gcStatsAggregate + ensured statDepSet + heapStats heapStatsAggregate + sysStats sysStatsAggregate + cpuStats cpuStatsAggregate + gcStats gcStatsAggregate + finalStats finalStatsAggregate } // ensure populates statistics aggregates determined by deps if they @@ -735,6 +780,8 @@ func (a *statAggregate) ensure(deps *statDepSet) { a.cpuStats.compute() case gcStatsDep: a.gcStats.compute() + case finalStatsDep: + a.finalStats.compute() } } a.ensured = a.ensured.union(missing) diff --git a/src/runtime/metrics/description.go b/src/runtime/metrics/description.go index 19a7dbf07a..4587f791e1 100644 --- a/src/runtime/metrics/description.go +++ b/src/runtime/metrics/description.go @@ -175,6 +175,22 @@ var allDesc = []Description{ Cumulative: true, }, { + Name: "/gc/cleanups/executed:cleanups", + Description: "Approximate total count of cleanup functions (created by runtime.AddCleanup) " + + "executed by the runtime. Subtract /gc/cleanups/queued:cleanups to approximate " + + "cleanup queue length. Useful for detecting slow cleanups holding up the queue.", + Kind: KindUint64, + Cumulative: true, + }, + { + Name: "/gc/cleanups/queued:cleanups", + Description: "Approximate total count of cleanup functions (created by runtime.AddCleanup) " + + "queued by the runtime for execution. Subtract from /gc/cleanups/executed:cleanups " + + "to approximate cleanup queue length. Useful for detecting slow cleanups holding up the queue.", + Kind: KindUint64, + Cumulative: true, + }, + { Name: "/gc/cycles/automatic:gc-cycles", Description: "Count of completed GC cycles generated by the Go runtime.", Kind: KindUint64, @@ -193,6 +209,23 @@ var allDesc = []Description{ Cumulative: true, }, { + Name: "/gc/finalizers/executed:finalizers", + Description: "Total count of finalizer functions (created by runtime.SetFinalizer) " + + "executed by the runtime. Subtract /gc/finalizers/queued:finalizers to approximate " + + "finalizer queue length. Useful for detecting finalizers overwhelming the queue, " + + "either by being too slow, or by there being too many of them.", + Kind: KindUint64, + Cumulative: true, + }, + { + Name: "/gc/finalizers/queued:finalizers", + Description: "Total count of finalizer functions (created by runtime.SetFinalizer) and " + + "queued by the runtime for execution. Subtract from /gc/finalizers/executed:finalizers " + + "to approximate finalizer queue length. Useful for detecting slow finalizers holding up the queue.", + Kind: KindUint64, + Cumulative: true, + }, + { Name: "/gc/gogc:percent", Description: "Heap size target percentage configured by the user, otherwise 100. This " + "value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent " + diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go index a1902bc6d7..058769ac3a 100644 --- a/src/runtime/metrics/doc.go +++ b/src/runtime/metrics/doc.go @@ -137,6 +137,19 @@ Below is the full list of supported metrics, ordered lexicographically. to system CPU time measurements. Compare only with other /cpu/classes metrics. + /gc/cleanups/executed:cleanups + Approximate total count of cleanup functions (created + by runtime.AddCleanup) executed by the runtime. Subtract + /gc/cleanups/queued:cleanups to approximate cleanup queue + length. Useful for detecting slow cleanups holding up the queue. 
+ + /gc/cleanups/queued:cleanups + Approximate total count of cleanup functions (created by + runtime.AddCleanup) queued by the runtime for execution. + Subtract from /gc/cleanups/executed:cleanups to approximate + cleanup queue length. Useful for detecting slow cleanups holding + up the queue. + /gc/cycles/automatic:gc-cycles Count of completed GC cycles generated by the Go runtime. @@ -146,6 +159,20 @@ Below is the full list of supported metrics, ordered lexicographically. /gc/cycles/total:gc-cycles Count of all completed GC cycles. + /gc/finalizers/executed:finalizers + Total count of finalizer functions (created by + runtime.SetFinalizer) executed by the runtime. Subtract + /gc/finalizers/queued:finalizers to approximate finalizer queue + length. Useful for detecting finalizers overwhelming the queue, + either by being too slow, or by there being too many of them. + + /gc/finalizers/queued:finalizers + Total count of finalizer functions (created by + runtime.SetFinalizer) and queued by the runtime for execution. + Subtract from /gc/finalizers/executed:finalizers to approximate + finalizer queue length. Useful for detecting slow finalizers + holding up the queue. + /gc/gogc:percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the diff --git a/src/runtime/metrics_test.go b/src/runtime/metrics_test.go index 5fc022efc6..5787c96084 100644 --- a/src/runtime/metrics_test.go +++ b/src/runtime/metrics_test.go @@ -499,6 +499,10 @@ func TestReadMetricsCumulative(t *testing.T) { defer wg.Done() for { // Add more things here that could influence metrics. + for i := 0; i < 10; i++ { + runtime.AddCleanup(new(*int), func(_ struct{}) {}, struct{}{}) + runtime.SetFinalizer(new(*int), func(_ **int) {}) + } for i := 0; i < len(readMetricsSink); i++ { readMetricsSink[i] = make([]byte, 1024) select { @@ -1512,3 +1516,62 @@ func TestMetricHeapUnusedLargeObjectOverflow(t *testing.T) { done <- struct{}{} wg.Wait() } + +func TestReadMetricsCleanups(t *testing.T) { + runtime.GC() // End any in-progress GC. + runtime.BlockUntilEmptyCleanupQueue(int64(1 * time.Second)) // Flush any queued cleanups. + + var before [2]metrics.Sample + before[0].Name = "/gc/cleanups/queued:cleanups" + before[1].Name = "/gc/cleanups/executed:cleanups" + after := before + + metrics.Read(before[:]) + + const N = 10 + for i := 0; i < N; i++ { + runtime.AddCleanup(new(*int), func(_ struct{}) {}, struct{}{}) + } + + runtime.GC() + runtime.BlockUntilEmptyCleanupQueue(int64(1 * time.Second)) + + metrics.Read(after[:]) + + if v0, v1 := before[0].Value.Uint64(), after[0].Value.Uint64(); v0+N != v1 { + t.Errorf("expected %s difference to be exactly %d, got %d -> %d", before[0].Name, N, v0, v1) + } + if v0, v1 := before[1].Value.Uint64(), after[1].Value.Uint64(); v0+N != v1 { + t.Errorf("expected %s difference to be exactly %d, got %d -> %d", before[1].Name, N, v0, v1) + } +} + +func TestReadMetricsFinalizers(t *testing.T) { + runtime.GC() // End any in-progress GC. + runtime.BlockUntilEmptyFinalizerQueue(int64(1 * time.Second)) // Flush any queued finalizers. 
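// A minimal user-side sketch (assuming only the metric names added in this
// change): the finalizer queue length can be approximated by differencing
// the two cumulative counters read in a single call:
//
//	var s [2]metrics.Sample
//	s[0].Name = "/gc/finalizers/queued:finalizers"
//	s[1].Name = "/gc/finalizers/executed:finalizers"
//	metrics.Read(s[:])
//	queueLen := int64(s[0].Value.Uint64()) - int64(s[1].Value.Uint64())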
+ + var before [2]metrics.Sample + before[0].Name = "/gc/finalizers/queued:finalizers" + before[1].Name = "/gc/finalizers/executed:finalizers" + after := before + + metrics.Read(before[:]) + + const N = 10 + for i := 0; i < N; i++ { + runtime.SetFinalizer(new(*int), func(_ **int) {}) + } + + runtime.GC() + runtime.GC() + runtime.BlockUntilEmptyFinalizerQueue(int64(1 * time.Second)) + + metrics.Read(after[:]) + + if v0, v1 := before[0].Value.Uint64(), after[0].Value.Uint64(); v0+N != v1 { + t.Errorf("expected %s difference to be exactly %d, got %d -> %d", before[0].Name, N, v0, v1) + } + if v0, v1 := before[1].Value.Uint64(), after[1].Value.Uint64(); v0+N != v1 { + t.Errorf("expected %s difference to be exactly %d, got %d -> %d", before[1].Name, N, v0, v1) + } +} diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go index 2d4a54c933..bafdb01603 100644 --- a/src/runtime/mfinal.go +++ b/src/runtime/mfinal.go @@ -251,7 +251,7 @@ func runFinalizers() { // confusing the write barrier. *(*[2]uintptr)(frame) = [2]uintptr{} } - switch f.fint.Kind_ & abi.KindMask { + switch f.fint.Kind() { case abi.Pointer: // direct use of pointer *(*unsafe.Pointer)(r) = f.arg @@ -435,7 +435,7 @@ func SetFinalizer(obj any, finalizer any) { if etyp == nil { throw("runtime.SetFinalizer: first argument is nil") } - if etyp.Kind_&abi.KindMask != abi.Pointer { + if etyp.Kind() != abi.Pointer { throw("runtime.SetFinalizer: first argument is " + toRType(etyp).string() + ", not pointer") } ot := (*ptrtype)(unsafe.Pointer(etyp)) @@ -490,7 +490,7 @@ func SetFinalizer(obj any, finalizer any) { return } - if ftyp.Kind_&abi.KindMask != abi.Func { + if ftyp.Kind() != abi.Func { throw("runtime.SetFinalizer: second argument is " + toRType(ftyp).string() + ", not a function") } ft := (*functype)(unsafe.Pointer(ftyp)) @@ -505,13 +505,13 @@ func SetFinalizer(obj any, finalizer any) { case fint == etyp: // ok - same type goto okarg - case fint.Kind_&abi.KindMask == abi.Pointer: + case fint.Kind() == abi.Pointer: if (fint.Uncommon() == nil || etyp.Uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).Elem == ot.Elem { // ok - not same type, but both pointers, // one or the other is unnamed, and same element type, so assignable. goto okarg } - case fint.Kind_&abi.KindMask == abi.Interface: + case fint.Kind() == abi.Interface: ityp := (*interfacetype)(unsafe.Pointer(fint)) if len(ityp.Methods) == 0 { // ok - satisfies empty interface diff --git a/src/runtime/mgclimit.go b/src/runtime/mgclimit.go index ad86fbd65b..80aeb71cad 100644 --- a/src/runtime/mgclimit.go +++ b/src/runtime/mgclimit.go @@ -209,14 +209,12 @@ func (l *gcCPULimiterState) updateLocked(now int64) { for _, pp := range allp { typ, duration := pp.limiterEvent.consume(now) switch typ { - case limiterEventIdleMarkWork: - fallthrough case limiterEventIdle: - idleTime += duration sched.idleTime.Add(duration) - case limiterEventMarkAssist: - fallthrough - case limiterEventScavengeAssist: + idleTime += duration + case limiterEventIdleMarkWork: + idleTime += duration + case limiterEventMarkAssist, limiterEventScavengeAssist: assistTime += duration case limiterEventNone: break @@ -470,14 +468,12 @@ func (e *limiterEvent) stop(typ limiterEventType, now int64) { } // Account for the event. 
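// In both this switch and the matching one in updateLocked above, the
// accounting is the same: plain idle time is charged to the limiter's idle
// bucket and to sched.idleTime, idle mark work is charged to the idle
// bucket only, and mark/scavenge assists are charged as assist time, which
// is what the limiter weighs against its budget.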
switch typ { - case limiterEventIdleMarkWork: - gcCPULimiter.addIdleTime(duration) case limiterEventIdle: - gcCPULimiter.addIdleTime(duration) sched.idleTime.Add(duration) - case limiterEventMarkAssist: - fallthrough - case limiterEventScavengeAssist: + gcCPULimiter.addIdleTime(duration) + case limiterEventIdleMarkWork: + gcCPULimiter.addIdleTime(duration) + case limiterEventMarkAssist, limiterEventScavengeAssist: gcCPULimiter.addAssistTime(duration) default: throw("limiterEvent.stop: invalid limiter event type found") diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index a136c7aeac..8b306045c5 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -415,13 +415,13 @@ func gcScanFinalizer(spf *specialfinalizer, s *mspan, gcw *gcWork) { // Don't mark finalized object, but scan it so we retain everything it points to. // A finalizer can be set for an inner byte of an object, find object beginning. - p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize + p := s.base() + spf.special.offset/s.elemsize*s.elemsize // Mark everything that can be reached from // the object (but *not* the object itself or // we'll never collect it). if !s.spanclass.noscan() { - scanobject(p, gcw) + scanObject(p, gcw) } // The special itself is also a root. @@ -1255,7 +1255,7 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) { } } if b != 0 { - scanobject(b, gcw) + scanObject(b, gcw) } else if s != 0 { scanSpan(s, gcw) } else { @@ -1359,7 +1359,7 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 { } } if b != 0 { - scanobject(b, gcw) + scanObject(b, gcw) } else if s != 0 { scanSpan(s, gcw) } else { @@ -1390,7 +1390,7 @@ func gcDrainN(gcw *gcWork, scanWork int64) int64 { return workFlushed + gcw.heapScanWork } -// scanblock scans b as scanobject would, but using an explicit +// scanblock scans b as scanObject would, but using an explicit // pointer bitmap instead of the heap bitmap. // // This is used to scan non-heap roots, so it does not update @@ -1415,7 +1415,7 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) } for j := 0; j < 8 && i < n; j++ { if bits&1 != 0 { - // Same work as in scanobject; see comments there. + // Same work as in scanObject; see comments there. p := *(*uintptr)(unsafe.Pointer(b + i)) if p != 0 { if stk != nil && p >= stk.stack.lo && p < stk.stack.hi { @@ -1435,107 +1435,6 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) } } -// scanobject scans the object starting at b, adding pointers to gcw. -// b must point to the beginning of a heap object or an oblet. -// scanobject consults the GC bitmap for the pointer mask and the -// spans for the size of the object. -// -//go:nowritebarrier -func scanobject(b uintptr, gcw *gcWork) { - // Prefetch object before we scan it. - // - // This will overlap fetching the beginning of the object with initial - // setup before we start scanning the object. - sys.Prefetch(b) - - // Find the bits for b and the size of the object at b. - // - // b is either the beginning of an object, in which case this - // is the size of the object to scan, or it points to an - // oblet, in which case we compute the size to scan below. - s := spanOfUnchecked(b) - n := s.elemsize - if n == 0 { - throw("scanobject n == 0") - } - if s.spanclass.noscan() { - // Correctness-wise this is ok, but it's inefficient - // if noscan objects reach here. - throw("scanobject of a noscan object") - } - - var tp typePointers - if n > maxObletBytes { - // Large object. 
Break into oblets for better - // parallelism and lower latency. - if b == s.base() { - // Enqueue the other oblets to scan later. - // Some oblets may be in b's scalar tail, but - // these will be marked as "no more pointers", - // so we'll drop out immediately when we go to - // scan those. - for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes { - if !gcw.putObjFast(oblet) { - gcw.putObj(oblet) - } - } - } - - // Compute the size of the oblet. Since this object - // must be a large object, s.base() is the beginning - // of the object. - n = s.base() + s.elemsize - b - n = min(n, maxObletBytes) - tp = s.typePointersOfUnchecked(s.base()) - tp = tp.fastForward(b-tp.addr, b+n) - } else { - tp = s.typePointersOfUnchecked(b) - } - - var scanSize uintptr - for { - var addr uintptr - if tp, addr = tp.nextFast(); addr == 0 { - if tp, addr = tp.next(b + n); addr == 0 { - break - } - } - - // Keep track of farthest pointer we found, so we can - // update heapScanWork. TODO: is there a better metric, - // now that we can skip scalar portions pretty efficiently? - scanSize = addr - b + goarch.PtrSize - - // Work here is duplicated in scanblock and above. - // If you make changes here, make changes there too. - obj := *(*uintptr)(unsafe.Pointer(addr)) - - // At this point we have extracted the next potential pointer. - // Quickly filter out nil and pointers back to the current object. - if obj != 0 && obj-b >= n { - // Test if obj points into the Go heap and, if so, - // mark the object. - // - // Note that it's possible for findObject to - // fail if obj points to a just-allocated heap - // object because of a race with growing the - // heap. In this case, we know the object was - // just allocated and hence will be marked by - // allocation itself. - if !tryDeferToSpanScan(obj, gcw) { - if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 { - greyobject(obj, b, addr-b, span, gcw, objIndex) - } - } - } - } - gcw.bytesMarked += uint64(n) - gcw.heapScanWork += int64(scanSize) - if debug.gctrace > 1 { - gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++ - } -} - // scanConservative scans block [b, b+n) conservatively, treating any // pointer-like value in the block as a pointer. // diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go index ac2b1732f9..845857a817 100644 --- a/src/runtime/mgcmark_greenteagc.go +++ b/src/runtime/mgcmark_greenteagc.go @@ -110,7 +110,7 @@ func (o *spanScanOwnership) or(v spanScanOwnership) spanScanOwnership { return spanScanOwnership(atomic.Or32(o32, uint32(v)<<off) >> off) } -func (imb *spanInlineMarkBits) init(class spanClass) { +func (imb *spanInlineMarkBits) init(class spanClass, needzero bool) { if imb == nil { // This nil check and throw is almost pointless. Normally we would // expect imb to never be nil. However, this is called on potentially @@ -131,7 +131,13 @@ func (imb *spanInlineMarkBits) init(class spanClass) { // See go.dev/issue/74375 for details. throw("runtime: span inline mark bits nil?") } - *imb = spanInlineMarkBits{} + if needzero { + // Use memclrNoHeapPointers to avoid having the compiler make a worse + // decision. We know that imb is both aligned and a nice power-of-two + // size that works well for wider SIMD instructions. The compiler likely + // has no idea that imb is aligned to 128 bytes. 
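// The needzero parameter mirrors the span's own needzero flag: memory that
// just came back fresh from the OS is already zero, so initInlineMarkBits
// below passes s.needzero != 0 and brand-new spans skip the clear entirely.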
+ memclrNoHeapPointers(unsafe.Pointer(imb), unsafe.Sizeof(spanInlineMarkBits{})) + } imb.class = class } @@ -180,25 +186,33 @@ func (s *mspan) initInlineMarkBits() { if doubleCheckGreenTea && !gcUsesSpanInlineMarkBits(s.elemsize) { throw("expected span with inline mark bits") } - s.inlineMarkBits().init(s.spanclass) + // Zeroing is only necessary if this span wasn't just freshly allocated from the OS. + s.inlineMarkBits().init(s.spanclass, s.needzero != 0) } -// mergeInlineMarks merges the span's inline mark bits into dst. +// moveInlineMarks merges the span's inline mark bits into dst and clears them. // // gcUsesSpanInlineMarkBits(s.elemsize) must be true. -func (s *mspan) mergeInlineMarks(dst *gcBits) { +func (s *mspan) moveInlineMarks(dst *gcBits) { if doubleCheckGreenTea && !gcUsesSpanInlineMarkBits(s.elemsize) { throw("expected span with inline mark bits") } bytes := divRoundUp(uintptr(s.nelems), 8) imb := s.inlineMarkBits() - _ = imb.marks[bytes-1] - for i := uintptr(0); i < bytes; i++ { - *dst.bytep(i) |= imb.marks[i] + imbMarks := (*gc.ObjMask)(unsafe.Pointer(&imb.marks)) + for i := uintptr(0); i < bytes; i += goarch.PtrSize { + marks := bswapIfBigEndian(imbMarks[i/goarch.PtrSize]) + if i/goarch.PtrSize == uintptr(len(imb.marks)+1)/goarch.PtrSize-1 { + marks &^= 0xff << ((goarch.PtrSize - 1) * 8) // mask out class + } + *(*uintptr)(unsafe.Pointer(dst.bytep(i))) |= bswapIfBigEndian(marks) } if doubleCheckGreenTea && !s.spanclass.noscan() && imb.marks != imb.scans { throw("marks don't match scans for span with pointer") } + + // Reset the inline mark bits. + imb.init(s.spanclass, true /* We know these bits are always dirty now. */) } // inlineMarkBits returns the inline mark bits for the span. @@ -652,7 +666,7 @@ func spanSetScans(spanBase uintptr, nelems uint16, imb *spanInlineMarkBits, toSc marks := imbMarks[i/goarch.PtrSize] scans = bswapIfBigEndian(scans) marks = bswapIfBigEndian(marks) - if i/goarch.PtrSize == 64/goarch.PtrSize-1 { + if i/goarch.PtrSize == uintptr(len(imb.marks)+1)/goarch.PtrSize-1 { scans &^= 0xff << ((goarch.PtrSize - 1) * 8) // mask out owned marks &^= 0xff << ((goarch.PtrSize - 1) * 8) // mask out class } @@ -837,3 +851,107 @@ func (w *gcWork) flushScanStats(dst *[gc.NumSizeClasses]sizeClassScanStats) { } clear(w.stats[:]) } + +// scanObject scans the object starting at b, adding pointers to gcw. +// b must point to the beginning of a heap object or an oblet. +// scanObject consults the GC bitmap for the pointer mask and the +// spans for the size of the object. +// +// Used only for !gcUsesSpanInlineMarkBits spans, but supports all +// object sizes and is safe to be called on all heap objects. +// +//go:nowritebarrier +func scanObject(b uintptr, gcw *gcWork) { + // Prefetch object before we scan it. + // + // This will overlap fetching the beginning of the object with initial + // setup before we start scanning the object. + sys.Prefetch(b) + + // Find the bits for b and the size of the object at b. + // + // b is either the beginning of an object, in which case this + // is the size of the object to scan, or it points to an + // oblet, in which case we compute the size to scan below. + s := spanOfUnchecked(b) + n := s.elemsize + if n == 0 { + throw("scanObject n == 0") + } + if s.spanclass.noscan() { + // Correctness-wise this is ok, but it's inefficient + // if noscan objects reach here. + throw("scanObject of a noscan object") + } + + var tp typePointers + if n > maxObletBytes { + // Large object. 
Break into oblets for better + // parallelism and lower latency. + if b == s.base() { + // Enqueue the other oblets to scan later. + // Some oblets may be in b's scalar tail, but + // these will be marked as "no more pointers", + // so we'll drop out immediately when we go to + // scan those. + for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes { + if !gcw.putObjFast(oblet) { + gcw.putObj(oblet) + } + } + } + + // Compute the size of the oblet. Since this object + // must be a large object, s.base() is the beginning + // of the object. + n = s.base() + s.elemsize - b + n = min(n, maxObletBytes) + tp = s.typePointersOfUnchecked(s.base()) + tp = tp.fastForward(b-tp.addr, b+n) + } else { + tp = s.typePointersOfUnchecked(b) + } + + var scanSize uintptr + for { + var addr uintptr + if tp, addr = tp.nextFast(); addr == 0 { + if tp, addr = tp.next(b + n); addr == 0 { + break + } + } + + // Keep track of farthest pointer we found, so we can + // update heapScanWork. TODO: is there a better metric, + // now that we can skip scalar portions pretty efficiently? + scanSize = addr - b + goarch.PtrSize + + // Work here is duplicated in scanblock and above. + // If you make changes here, make changes there too. + obj := *(*uintptr)(unsafe.Pointer(addr)) + + // At this point we have extracted the next potential pointer. + // Quickly filter out nil and pointers back to the current object. + if obj != 0 && obj-b >= n { + // Test if obj points into the Go heap and, if so, + // mark the object. + // + // Note that it's possible for findObject to + // fail if obj points to a just-allocated heap + // object because of a race with growing the + // heap. In this case, we know the object was + // just allocated and hence will be marked by + // allocation itself. + if !tryDeferToSpanScan(obj, gcw) { + if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 { + greyobject(obj, b, addr-b, span, gcw, objIndex) + } + } + } + } + gcw.bytesMarked += uint64(n) + gcw.heapScanWork += int64(scanSize) + if debug.gctrace > 1 { + gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++ + } +} diff --git a/src/runtime/mgcmark_nogreenteagc.go b/src/runtime/mgcmark_nogreenteagc.go index c0ca5c21ea..6375773123 100644 --- a/src/runtime/mgcmark_nogreenteagc.go +++ b/src/runtime/mgcmark_nogreenteagc.go @@ -6,7 +6,12 @@ package runtime -import "internal/runtime/gc" +import ( + "internal/goarch" + "internal/runtime/gc" + "internal/runtime/sys" + "unsafe" +) func (s *mspan) markBitsForIndex(objIndex uintptr) markBits { bytep, mask := s.gcmarkBits.bitp(objIndex) @@ -24,7 +29,7 @@ func tryDeferToSpanScan(p uintptr, gcw *gcWork) bool { func (s *mspan) initInlineMarkBits() { } -func (s *mspan) mergeInlineMarks(to *gcBits) { +func (s *mspan) moveInlineMarks(to *gcBits) { throw("unimplemented") } @@ -110,3 +115,104 @@ func (w *gcWork) flushScanStats(dst *[gc.NumSizeClasses]sizeClassScanStats) { } clear(w.stats[:]) } + +// scanObject scans the object starting at b, adding pointers to gcw. +// b must point to the beginning of a heap object or an oblet. +// scanObject consults the GC bitmap for the pointer mask and the +// spans for the size of the object. +// +//go:nowritebarrier +func scanObject(b uintptr, gcw *gcWork) { + // Prefetch object before we scan it. + // + // This will overlap fetching the beginning of the object with initial + // setup before we start scanning the object. + sys.Prefetch(b) + + // Find the bits for b and the size of the object at b. 
+ // + // b is either the beginning of an object, in which case this + // is the size of the object to scan, or it points to an + // oblet, in which case we compute the size to scan below. + s := spanOfUnchecked(b) + n := s.elemsize + if n == 0 { + throw("scanObject n == 0") + } + if s.spanclass.noscan() { + // Correctness-wise this is ok, but it's inefficient + // if noscan objects reach here. + throw("scanObject of a noscan object") + } + + var tp typePointers + if n > maxObletBytes { + // Large object. Break into oblets for better + // parallelism and lower latency. + if b == s.base() { + // Enqueue the other oblets to scan later. + // Some oblets may be in b's scalar tail, but + // these will be marked as "no more pointers", + // so we'll drop out immediately when we go to + // scan those. + for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes { + if !gcw.putObjFast(oblet) { + gcw.putObj(oblet) + } + } + } + + // Compute the size of the oblet. Since this object + // must be a large object, s.base() is the beginning + // of the object. + n = s.base() + s.elemsize - b + n = min(n, maxObletBytes) + tp = s.typePointersOfUnchecked(s.base()) + tp = tp.fastForward(b-tp.addr, b+n) + } else { + tp = s.typePointersOfUnchecked(b) + } + + var scanSize uintptr + for { + var addr uintptr + if tp, addr = tp.nextFast(); addr == 0 { + if tp, addr = tp.next(b + n); addr == 0 { + break + } + } + + // Keep track of farthest pointer we found, so we can + // update heapScanWork. TODO: is there a better metric, + // now that we can skip scalar portions pretty efficiently? + scanSize = addr - b + goarch.PtrSize + + // Work here is duplicated in scanblock and above. + // If you make changes here, make changes there too. + obj := *(*uintptr)(unsafe.Pointer(addr)) + + // At this point we have extracted the next potential pointer. + // Quickly filter out nil and pointers back to the current object. + if obj != 0 && obj-b >= n { + // Test if obj points into the Go heap and, if so, + // mark the object. + // + // Note that it's possible for findObject to + // fail if obj points to a just-allocated heap + // object because of a race with growing the + // heap. In this case, we know the object was + // just allocated and hence will be marked by + // allocation itself. + if !tryDeferToSpanScan(obj, gcw) { + if obj, span, objIndex := findObject(obj, b, addr-b); obj != 0 { + greyobject(obj, b, addr-b, span, gcw, objIndex) + } + } + } + } + gcw.bytesMarked += uint64(n) + gcw.heapScanWork += int64(scanSize) + if debug.gctrace > 1 { + gcw.stats[s.spanclass.sizeclass()].sparseObjsScanned++ + } +} diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go index a3bf2989df..b72cc461ba 100644 --- a/src/runtime/mgcsweep.go +++ b/src/runtime/mgcsweep.go @@ -553,7 +553,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool { siter := newSpecialsIter(s) for siter.valid() { // A finalizer can be set for an inner byte of an object, find object beginning. - objIndex := uintptr(siter.s.offset) / size + objIndex := siter.s.offset / size p := s.base() + objIndex*size mbits := s.markBitsForIndex(objIndex) if !mbits.isMarked() { @@ -561,7 +561,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool { // Pass 1: see if it has a finalizer. 
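// (Pass structure, for orientation: pass 1 below only decides whether a
// finalizer revives the object; pass 2 then either queues finalizers and
// clears weak handles for a revived object, or, in the else branch, frees
// every special of a truly dead one.)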
hasFinAndRevived := false endOffset := p - s.base() + size - for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next { + for tmp := siter.s; tmp != nil && tmp.offset < endOffset; tmp = tmp.next { if tmp.kind == _KindSpecialFinalizer { // Stop freeing of object if it has a finalizer. mbits.setMarkedNonAtomic() @@ -573,11 +573,11 @@ func (sl *sweepLocked) sweep(preserve bool) bool { // Pass 2: queue all finalizers and clear any weak handles. Weak handles are cleared // before finalization as specified by the weak package. See the documentation // for that package for more details. - for siter.valid() && uintptr(siter.s.offset) < endOffset { + for siter.valid() && siter.s.offset < endOffset { // Find the exact byte for which the special was setup // (as opposed to object beginning). special := siter.s - p := s.base() + uintptr(special.offset) + p := s.base() + special.offset if special.kind == _KindSpecialFinalizer || special.kind == _KindSpecialWeakHandle { siter.unlinkAndNext() freeSpecial(special, unsafe.Pointer(p), size) @@ -589,11 +589,11 @@ func (sl *sweepLocked) sweep(preserve bool) bool { } } else { // Pass 2: the object is truly dead, free (and handle) all specials. - for siter.valid() && uintptr(siter.s.offset) < endOffset { + for siter.valid() && siter.s.offset < endOffset { // Find the exact byte for which the special was setup // (as opposed to object beginning). special := siter.s - p := s.base() + uintptr(special.offset) + p := s.base() + special.offset siter.unlinkAndNext() freeSpecial(special, unsafe.Pointer(p), size) } @@ -650,9 +650,9 @@ func (sl *sweepLocked) sweep(preserve bool) bool { } } - // Copy over the inline mark bits if necessary. + // Copy over and clear the inline mark bits if necessary. if gcUsesSpanInlineMarkBits(s.elemsize) { - s.mergeInlineMarks(s.gcmarkBits) + s.moveInlineMarks(s.gcmarkBits) } // Check for zombie objects. @@ -704,11 +704,6 @@ func (sl *sweepLocked) sweep(preserve bool) bool { // Initialize alloc bits cache. s.refillAllocCache(0) - // Reset the object queue, if we have one. - if gcUsesSpanInlineMarkBits(s.elemsize) { - s.initInlineMarkBits() - } - // The span must be in our exclusive ownership until we update sweepgen, // check for potential races. if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 { diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index 358de2f376..1776206573 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -1490,7 +1490,7 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, s.allocBits = newAllocBits(uintptr(s.nelems)) // Adjust s.limit down to the object-containing part of the span. - s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems) + s.limit = s.base() + s.elemsize*uintptr(s.nelems) // It's safe to access h.sweepgen without the heap lock because it's // only ever updated with the world stopped and we run on the @@ -1549,6 +1549,8 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, func (h *mheap) grow(npage uintptr) (uintptr, bool) { assertLockHeld(&h.lock) + firstGrow := h.curArena.base == 0 + // We must grow the heap in whole palloc chunks. // We call sysMap below but note that because we // round up to pallocChunkPages which is on the order @@ -1597,6 +1599,16 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) { // Switch to the new space. 
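// A worked example of the randomization below, assuming a typical 64-bit
// configuration (64 MiB arenas, so logHeapArenaBytes = 26; 4 MiB palloc
// chunks, so logPallocChunkBytes = 22): the first grow draws
// bits = 26 - 22 = 4 random bits and ORs them in at bit 22, shifting the
// arena base by a random 0-15 chunks while keeping it chunk-aligned. The
// markRandomPaddingPages step further down then randomizes the remaining
// page-granularity bits within that chunk.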
h.curArena.base = uintptr(av) h.curArena.end = uintptr(av) + asize + + if firstGrow && randomizeHeapBase { + // The top heapAddrBits-logHeapArenaBytes are randomized, we now + // want to randomize the next + // logHeapArenaBytes-log2(pallocChunkBytes) bits, making sure + // h.curArena.base is aligned to pallocChunkBytes. + bits := logHeapArenaBytes - logPallocChunkBytes + offset := nextHeapRandBits(bits) + h.curArena.base = alignDown(h.curArena.base|(offset<<logPallocChunkBytes), pallocChunkBytes) + } } // Recalculate nBase. @@ -1627,6 +1639,22 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) { // space ready for allocation. h.pages.grow(v, nBase-v) totalGrowth += nBase - v + + if firstGrow && randomizeHeapBase { + // The top heapAddrBits-log2(pallocChunkBytes) bits are now randomized, + // we finally want to randomize the next + // log2(pallocChunkBytes)-log2(pageSize) bits, while maintaining + // alignment to pageSize. We do this by calculating a random number of + // pages into the current arena, and marking them as allocated. The + // address of the next available page becomes our fully randomized base + // heap address. + randOffset := nextHeapRandBits(logPallocChunkBytes) + randNumPages := alignDown(randOffset, pageSize) / pageSize + if randNumPages != 0 { + h.pages.markRandomPaddingPages(v, randNumPages) + } + } + return totalGrowth, true } @@ -2126,11 +2154,11 @@ func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special, if s == nil { break } - if offset == uintptr(s.offset) && kind == s.kind { + if offset == s.offset && kind == s.kind { found = true break } - if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && kind < s.kind) { + if offset < s.offset || (offset == s.offset && kind < s.kind) { break } iter = &s.next @@ -2173,7 +2201,7 @@ func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *p // Mark everything reachable from the object // so it's retained for the finalizer. if !span.spanclass.noscan() { - scanobject(base, gcw) + scanObject(base, gcw) } // Mark the finalizer itself, since the // special isn't part of the GC'd heap. @@ -2297,14 +2325,14 @@ func getCleanupContext(ptr uintptr, cleanupID uint64) *specialCheckFinalizer { // Reached the end of the linked list. Stop searching at this point. break } - if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind && + if offset == s.offset && _KindSpecialCheckFinalizer == s.kind && (*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID { // The special is a cleanup and contains a matching cleanup id. *iter = s.next found = (*specialCheckFinalizer)(unsafe.Pointer(s)) break } - if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) { + if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) { // The special is outside the region specified for that kind of // special. The specials are sorted by kind. break @@ -2347,14 +2375,14 @@ func clearCleanupContext(ptr uintptr, cleanupID uint64) { // Reached the end of the linked list. Stop searching at this point. break } - if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind && + if offset == s.offset && _KindSpecialCheckFinalizer == s.kind && (*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID { // The special is a cleanup and contains a matching cleanup id. 
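// (List invariant relied on here and in specialFindSplicePoint above:
// specials are kept sorted by offset, and by kind within a single offset,
// so the walk can stop as soon as it passes the target offset/kind pair
// instead of traversing the rest of the list.)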
*iter = s.next found = s break } - if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) { + if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) { // The special is outside the region specified for that kind of // special. The specials are sorted by kind. break @@ -2450,7 +2478,7 @@ type specialWeakHandle struct { //go:linkname internal_weak_runtime_registerWeakPointer weak.runtime_registerWeakPointer func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer { - return unsafe.Pointer(getOrAddWeakHandle(unsafe.Pointer(p))) + return unsafe.Pointer(getOrAddWeakHandle(p)) } //go:linkname internal_weak_runtime_makeStrongFromWeak weak.runtime_makeStrongFromWeak diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go index 4c58fb6e02..83db005051 100644 --- a/src/runtime/mpagealloc.go +++ b/src/runtime/mpagealloc.go @@ -972,6 +972,45 @@ func (p *pageAlloc) free(base, npages uintptr) { p.update(base, npages, true, false) } +// markRandomPaddingPages marks the range of memory [base, base+npages*pageSize] +// as both allocated and scavenged. This is used for randomizing the base heap +// address. Both the alloc and scav bits are set so that the pages are not used +// and so the memory accounting stats are correctly calculated. +// +// Similar to allocRange, it also updates the summaries to reflect the +// newly-updated bitmap. +// +// p.mheapLock must be held. +func (p *pageAlloc) markRandomPaddingPages(base uintptr, npages uintptr) { + assertLockHeld(p.mheapLock) + + limit := base + npages*pageSize - 1 + sc, ec := chunkIndex(base), chunkIndex(limit) + si, ei := chunkPageIndex(base), chunkPageIndex(limit) + if sc == ec { + chunk := p.chunkOf(sc) + chunk.allocRange(si, ei+1-si) + p.scav.index.alloc(sc, ei+1-si) + chunk.scavenged.setRange(si, ei+1-si) + } else { + chunk := p.chunkOf(sc) + chunk.allocRange(si, pallocChunkPages-si) + p.scav.index.alloc(sc, pallocChunkPages-si) + chunk.scavenged.setRange(si, pallocChunkPages-si) + for c := sc + 1; c < ec; c++ { + chunk := p.chunkOf(c) + chunk.allocAll() + p.scav.index.alloc(c, pallocChunkPages) + chunk.scavenged.setAll() + } + chunk = p.chunkOf(ec) + chunk.allocRange(0, ei+1) + p.scav.index.alloc(ec, ei+1) + chunk.scavenged.setRange(0, ei+1) + } + p.update(base, npages, true, true) +} + const ( pallocSumBytes = unsafe.Sizeof(pallocSum(0)) diff --git a/src/runtime/mwbbuf.go b/src/runtime/mwbbuf.go index 537d558592..e8c6064905 100644 --- a/src/runtime/mwbbuf.go +++ b/src/runtime/mwbbuf.go @@ -215,7 +215,7 @@ func wbBufFlush1(pp *p) { // pointers we greyed. We use the buffer itself to temporarily // record greyed pointers. // - // TODO: Should scanobject/scanblock just stuff pointers into + // TODO: Should scanObject/scanblock just stuff pointers into // the wbBuf? Then this would become the sole greying path. 
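// (Concretely: today this flush path greys objects itself and, as noted
// above, reuses the buffer to remember what it greyed; pushing the recorded
// pointers through scanObject/scanblock instead would leave a single
// greying path, which is what this TODO is suggesting.)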
// // TODO: We could avoid shading any of the "new" pointers in diff --git a/src/runtime/netpoll_epoll.go b/src/runtime/netpoll_epoll.go index c43bab0882..48c03d119f 100644 --- a/src/runtime/netpoll_epoll.go +++ b/src/runtime/netpoll_epoll.go @@ -8,7 +8,7 @@ package runtime import ( "internal/runtime/atomic" - "internal/runtime/syscall" + "internal/runtime/syscall/linux" "unsafe" ) @@ -20,21 +20,21 @@ var ( func netpollinit() { var errno uintptr - epfd, errno = syscall.EpollCreate1(syscall.EPOLL_CLOEXEC) + epfd, errno = linux.EpollCreate1(linux.EPOLL_CLOEXEC) if errno != 0 { println("runtime: epollcreate failed with", errno) throw("runtime: netpollinit failed") } - efd, errno := syscall.Eventfd(0, syscall.EFD_CLOEXEC|syscall.EFD_NONBLOCK) + efd, errno := linux.Eventfd(0, linux.EFD_CLOEXEC|linux.EFD_NONBLOCK) if errno != 0 { println("runtime: eventfd failed with", -errno) throw("runtime: eventfd failed") } - ev := syscall.EpollEvent{ - Events: syscall.EPOLLIN, + ev := linux.EpollEvent{ + Events: linux.EPOLLIN, } *(**uintptr)(unsafe.Pointer(&ev.Data)) = &netpollEventFd - errno = syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, efd, &ev) + errno = linux.EpollCtl(epfd, linux.EPOLL_CTL_ADD, efd, &ev) if errno != 0 { println("runtime: epollctl failed with", errno) throw("runtime: epollctl failed") @@ -47,16 +47,16 @@ func netpollIsPollDescriptor(fd uintptr) bool { } func netpollopen(fd uintptr, pd *pollDesc) uintptr { - var ev syscall.EpollEvent - ev.Events = syscall.EPOLLIN | syscall.EPOLLOUT | syscall.EPOLLRDHUP | syscall.EPOLLET + var ev linux.EpollEvent + ev.Events = linux.EPOLLIN | linux.EPOLLOUT | linux.EPOLLRDHUP | linux.EPOLLET tp := taggedPointerPack(unsafe.Pointer(pd), pd.fdseq.Load()) *(*taggedPointer)(unsafe.Pointer(&ev.Data)) = tp - return syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, int32(fd), &ev) + return linux.EpollCtl(epfd, linux.EPOLL_CTL_ADD, int32(fd), &ev) } func netpollclose(fd uintptr) uintptr { - var ev syscall.EpollEvent - return syscall.EpollCtl(epfd, syscall.EPOLL_CTL_DEL, int32(fd), &ev) + var ev linux.EpollEvent + return linux.EpollCtl(epfd, linux.EPOLL_CTL_DEL, int32(fd), &ev) } func netpollarm(pd *pollDesc, mode int) { @@ -114,9 +114,9 @@ func netpoll(delay int64) (gList, int32) { // 1e9 ms == ~11.5 days. 
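// (Arithmetic check: delay is in nanoseconds and EpollWait takes
// milliseconds, so the conversion divides by 1e6; the cap of 1e9 ms is
// 1e6 seconds, roughly 11.57 days, far longer than any real timeout.)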
waitms = 1e9 } - var events [128]syscall.EpollEvent + var events [128]linux.EpollEvent retry: - n, errno := syscall.EpollWait(epfd, events[:], int32(len(events)), waitms) + n, errno := linux.EpollWait(epfd, events[:], int32(len(events)), waitms) if errno != 0 { if errno != _EINTR { println("runtime: epollwait on fd", epfd, "failed with", errno) @@ -138,7 +138,7 @@ retry: } if *(**uintptr)(unsafe.Pointer(&ev.Data)) == &netpollEventFd { - if ev.Events != syscall.EPOLLIN { + if ev.Events != linux.EPOLLIN { println("runtime: netpoll: eventfd ready for", ev.Events) throw("runtime: netpoll: eventfd ready for something unexpected") } @@ -156,10 +156,10 @@ retry: } var mode int32 - if ev.Events&(syscall.EPOLLIN|syscall.EPOLLRDHUP|syscall.EPOLLHUP|syscall.EPOLLERR) != 0 { + if ev.Events&(linux.EPOLLIN|linux.EPOLLRDHUP|linux.EPOLLHUP|linux.EPOLLERR) != 0 { mode += 'r' } - if ev.Events&(syscall.EPOLLOUT|syscall.EPOLLHUP|syscall.EPOLLERR) != 0 { + if ev.Events&(linux.EPOLLOUT|linux.EPOLLHUP|linux.EPOLLERR) != 0 { mode += 'w' } if mode != 0 { @@ -167,7 +167,7 @@ retry: pd := (*pollDesc)(tp.pointer()) tag := tp.tag() if pd.fdseq.Load() == tag { - pd.setEventErr(ev.Events == syscall.EPOLLERR, tag) + pd.setEventErr(ev.Events == linux.EPOLLERR, tag) delta += netpollready(&toRun, pd, mode) } } diff --git a/src/runtime/netpoll_windows.go b/src/runtime/netpoll_windows.go index fb35d41c0c..93137e4709 100644 --- a/src/runtime/netpoll_windows.go +++ b/src/runtime/netpoll_windows.go @@ -102,7 +102,7 @@ var ( ) func netpollinit() { - iocphandle = stdcall4(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX) + iocphandle = stdcall(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX) if iocphandle == 0 { println("runtime: CreateIoCompletionPort failed (errno=", getlasterror(), ")") throw("runtime: netpollinit failed") @@ -115,7 +115,7 @@ func netpollIsPollDescriptor(fd uintptr) bool { func netpollopen(fd uintptr, pd *pollDesc) int32 { key := packNetpollKey(netpollSourceReady, pd) - if stdcall4(_CreateIoCompletionPort, fd, iocphandle, key, 0) == 0 { + if stdcall(_CreateIoCompletionPort, fd, iocphandle, key, 0) == 0 { return int32(getlasterror()) } return 0 @@ -137,7 +137,7 @@ func netpollBreak() { } key := packNetpollKey(netpollSourceBreak, nil) - if stdcall4(_PostQueuedCompletionStatus, iocphandle, 0, key, 0) == 0 { + if stdcall(_PostQueuedCompletionStatus, iocphandle, 0, key, 0) == 0 { println("runtime: netpoll: PostQueuedCompletionStatus failed (errno=", getlasterror(), ")") throw("runtime: netpoll: PostQueuedCompletionStatus failed") } @@ -197,7 +197,7 @@ func netpoll(delay int64) (gList, int32) { if delay != 0 { mp.blocked = true } - if stdcall6(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 { + if stdcall(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 { mp.blocked = false errno := getlasterror() if errno == _WAIT_TIMEOUT { @@ -256,7 +256,7 @@ func netpollQueueTimer(delay int64) (signaled bool) { // such as a netpollBreak, so we can get to this point with a timer that hasn't // expired yet. In this case, the completion packet can still be picked up by // another thread, so defer the cancellation until it is really necessary. 
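// (This change folds the old fixed-arity stdcallN helpers into one variadic
// stdcall; judging only by the call sites in this file, its shape is
// approximately
//
//	func stdcall(fn stdFunction, args ...uintptr) uintptr
//
// so the argument count no longer has to be encoded in the function name.)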
- errno := stdcall2(_NtCancelWaitCompletionPacket, mp.waitIocpHandle, 1) + errno := stdcall(_NtCancelWaitCompletionPacket, mp.waitIocpHandle, 1) switch errno { case STATUS_CANCELLED: // STATUS_CANCELLED is returned when the associated timer has already expired, @@ -264,12 +264,12 @@ func netpollQueueTimer(delay int64) (signaled bool) { fallthrough case STATUS_SUCCESS: dt := -delay / 100 // relative sleep (negative), 100ns units - if stdcall6(_SetWaitableTimer, mp.waitIocpTimer, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0) == 0 { + if stdcall(_SetWaitableTimer, mp.waitIocpTimer, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0) == 0 { println("runtime: SetWaitableTimer failed; errno=", getlasterror()) throw("runtime: netpoll failed") } key := packNetpollKey(netpollSourceTimer, nil) - if errno := stdcall8(_NtAssociateWaitCompletionPacket, mp.waitIocpHandle, iocphandle, mp.waitIocpTimer, key, 0, 0, 0, uintptr(unsafe.Pointer(&signaled))); errno != 0 { + if errno := stdcall(_NtAssociateWaitCompletionPacket, mp.waitIocpHandle, iocphandle, mp.waitIocpTimer, key, 0, 0, 0, uintptr(unsafe.Pointer(&signaled))); errno != 0 { println("runtime: NtAssociateWaitCompletionPacket failed; errno=", errno) throw("runtime: netpoll failed") } diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go index 116995e5f6..3197c66537 100644 --- a/src/runtime/os3_solaris.go +++ b/src/runtime/os3_solaris.go @@ -238,6 +238,7 @@ func unminit() { // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. // // This always runs without a P, so //go:nowritebarrierrec is required. +// //go:nowritebarrierrec func mdestroy(mp *m) { } diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go index 3847b7671a..4bb8576f42 100644 --- a/src/runtime/os_aix.go +++ b/src/runtime/os_aix.go @@ -27,6 +27,7 @@ type funcDescriptor struct { type mOS struct { waitsema uintptr // semaphore for parking on locks perrno uintptr // pointer to tls errno + libcall libcall } //go:nosplit @@ -194,6 +195,7 @@ func unminit() { // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. // // This always runs without a P, so //go:nowritebarrierrec is required. +// //go:nowritebarrierrec func mdestroy(mp *m) { } diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go index 5aef34ff8f..0c7144e9d0 100644 --- a/src/runtime/os_darwin.go +++ b/src/runtime/os_darwin.go @@ -348,6 +348,7 @@ func unminit() { // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. // // This always runs without a P, so //go:nowritebarrierrec is required. +// //go:nowritebarrierrec func mdestroy(mp *m) { } diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go index e22fd9b42f..fbbee64fd3 100644 --- a/src/runtime/os_dragonfly.go +++ b/src/runtime/os_dragonfly.go @@ -220,6 +220,7 @@ func unminit() { // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. // // This always runs without a P, so //go:nowritebarrierrec is required. +// //go:nowritebarrierrec func mdestroy(mp *m) { } diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go index 54f98ef4f8..0ec5e43007 100644 --- a/src/runtime/os_linux.go +++ b/src/runtime/os_linux.go @@ -9,7 +9,7 @@ import ( "internal/goarch" "internal/runtime/atomic" "internal/runtime/strconv" - "internal/runtime/syscall" + "internal/runtime/syscall/linux" "unsafe" ) @@ -417,6 +417,7 @@ func unminit() { // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. 
// // This always runs without a P, so //go:nowritebarrierrec is required. +// //go:nowritebarrierrec func mdestroy(mp *m) { } @@ -469,7 +470,7 @@ func pipe2(flags int32) (r, w int32, errno int32) //go:nosplit func fcntl(fd, cmd, arg int32) (ret int32, errno int32) { - r, _, err := syscall.Syscall6(syscall.SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) + r, _, err := linux.Syscall6(linux.SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) return int32(r), int32(err) } @@ -772,7 +773,7 @@ func syscall_runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6 uintptr) ( // ensuring all threads execute system calls from multiple calls in the // same order. - r1, r2, errno := syscall.Syscall6(trap, a1, a2, a3, a4, a5, a6) + r1, r2, errno := linux.Syscall6(trap, a1, a2, a3, a4, a5, a6) if GOARCH == "ppc64" || GOARCH == "ppc64le" { // TODO(https://go.dev/issue/51192 ): ppc64 doesn't use r2. r2 = 0 @@ -883,7 +884,7 @@ func runPerThreadSyscall() { } args := perThreadSyscall - r1, r2, errno := syscall.Syscall6(args.trap, args.a1, args.a2, args.a3, args.a4, args.a5, args.a6) + r1, r2, errno := linux.Syscall6(args.trap, args.a1, args.a2, args.a3, args.a4, args.a5, args.a6) if GOARCH == "ppc64" || GOARCH == "ppc64le" { // TODO(https://go.dev/issue/51192 ): ppc64 doesn't use r2. r2 = 0 @@ -922,6 +923,6 @@ func (c *sigctxt) sigFromSeccomp() bool { //go:nosplit func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32) { - r, _, err := syscall.Syscall6(syscall.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0) + r, _, err := linux.Syscall6(linux.SYS_MPROTECT, uintptr(addr), n, uintptr(prot), 0, 0, 0) return int32(r), int32(err) } diff --git a/src/runtime/os_linux_riscv64.go b/src/runtime/os_linux_riscv64.go index c4a4d4e50d..65fa601a29 100644 --- a/src/runtime/os_linux_riscv64.go +++ b/src/runtime/os_linux_riscv64.go @@ -5,7 +5,7 @@ package runtime import ( - "internal/runtime/syscall" + "internal/runtime/syscall/linux" "unsafe" ) @@ -32,6 +32,6 @@ func internal_cpu_riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { } // Passing in a cpuCount of 0 and a cpu of nil ensures that only extensions supported by all the // cores are returned, which is the behaviour we want in internal/cpu. - _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(unsafe.Pointer(&pairs[0])), uintptr(len(pairs)), uintptr(0), uintptr(unsafe.Pointer(nil)), uintptr(flags), 0) + _, _, e1 := linux.Syscall6(sys_RISCV_HWPROBE, uintptr(unsafe.Pointer(&pairs[0])), uintptr(len(pairs)), uintptr(0), uintptr(unsafe.Pointer(nil)), uintptr(flags), 0) return e1 == 0 } diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go index 342ede9c53..f117253f34 100644 --- a/src/runtime/os_netbsd.go +++ b/src/runtime/os_netbsd.go @@ -101,9 +101,6 @@ var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0) // From NetBSD's <sys/sysctl.h> const ( - _CTL_KERN = 1 - _KERN_OSREV = 3 - _CTL_HW = 6 _HW_NCPU = 3 _HW_PAGESIZE = 7 @@ -141,13 +138,6 @@ func getPageSize() uintptr { return 0 } -func getOSRev() int { - if osrev, ok := sysctlInt([]uint32{_CTL_KERN, _KERN_OSREV}); ok { - return int(osrev) - } - return 0 -} - //go:nosplit func semacreate(mp *m) { } @@ -268,7 +258,6 @@ func osinit() { if physPageSize == 0 { physPageSize = getPageSize() } - needSysmonWorkaround = getOSRev() < 902000000 // NetBSD 9.2 } var urandom_dev = []byte("/dev/urandom\x00") @@ -324,6 +313,7 @@ func unminit() { // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. 
// // This always runs without a P, so //go:nowritebarrierrec is required. +// //go:nowritebarrierrec func mdestroy(mp *m) { } diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go index 02846851d6..3943111853 100644 --- a/src/runtime/os_openbsd.go +++ b/src/runtime/os_openbsd.go @@ -134,6 +134,54 @@ func semawakeup(mp *m) { } } +// mstart_stub provides glue code to call mstart from pthread_create. +func mstart_stub() + +// May run with m.p==nil, so write barriers are not allowed. +// +//go:nowritebarrierrec +func newosproc(mp *m) { + if false { + print("newosproc m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n") + } + + // Initialize an attribute object. + var attr pthreadattr + if err := pthread_attr_init(&attr); err != 0 { + writeErrStr(failthreadcreate) + exit(1) + } + + // Find out OS stack size for our own stack guard. + var stacksize uintptr + if pthread_attr_getstacksize(&attr, &stacksize) != 0 { + writeErrStr(failthreadcreate) + exit(1) + } + mp.g0.stack.hi = stacksize // for mstart + + // Tell the pthread library we won't join with this thread. + if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 { + writeErrStr(failthreadcreate) + exit(1) + } + + // Finally, create the thread. It starts at mstart_stub, which does some low-level + // setup and then calls mstart. + var oset sigset + sigprocmask(_SIG_SETMASK, &sigset_all, &oset) + err := retryOnEAGAIN(func() int32 { + return pthread_create(&attr, abi.FuncPCABI0(mstart_stub), unsafe.Pointer(mp)) + }) + sigprocmask(_SIG_SETMASK, &oset, nil) + if err != 0 { + writeErrStr(failthreadcreate) + exit(1) + } + + pthread_attr_destroy(&attr) +} + func osinit() { numCPUStartup = getCPUCount() physPageSize = getPageSize() @@ -160,9 +208,6 @@ func goenvs() { // Called on the parent thread (main thread in case of bootstrap), can allocate memory. func mpreinit(mp *m) { gsignalSize := int32(32 * 1024) - if GOARCH == "mips64" { - gsignalSize = int32(64 * 1024) - } mp.gsignal = malg(gsignalSize) mp.gsignal.m = mp } @@ -186,6 +231,7 @@ func unminit() { // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. // // This always runs without a P, so //go:nowritebarrierrec is required. +// //go:nowritebarrierrec func mdestroy(mp *m) { } diff --git a/src/runtime/os_openbsd_libc.go b/src/runtime/os_openbsd_libc.go deleted file mode 100644 index 201f1629d9..0000000000 --- a/src/runtime/os_openbsd_libc.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build openbsd && !mips64 - -package runtime - -import ( - "internal/abi" - "unsafe" -) - -// mstart_stub provides glue code to call mstart from pthread_create. -func mstart_stub() - -// May run with m.p==nil, so write barriers are not allowed. -// -//go:nowritebarrierrec -func newosproc(mp *m) { - if false { - print("newosproc m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n") - } - - // Initialize an attribute object. - var attr pthreadattr - if err := pthread_attr_init(&attr); err != 0 { - writeErrStr(failthreadcreate) - exit(1) - } - - // Find out OS stack size for our own stack guard. - var stacksize uintptr - if pthread_attr_getstacksize(&attr, &stacksize) != 0 { - writeErrStr(failthreadcreate) - exit(1) - } - mp.g0.stack.hi = stacksize // for mstart - - // Tell the pthread library we won't join with this thread. 
- if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 { - writeErrStr(failthreadcreate) - exit(1) - } - - // Finally, create the thread. It starts at mstart_stub, which does some low-level - // setup and then calls mstart. - var oset sigset - sigprocmask(_SIG_SETMASK, &sigset_all, &oset) - err := retryOnEAGAIN(func() int32 { - return pthread_create(&attr, abi.FuncPCABI0(mstart_stub), unsafe.Pointer(mp)) - }) - sigprocmask(_SIG_SETMASK, &oset, nil) - if err != 0 { - writeErrStr(failthreadcreate) - exit(1) - } - - pthread_attr_destroy(&attr) -} diff --git a/src/runtime/os_openbsd_mips64.go b/src/runtime/os_openbsd_mips64.go deleted file mode 100644 index e5eeb2dcd1..0000000000 --- a/src/runtime/os_openbsd_mips64.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package runtime - -//go:nosplit -func cputicks() int64 { - // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler. - return nanotime() -} diff --git a/src/runtime/os_openbsd_syscall.go b/src/runtime/os_openbsd_syscall.go deleted file mode 100644 index d784f76475..0000000000 --- a/src/runtime/os_openbsd_syscall.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build openbsd && mips64 - -package runtime - -import ( - "internal/abi" - "internal/goarch" - "unsafe" -) - -//go:noescape -func tfork(param *tforkt, psize uintptr, mm *m, gg *g, fn uintptr) int32 - -// May run with m.p==nil, so write barriers are not allowed. -// -//go:nowritebarrier -func newosproc(mp *m) { - stk := unsafe.Pointer(mp.g0.stack.hi) - if false { - print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n") - } - - // Stack pointer must point inside stack area (as marked with MAP_STACK), - // rather than at the top of it. - param := tforkt{ - tf_tcb: unsafe.Pointer(&mp.tls[0]), - tf_tid: nil, // minit will record tid - tf_stack: uintptr(stk) - goarch.PtrSize, - } - - var oset sigset - sigprocmask(_SIG_SETMASK, &sigset_all, &oset) - ret := retryOnEAGAIN(func() int32 { - errno := tfork(¶m, unsafe.Sizeof(param), mp, mp.g0, abi.FuncPCABI0(mstart)) - // tfork returns negative errno - return -errno - }) - sigprocmask(_SIG_SETMASK, &oset, nil) - - if ret != 0 { - print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", ret, ")\n") - if ret == _EAGAIN { - println("runtime: may need to increase max user processes (ulimit -p)") - } - throw("runtime.newosproc") - } -} diff --git a/src/runtime/os_openbsd_syscall1.go b/src/runtime/os_openbsd_syscall1.go deleted file mode 100644 index d32894ba6a..0000000000 --- a/src/runtime/os_openbsd_syscall1.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build openbsd && mips64 - -package runtime - -//go:noescape -func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32 - -//go:noescape -func thrwakeup(ident uintptr, n int32) int32 - -func osyield() - -//go:nosplit -func osyield_no_g() { - osyield() -} diff --git a/src/runtime/os_openbsd_syscall2.go b/src/runtime/os_openbsd_syscall2.go deleted file mode 100644 index 072f53320d..0000000000 --- a/src/runtime/os_openbsd_syscall2.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build openbsd && mips64 - -package runtime - -import ( - "internal/runtime/atomic" - "unsafe" -) - -//go:noescape -func sigaction(sig uint32, new, old *sigactiont) - -func kqueue() int32 - -//go:noescape -func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 - -func raiseproc(sig uint32) - -func getthrid() int32 -func thrkill(tid int32, sig int) - -// read calls the read system call. -// It returns a non-negative number of bytes written or a negative errno value. -func read(fd int32, p unsafe.Pointer, n int32) int32 - -func closefd(fd int32) int32 - -func exit(code int32) -func usleep(usec uint32) - -//go:nosplit -func usleep_no_g(usec uint32) { - usleep(usec) -} - -// write1 calls the write system call. -// It returns a non-negative number of bytes written or a negative errno value. -// -//go:noescape -func write1(fd uintptr, p unsafe.Pointer, n int32) int32 - -//go:noescape -func open(name *byte, mode, perm int32) int32 - -// return value is only set on linux to be used in osinit(). -func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32 - -// exitThread terminates the current thread, writing *wait = freeMStack when -// the stack is safe to reclaim. -// -//go:noescape -func exitThread(wait *atomic.Uint32) - -//go:noescape -func obsdsigprocmask(how int32, new sigset) sigset - -//go:nosplit -//go:nowritebarrierrec -func sigprocmask(how int32, new, old *sigset) { - n := sigset(0) - if new != nil { - n = *new - } - r := obsdsigprocmask(how, n) - if old != nil { - *old = r - } -} - -func pipe2(flags int32) (r, w int32, errno int32) - -//go:noescape -func setitimer(mode int32, new, old *itimerval) - -//go:noescape -func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 - -// mmap calls the mmap system call. It is implemented in assembly. -// We only pass the lower 32 bits of file offset to the -// assembly routine; the higher bits (if required), should be provided -// by the assembly routine as 0. -// The err result is an OS error code such as ENOMEM. -func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int) - -// munmap calls the munmap system call. It is implemented in assembly. 
-func munmap(addr unsafe.Pointer, n uintptr) - -func nanotime1() int64 - -//go:noescape -func sigaltstack(new, old *stackt) - -func fcntl(fd, cmd, arg int32) (ret int32, errno int32) - -func walltime() (sec int64, nsec int32) - -func issetugid() int32 diff --git a/src/runtime/os_solaris.go b/src/runtime/os_solaris.go index 5f6163f131..42b7e4a6bc 100644 --- a/src/runtime/os_solaris.go +++ b/src/runtime/os_solaris.go @@ -21,9 +21,8 @@ type mscratch struct { type mOS struct { waitsema uintptr // semaphore for parking on locks perrno *int32 // pointer to tls errno - // these are here because they are too large to be on the stack - // of low-level NOSPLIT functions. - //LibCall libcall; + // This is here to avoid using the G stack so the stack can move during the call. + libcall libcall ts mts scratch mscratch } diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go index 8f77cd50f8..ab4e165bae 100644 --- a/src/runtime/os_windows.go +++ b/src/runtime/os_windows.go @@ -8,6 +8,7 @@ import ( "internal/abi" "internal/runtime/atomic" "internal/runtime/sys" + "internal/runtime/syscall/windows" "unsafe" ) @@ -40,7 +41,6 @@ const ( //go:cgo_import_dynamic runtime._GetThreadContext GetThreadContext%2 "kernel32.dll" //go:cgo_import_dynamic runtime._SetThreadContext SetThreadContext%2 "kernel32.dll" //go:cgo_import_dynamic runtime._LoadLibraryExW LoadLibraryExW%3 "kernel32.dll" -//go:cgo_import_dynamic runtime._LoadLibraryW LoadLibraryW%1 "kernel32.dll" //go:cgo_import_dynamic runtime._PostQueuedCompletionStatus PostQueuedCompletionStatus%4 "kernel32.dll" //go:cgo_import_dynamic runtime._QueryPerformanceCounter QueryPerformanceCounter%1 "kernel32.dll" //go:cgo_import_dynamic runtime._QueryPerformanceFrequency QueryPerformanceFrequency%1 "kernel32.dll" @@ -98,7 +98,6 @@ var ( _GetThreadContext, _SetThreadContext, _LoadLibraryExW, - _LoadLibraryW, _PostQueuedCompletionStatus, _QueryPerformanceCounter, _QueryPerformanceFrequency, @@ -160,6 +159,9 @@ func tstart_stdcall(newm *m) func wintls() type mOS struct { + // This is here to avoid using the G stack so the stack can move during the call. + stdCallInfo windows.StdCallInfo + threadLock mutex // protects "thread" and prevents closing thread uintptr // thread handle @@ -210,19 +212,15 @@ func read(fd int32, p unsafe.Pointer, n int32) int32 { type sigset struct{} -// Call a Windows function with stdcall conventions, -// and switch to os stack during the call. 
-func asmstdcall(fn unsafe.Pointer) - var asmstdcallAddr unsafe.Pointer -type winlibcall libcall +type winlibcall windows.StdCallInfo func windowsFindfunc(lib uintptr, name []byte) stdFunction { if name[len(name)-1] != 0 { throw("usage") } - f := stdcall2(_GetProcAddress, lib, uintptr(unsafe.Pointer(&name[0]))) + f := stdcall(_GetProcAddress, lib, uintptr(unsafe.Pointer(&name[0]))) return stdFunction(unsafe.Pointer(f)) } @@ -231,7 +229,7 @@ var sysDirectory [_MAX_PATH + 1]byte var sysDirectoryLen uintptr func initSysDirectory() { - l := stdcall2(_GetSystemDirectoryA, uintptr(unsafe.Pointer(&sysDirectory[0])), uintptr(len(sysDirectory)-1)) + l := stdcall(_GetSystemDirectoryA, uintptr(unsafe.Pointer(&sysDirectory[0])), uintptr(len(sysDirectory)-1)) if l == 0 || l > uintptr(len(sysDirectory)-1) { throw("Unable to determine system directory") } @@ -245,20 +243,21 @@ func windows_GetSystemDirectory() string { } func windowsLoadSystemLib(name []uint16) uintptr { - return stdcall3(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32) + const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800 + return stdcall(_LoadLibraryExW, uintptr(unsafe.Pointer(&name[0])), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32) } //go:linkname windows_QueryPerformanceCounter internal/syscall/windows.QueryPerformanceCounter func windows_QueryPerformanceCounter() int64 { var counter int64 - stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter))) + stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter))) return counter } //go:linkname windows_QueryPerformanceFrequency internal/syscall/windows.QueryPerformanceFrequency func windows_QueryPerformanceFrequency() int64 { var frequency int64 - stdcall1(_QueryPerformanceFrequency, uintptr(unsafe.Pointer(&frequency))) + stdcall(_QueryPerformanceFrequency, uintptr(unsafe.Pointer(&frequency))) return frequency } @@ -309,7 +308,7 @@ func monitorSuspendResume() { var fn any = func(context uintptr, changeType uint32, setting uintptr) uintptr { for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink { if mp.resumesema != 0 { - stdcall1(_SetEvent, mp.resumesema) + stdcall(_SetEvent, mp.resumesema) } } return 0 @@ -318,13 +317,13 @@ func monitorSuspendResume() { callback: compileCallback(*efaceOf(&fn), true), } handle := uintptr(0) - stdcall3(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK, + stdcall(powerRegisterSuspendResumeNotification, _DEVICE_NOTIFY_CALLBACK, uintptr(unsafe.Pointer(¶ms)), uintptr(unsafe.Pointer(&handle))) } func getCPUCount() int32 { var mask, sysmask uintptr - ret := stdcall3(_GetProcessAffinityMask, currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask))) + ret := stdcall(_GetProcessAffinityMask, currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask))) if ret != 0 { n := 0 maskbits := int(unsafe.Sizeof(mask) * 8) @@ -339,13 +338,13 @@ func getCPUCount() int32 { } // use GetSystemInfo if GetProcessAffinityMask fails var info systeminfo - stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info))) + stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info))) return int32(info.dwnumberofprocessors) } func getPageSize() uintptr { var info systeminfo - stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info))) + stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info))) return uintptr(info.dwpagesize) } @@ -384,9 +383,9 @@ func osRelax(relax bool) uint32 { } if relax { - return uint32(stdcall1(_timeEndPeriod, 1)) + return uint32(stdcall(_timeEndPeriod, 
1)) } else { - return uint32(stdcall1(_timeBeginPeriod, 1)) + return uint32(stdcall(_timeBeginPeriod, 1)) } } @@ -415,7 +414,7 @@ func createHighResTimer() uintptr { _TIMER_QUERY_STATE = 0x0001 _TIMER_MODIFY_STATE = 0x0002 ) - return stdcall4(_CreateWaitableTimerExW, 0, 0, + return stdcall(_CreateWaitableTimerExW, 0, 0, _CREATE_WAITABLE_TIMER_HIGH_RESOLUTION, _SYNCHRONIZE|_TIMER_QUERY_STATE|_TIMER_MODIFY_STATE) } @@ -425,7 +424,7 @@ func initHighResTimer() { if h != 0 { haveHighResTimer = true haveHighResSleep = _NtCreateWaitCompletionPacket != nil - stdcall1(_CloseHandle, h) + stdcall(_CloseHandle, h) } else { // Only load winmm.dll if we need it. // This avoids a dependency on winmm.dll for Go programs @@ -457,7 +456,7 @@ func initLongPathSupport() { // Check that we're ≥ 10.0.15063. info := _OSVERSIONINFOW{} info.osVersionInfoSize = uint32(unsafe.Sizeof(info)) - stdcall1(_RtlGetVersion, uintptr(unsafe.Pointer(&info))) + stdcall(_RtlGetVersion, uintptr(unsafe.Pointer(&info))) if info.majorVersion < 10 || (info.majorVersion == 10 && info.minorVersion == 0 && info.buildNumber < 15063) { return } @@ -465,14 +464,14 @@ func initLongPathSupport() { // Set the IsLongPathAwareProcess flag of the PEB's bit field. // This flag is not documented, but it's known to be used // by Windows to enable long path support. - bitField := (*byte)(unsafe.Pointer(stdcall0(_RtlGetCurrentPeb) + PebBitFieldOffset)) + bitField := (*byte)(unsafe.Pointer(stdcall(_RtlGetCurrentPeb) + PebBitFieldOffset)) *bitField |= IsLongPathAwareProcess canUseLongPaths = true } func osinit() { - asmstdcallAddr = unsafe.Pointer(abi.FuncPCABI0(asmstdcall)) + asmstdcallAddr = unsafe.Pointer(windows.AsmStdCallAddr()) loadOptionalSyscalls() @@ -494,13 +493,13 @@ func osinit() { // of dedicated threads -- GUI, IO, computational, etc. Go processes use // equivalent threads that all do a mix of GUI, IO, computations, etc. // In such context dynamic priority boosting does nothing but harm, so we turn it off. - stdcall2(_SetProcessPriorityBoost, currentProcess, 1) + stdcall(_SetProcessPriorityBoost, currentProcess, 1) } //go:nosplit func readRandom(r []byte) int { n := 0 - if stdcall2(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 { + if stdcall(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 { n = len(r) } return n @@ -510,7 +509,7 @@ func goenvs() { // strings is a pointer to environment variable pairs in the form: // "envA=valA\x00envB=valB\x00\x00" (in UTF-16) // Two consecutive zero bytes end the list. - strings := unsafe.Pointer(stdcall0(_GetEnvironmentStringsW)) + strings := unsafe.Pointer(stdcall(_GetEnvironmentStringsW)) p := (*[1 << 24]uint16)(strings)[:] n := 0 @@ -534,13 +533,13 @@ func goenvs() { p = p[1:] // skip nil byte } - stdcall1(_FreeEnvironmentStringsW, uintptr(strings)) + stdcall(_FreeEnvironmentStringsW, uintptr(strings)) // We call these all the way here, late in init, so that malloc works // for the callback functions these generate. var fn any = ctrlHandler ctrlHandlerPC := compileCallback(*efaceOf(&fn), true) - stdcall2(_SetConsoleCtrlHandler, ctrlHandlerPC, 1) + stdcall(_SetConsoleCtrlHandler, ctrlHandlerPC, 1) monitorSuspendResume() } @@ -556,7 +555,7 @@ func exit(code int32) { // kills the suspending thread, and then this thread suspends. 
lock(&suspendLock) atomic.Store(&exiting, 1) - stdcall1(_ExitProcess, uintptr(code)) + stdcall(_ExitProcess, uintptr(code)) } // write1 must be nosplit because it's used as a last resort in @@ -572,9 +571,9 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 { var handle uintptr switch fd { case 1: - handle = stdcall1(_GetStdHandle, _STD_OUTPUT_HANDLE) + handle = stdcall(_GetStdHandle, _STD_OUTPUT_HANDLE) case 2: - handle = stdcall1(_GetStdHandle, _STD_ERROR_HANDLE) + handle = stdcall(_GetStdHandle, _STD_ERROR_HANDLE) default: // assume fd is real windows handle. handle = fd @@ -590,7 +589,7 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 { if !isASCII { var m uint32 - isConsole := stdcall2(_GetConsoleMode, handle, uintptr(unsafe.Pointer(&m))) != 0 + isConsole := stdcall(_GetConsoleMode, handle, uintptr(unsafe.Pointer(&m))) != 0 // If this is a console output, various non-unicode code pages can be in use. // Use the dedicated WriteConsole call to ensure unicode is printed correctly. if isConsole { @@ -598,7 +597,7 @@ func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 { } } var written uint32 - stdcall5(_WriteFile, handle, uintptr(buf), uintptr(n), uintptr(unsafe.Pointer(&written)), 0) + stdcall(_WriteFile, handle, uintptr(buf), uintptr(n), uintptr(unsafe.Pointer(&written)), 0) return int32(written) } @@ -651,7 +650,7 @@ func writeConsoleUTF16(handle uintptr, b []uint16) { return } var written uint32 - stdcall5(_WriteConsoleW, + stdcall(_WriteConsoleW, handle, uintptr(unsafe.Pointer(&b[0])), uintptr(l), @@ -672,7 +671,7 @@ func semasleep(ns int64) int32 { var result uintptr if ns < 0 { - result = stdcall2(_WaitForSingleObject, getg().m.waitsema, uintptr(_INFINITE)) + result = stdcall(_WaitForSingleObject, getg().m.waitsema, uintptr(_INFINITE)) } else { start := nanotime() elapsed := int64(0) @@ -681,7 +680,7 @@ func semasleep(ns int64) int32 { if ms == 0 { ms = 1 } - result = stdcall4(_WaitForMultipleObjects, 2, + result = stdcall(_WaitForMultipleObjects, 2, uintptr(unsafe.Pointer(&[2]uintptr{getg().m.waitsema, getg().m.resumesema})), 0, uintptr(ms)) if result != _WAIT_OBJECT_0+1 { @@ -724,7 +723,7 @@ func semasleep(ns int64) int32 { //go:nosplit func semawakeup(mp *m) { - if stdcall1(_SetEvent, mp.waitsema) == 0 { + if stdcall(_SetEvent, mp.waitsema) == 0 { systemstack(func() { print("runtime: setevent failed; errno=", getlasterror(), "\n") throw("runtime.semawakeup") @@ -737,20 +736,20 @@ func semacreate(mp *m) { if mp.waitsema != 0 { return } - mp.waitsema = stdcall4(_CreateEventA, 0, 0, 0, 0) + mp.waitsema = stdcall(_CreateEventA, 0, 0, 0, 0) if mp.waitsema == 0 { systemstack(func() { print("runtime: createevent failed; errno=", getlasterror(), "\n") throw("runtime.semacreate") }) } - mp.resumesema = stdcall4(_CreateEventA, 0, 0, 0, 0) + mp.resumesema = stdcall(_CreateEventA, 0, 0, 0, 0) if mp.resumesema == 0 { systemstack(func() { print("runtime: createevent failed; errno=", getlasterror(), "\n") throw("runtime.semacreate") }) - stdcall1(_CloseHandle, mp.waitsema) + stdcall(_CloseHandle, mp.waitsema) mp.waitsema = 0 } } @@ -763,7 +762,7 @@ func semacreate(mp *m) { //go:nosplit func newosproc(mp *m) { // We pass 0 for the stack size to use the default for this binary. - thandle := stdcall6(_CreateThread, 0, 0, + thandle := stdcall(_CreateThread, 0, 0, abi.FuncPCABI0(tstart_stdcall), uintptr(unsafe.Pointer(mp)), 0, 0) @@ -781,7 +780,7 @@ func newosproc(mp *m) { } // Close thandle to avoid leaking the thread object if it exits. 
- stdcall1(_CloseHandle, thandle) + stdcall(_CloseHandle, thandle) } // Used by the C library build mode. On Linux this function would allocate a @@ -829,7 +828,7 @@ func sigblock(exiting bool) { // Called on the new thread, cannot allocate Go memory. func minit() { var thandle uintptr - if stdcall7(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 { + if stdcall(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 { print("runtime.minit: duplicatehandle failed; errno=", getlasterror(), "\n") throw("runtime.minit: duplicatehandle failed") } @@ -837,7 +836,7 @@ func minit() { mp := getg().m lock(&mp.threadLock) mp.thread = thandle - mp.procid = uint64(stdcall0(_GetCurrentThreadId)) + mp.procid = uint64(stdcall(_GetCurrentThreadId)) // Configure usleep timer, if possible. if mp.highResTimer == 0 && haveHighResTimer { @@ -854,7 +853,7 @@ func minit() { throw("CreateWaitableTimerEx when creating timer failed") } const GENERIC_ALL = 0x10000000 - errno := stdcall3(_NtCreateWaitCompletionPacket, uintptr(unsafe.Pointer(&mp.waitIocpHandle)), GENERIC_ALL, 0) + errno := stdcall(_NtCreateWaitCompletionPacket, uintptr(unsafe.Pointer(&mp.waitIocpHandle)), GENERIC_ALL, 0) if mp.waitIocpHandle == 0 { print("runtime: NtCreateWaitCompletionPacket failed; errno=", errno, "\n") throw("NtCreateWaitCompletionPacket failed") @@ -865,7 +864,7 @@ func minit() { // Query the true stack base from the OS. Currently we're // running on a small assumed stack. var mbi memoryBasicInformation - res := stdcall3(_VirtualQuery, uintptr(unsafe.Pointer(&mbi)), uintptr(unsafe.Pointer(&mbi)), unsafe.Sizeof(mbi)) + res := stdcall(_VirtualQuery, uintptr(unsafe.Pointer(&mbi)), uintptr(unsafe.Pointer(&mbi)), unsafe.Sizeof(mbi)) if res == 0 { print("runtime: VirtualQuery failed; errno=", getlasterror(), "\n") throw("VirtualQuery for stack base failed") @@ -897,7 +896,7 @@ func unminit() { mp := getg().m lock(&mp.threadLock) if mp.thread != 0 { - stdcall1(_CloseHandle, mp.thread) + stdcall(_CloseHandle, mp.thread) mp.thread = 0 } unlock(&mp.threadLock) @@ -909,56 +908,64 @@ func unminit() { // resources in minit, semacreate, or elsewhere. Do not take locks after calling this. // // This always runs without a P, so //go:nowritebarrierrec is required. +// //go:nowritebarrierrec //go:nosplit func mdestroy(mp *m) { if mp.highResTimer != 0 { - stdcall1(_CloseHandle, mp.highResTimer) + stdcall(_CloseHandle, mp.highResTimer) mp.highResTimer = 0 } if mp.waitIocpTimer != 0 { - stdcall1(_CloseHandle, mp.waitIocpTimer) + stdcall(_CloseHandle, mp.waitIocpTimer) mp.waitIocpTimer = 0 } if mp.waitIocpHandle != 0 { - stdcall1(_CloseHandle, mp.waitIocpHandle) + stdcall(_CloseHandle, mp.waitIocpHandle) mp.waitIocpHandle = 0 } if mp.waitsema != 0 { - stdcall1(_CloseHandle, mp.waitsema) + stdcall(_CloseHandle, mp.waitsema) mp.waitsema = 0 } if mp.resumesema != 0 { - stdcall1(_CloseHandle, mp.resumesema) + stdcall(_CloseHandle, mp.resumesema) mp.resumesema = 0 } } -// asmstdcall_trampoline calls asmstdcall converting from Go to C calling convention. -func asmstdcall_trampoline(args unsafe.Pointer) - -// stdcall_no_g calls asmstdcall on os stack without using g. +// stdcall_no_g is like [stdcall] but can be called without a G. 
// +//go:nowritebarrier //go:nosplit -func stdcall_no_g(fn stdFunction, n int, args uintptr) uintptr { - libcall := libcall{ - fn: uintptr(unsafe.Pointer(fn)), - n: uintptr(n), - args: args, +//go:uintptrkeepalive +func stdcall_no_g(fn stdFunction, args ...uintptr) uintptr { + call := windows.StdCallInfo{ + Fn: uintptr(unsafe.Pointer(fn)), + N: uintptr(len(args)), + } + if len(args) > 0 { + call.Args = uintptr(abi.NoEscape(unsafe.Pointer(&args[0]))) } - asmstdcall_trampoline(noescape(unsafe.Pointer(&libcall))) - return libcall.r1 + windows.StdCall(&call) + return call.R1 } -// Calling stdcall on os stack. +// stdcall calls fn with the given arguments using the stdcall calling convention. +// Must be called from the system stack. // May run during STW, so write barriers are not allowed. // //go:nowritebarrier //go:nosplit -func stdcall(fn stdFunction) uintptr { +//go:uintptrkeepalive +func stdcall(fn stdFunction, args ...uintptr) uintptr { gp := getg() mp := gp.m - mp.libcall.fn = uintptr(unsafe.Pointer(fn)) + mp.stdCallInfo.Fn = uintptr(unsafe.Pointer(fn)) + mp.stdCallInfo.N = uintptr(len(args)) + if len(args) > 0 { + mp.stdCallInfo.Args = uintptr(abi.NoEscape(unsafe.Pointer(&args[0]))) + } resetLibcall := false if mp.profilehz != 0 && mp.libcallsp == 0 { // leave pc/sp for cpu profiler @@ -969,112 +976,31 @@ func stdcall(fn stdFunction) uintptr { mp.libcallsp = sys.GetCallerSP() resetLibcall = true // See comment in sys_darwin.go:libcCall } - asmcgocall(asmstdcallAddr, unsafe.Pointer(&mp.libcall)) + asmcgocall(asmstdcallAddr, unsafe.Pointer(&mp.stdCallInfo)) if resetLibcall { mp.libcallsp = 0 } - return mp.libcall.r1 -} - -//go:nosplit -func stdcall0(fn stdFunction) uintptr { - mp := getg().m - mp.libcall.n = 0 - mp.libcall.args = 0 - return stdcall(fn) -} - -//go:nosplit -//go:cgo_unsafe_args -func stdcall1(fn stdFunction, a0 uintptr) uintptr { - mp := getg().m - mp.libcall.n = 1 - mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0))) - return stdcall(fn) -} - -//go:nosplit -//go:cgo_unsafe_args -func stdcall2(fn stdFunction, a0, a1 uintptr) uintptr { - mp := getg().m - mp.libcall.n = 2 - mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0))) - return stdcall(fn) -} - -//go:nosplit -//go:cgo_unsafe_args -func stdcall3(fn stdFunction, a0, a1, a2 uintptr) uintptr { - mp := getg().m - mp.libcall.n = 3 - mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0))) - return stdcall(fn) -} - -//go:nosplit -//go:cgo_unsafe_args -func stdcall4(fn stdFunction, a0, a1, a2, a3 uintptr) uintptr { - mp := getg().m - mp.libcall.n = 4 - mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0))) - return stdcall(fn) -} - -//go:nosplit -//go:cgo_unsafe_args -func stdcall5(fn stdFunction, a0, a1, a2, a3, a4 uintptr) uintptr { - mp := getg().m - mp.libcall.n = 5 - mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0))) - return stdcall(fn) -} - -//go:nosplit -//go:cgo_unsafe_args -func stdcall6(fn stdFunction, a0, a1, a2, a3, a4, a5 uintptr) uintptr { - mp := getg().m - mp.libcall.n = 6 - mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0))) - return stdcall(fn) -} - -//go:nosplit -//go:cgo_unsafe_args -func stdcall7(fn stdFunction, a0, a1, a2, a3, a4, a5, a6 uintptr) uintptr { - mp := getg().m - mp.libcall.n = 7 - mp.libcall.args = uintptr(noescape(unsafe.Pointer(&a0))) - return stdcall(fn) -} - -//go:nosplit -//go:cgo_unsafe_args -func stdcall8(fn stdFunction, a0, a1, a2, a3, a4, a5, a6, a7 uintptr) uintptr { - mp := getg().m - mp.libcall.n = 8 - mp.libcall.args = 
uintptr(noescape(unsafe.Pointer(&a0))) - return stdcall(fn) + return mp.stdCallInfo.R1 } // These must run on the system stack only. //go:nosplit func osyield_no_g() { - stdcall_no_g(_SwitchToThread, 0, 0) + stdcall_no_g(_SwitchToThread) } //go:nosplit func osyield() { systemstack(func() { - stdcall0(_SwitchToThread) + stdcall(_SwitchToThread) }) } //go:nosplit func usleep_no_g(us uint32) { timeout := uintptr(us) / 1000 // ms units - args := [...]uintptr{_INVALID_HANDLE_VALUE, timeout} - stdcall_no_g(_WaitForSingleObject, len(args), uintptr(noescape(unsafe.Pointer(&args[0])))) + stdcall_no_g(_WaitForSingleObject, _INVALID_HANDLE_VALUE, timeout) } //go:nosplit @@ -1086,13 +1012,13 @@ func usleep(us uint32) { if haveHighResTimer && getg().m.highResTimer != 0 { h = getg().m.highResTimer dt := -10 * int64(us) // relative sleep (negative), 100ns units - stdcall6(_SetWaitableTimer, h, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0) + stdcall(_SetWaitableTimer, h, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0) timeout = _INFINITE } else { h = _INVALID_HANDLE_VALUE timeout = uintptr(us) / 1000 // ms units } - stdcall2(_WaitForSingleObject, h, timeout) + stdcall(_WaitForSingleObject, h, timeout) }) } @@ -1133,7 +1059,7 @@ func profilem(mp *m, thread uintptr) { c = (*context)(unsafe.Pointer((uintptr(unsafe.Pointer(&cbuf[15]))) &^ 15)) c.contextflags = _CONTEXT_CONTROL - stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(c))) + stdcall(_GetThreadContext, thread, uintptr(unsafe.Pointer(c))) gp := gFromSP(mp, c.sp()) @@ -1154,10 +1080,10 @@ func gFromSP(mp *m, sp uintptr) *g { } func profileLoop() { - stdcall2(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST) + stdcall(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST) for { - stdcall2(_WaitForSingleObject, profiletimer, _INFINITE) + stdcall(_WaitForSingleObject, profiletimer, _INFINITE) first := (*m)(atomic.Loadp(unsafe.Pointer(&allm))) for mp := first; mp != nil; mp = mp.alllink { if mp == getg().m { @@ -1175,7 +1101,7 @@ func profileLoop() { } // Acquire our own handle to the thread. var thread uintptr - if stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 { + if stdcall(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 { print("runtime: duplicatehandle failed; errno=", getlasterror(), "\n") throw("duplicatehandle failed") } @@ -1185,9 +1111,9 @@ func profileLoop() { // above and the SuspendThread. The handle // will remain valid, but SuspendThread may // fail. - if int32(stdcall1(_SuspendThread, thread)) == -1 { + if int32(stdcall(_SuspendThread, thread)) == -1 { // The thread no longer exists. - stdcall1(_CloseHandle, thread) + stdcall(_CloseHandle, thread) continue } if mp.profilehz != 0 && !mp.blocked { @@ -1195,8 +1121,8 @@ func profileLoop() { // was in the process of shutting down. 
profilem(mp, thread) } - stdcall1(_ResumeThread, thread) - stdcall1(_CloseHandle, thread) + stdcall(_ResumeThread, thread) + stdcall(_CloseHandle, thread) } } } @@ -1207,7 +1133,7 @@ func setProcessCPUProfiler(hz int32) { if haveHighResTimer { timer = createHighResTimer() } else { - timer = stdcall3(_CreateWaitableTimerA, 0, 0, 0) + timer = stdcall(_CreateWaitableTimerA, 0, 0, 0) } atomic.Storeuintptr(&profiletimer, timer) newm(profileLoop, nil, -1) @@ -1224,7 +1150,7 @@ func setThreadCPUProfiler(hz int32) { } due = int64(ms) * -10000 } - stdcall6(_SetWaitableTimer, profiletimer, uintptr(unsafe.Pointer(&due)), uintptr(ms), 0, 0, 0) + stdcall(_SetWaitableTimer, profiletimer, uintptr(unsafe.Pointer(&due)), uintptr(ms), 0, 0, 0) atomic.Store((*uint32)(unsafe.Pointer(&getg().m.profilehz)), uint32(hz)) } @@ -1257,7 +1183,7 @@ func preemptM(mp *m) { return } var thread uintptr - if stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 { + if stdcall(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 { print("runtime.preemptM: duplicatehandle failed; errno=", getlasterror(), "\n") throw("runtime.preemptM: duplicatehandle failed") } @@ -1277,9 +1203,9 @@ func preemptM(mp *m) { lock(&suspendLock) // Suspend the thread. - if int32(stdcall1(_SuspendThread, thread)) == -1 { + if int32(stdcall(_SuspendThread, thread)) == -1 { unlock(&suspendLock) - stdcall1(_CloseHandle, thread) + stdcall(_CloseHandle, thread) atomic.Store(&mp.preemptExtLock, 0) // The thread no longer exists. This shouldn't be // possible, but just acknowledge the request. @@ -1296,7 +1222,7 @@ func preemptM(mp *m) { // We have to get the thread context before inspecting the M // because SuspendThread only requests a suspend. // GetThreadContext actually blocks until it's suspended. - stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(c))) + stdcall(_GetThreadContext, thread, uintptr(unsafe.Pointer(c))) unlock(&suspendLock) @@ -1307,7 +1233,7 @@ func preemptM(mp *m) { // Inject call to asyncPreempt targetPC := abi.FuncPCABI0(asyncPreempt) c.pushCall(targetPC, resumePC) - stdcall2(_SetThreadContext, thread, uintptr(unsafe.Pointer(c))) + stdcall(_SetThreadContext, thread, uintptr(unsafe.Pointer(c))) } } @@ -1316,8 +1242,8 @@ func preemptM(mp *m) { // Acknowledge the preemption. 
mp.preemptGen.Add(1) - stdcall1(_ResumeThread, thread) - stdcall1(_CloseHandle, thread) + stdcall(_ResumeThread, thread) + stdcall(_CloseHandle, thread) } // osPreemptExtEnter is called before entering external code that may diff --git a/src/runtime/os_windows_arm.go b/src/runtime/os_windows_arm.go index 10aff75e31..bc29843241 100644 --- a/src/runtime/os_windows_arm.go +++ b/src/runtime/os_windows_arm.go @@ -9,7 +9,7 @@ import "unsafe" //go:nosplit func cputicks() int64 { var counter int64 - stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter))) + stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter))) return counter } diff --git a/src/runtime/os_windows_arm64.go b/src/runtime/os_windows_arm64.go index 7e413445ba..bd80c08b0e 100644 --- a/src/runtime/os_windows_arm64.go +++ b/src/runtime/os_windows_arm64.go @@ -9,6 +9,6 @@ import "unsafe" //go:nosplit func cputicks() int64 { var counter int64 - stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter))) + stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter))) return counter } diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 95305b84bc..8f9ab4dd47 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -112,13 +112,13 @@ func panicCheck2(err string) { //go:yeswritebarrierrec func goPanicIndex(x int, y int) { panicCheck1(sys.GetCallerPC(), "index out of range") - panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex}) + panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsIndex}) } //go:yeswritebarrierrec func goPanicIndexU(x uint, y int) { panicCheck1(sys.GetCallerPC(), "index out of range") - panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex}) + panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsIndex}) } // failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s)) @@ -126,25 +126,25 @@ func goPanicIndexU(x uint, y int) { //go:yeswritebarrierrec func goPanicSliceAlen(x int, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen}) + panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSliceAlen}) } //go:yeswritebarrierrec func goPanicSliceAlenU(x uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen}) + panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSliceAlen}) } //go:yeswritebarrierrec func goPanicSliceAcap(x int, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap}) + panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSliceAcap}) } //go:yeswritebarrierrec func goPanicSliceAcapU(x uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap}) + panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSliceAcap}) } // failures in the comparisons for s[x:y], 0 <= x <= y @@ -152,57 +152,57 @@ func goPanicSliceAcapU(x uint, y int) { //go:yeswritebarrierrec func goPanicSliceB(x int, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB}) + panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSliceB}) } //go:yeswritebarrierrec func goPanicSliceBU(x uint, y int) { 
panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB}) + panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSliceB}) } // failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s)) func goPanicSlice3Alen(x int, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen}) + panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3Alen}) } func goPanicSlice3AlenU(x uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen}) + panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3Alen}) } func goPanicSlice3Acap(x int, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap}) + panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3Acap}) } func goPanicSlice3AcapU(x uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap}) + panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3Acap}) } // failures in the comparisons for s[:x:y], 0 <= x <= y func goPanicSlice3B(x int, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B}) + panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3B}) } func goPanicSlice3BU(x uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B}) + panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3B}) } // failures in the comparisons for s[x:y:], 0 <= x <= y func goPanicSlice3C(x int, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C}) + panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsSlice3C}) } func goPanicSlice3CU(x uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C}) + panic(boundsError{x: int64(x), signed: false, y: y, code: abi.BoundsSlice3C}) } // failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s) func goPanicSliceConvert(x int, y int) { panicCheck1(sys.GetCallerPC(), "slice length too short to convert to array or pointer to array") - panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert}) + panic(boundsError{x: int64(x), signed: true, y: y, code: abi.BoundsConvert}) } // Implemented in assembly, as they take arguments in registers. 
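
Note: the hunk just below introduces panicBounds64/panicBounds32, which recover the failing index and length from a single packed PCDATA value via abi.BoundsDecode — each of x and y is either a register number or a small immediate. As a rough, self-contained sketch of that idea (the bit layout here is made up for illustration and is not the real internal/abi encoding):

package main

import "fmt"

// A toy payload layout — hypothetical, NOT the real internal/abi encoding:
//
//	bit 0       signed flag
//	bit 1       x-is-register flag
//	bit 2       y-is-register flag
//	bits 3-6    failure code (index, slice alen, ...)
//	bits 7-14   x register number, or a small immediate
//	bits 15-22  y register number, or a small immediate
const (
	boundsIndex = iota
	boundsSliceAlen
	// further codes elided
)

func encode(code int, signed, xIsReg, yIsReg bool, xVal, yVal int) int {
	v := code << 3
	if signed {
		v |= 1
	}
	if xIsReg {
		v |= 2
	}
	if yIsReg {
		v |= 4
	}
	return v | xVal<<7 | yVal<<15
}

func decode(v int) (code int, signed, xIsReg, yIsReg bool, xVal, yVal int) {
	return (v >> 3) & 0xf, v&1 != 0, v&2 != 0, v&4 != 0, (v >> 7) & 0xff, (v >> 15) & 0xff
}

func main() {
	// An s[i] failure where i lives in register 5 and len(s) is the constant 3.
	v := encode(boundsIndex, true, true, false, 5, 3)
	code, signed, xIsReg, yIsReg, xVal, yVal := decode(v)
	fmt.Println(code, signed, xIsReg, yIsReg, xVal, yVal) // 0 true true false 5 3
}
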
@@ -225,6 +225,99 @@ func panicSlice3C(x int, y int) func panicSlice3CU(x uint, y int) func panicSliceConvert(x int, y int) +func panicBounds() // in asm_GOARCH.s files, called from generated code +func panicExtend() // in asm_GOARCH.s files, called from generated code (on 32-bit archs) +func panicBounds64(pc uintptr, regs *[16]int64) { // called from panicBounds on 64-bit archs + f := findfunc(pc) + v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1) + + code, signed, xIsReg, yIsReg, xVal, yVal := abi.BoundsDecode(int(v)) + + if code == abi.BoundsIndex { + panicCheck1(pc, "index out of range") + } else { + panicCheck1(pc, "slice bounds out of range") + } + + var e boundsError + e.code = code + e.signed = signed + if xIsReg { + e.x = regs[xVal] + } else { + e.x = int64(xVal) + } + if yIsReg { + e.y = int(regs[yVal]) + } else { + e.y = yVal + } + panic(e) +} + +func panicBounds32(pc uintptr, regs *[16]int32) { // called from panicBounds on 32-bit archs + f := findfunc(pc) + v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1) + + code, signed, xIsReg, yIsReg, xVal, yVal := abi.BoundsDecode(int(v)) + + if code == abi.BoundsIndex { + panicCheck1(pc, "index out of range") + } else { + panicCheck1(pc, "slice bounds out of range") + } + + var e boundsError + e.code = code + e.signed = signed + if xIsReg { + if signed { + e.x = int64(regs[xVal]) + } else { + e.x = int64(uint32(regs[xVal])) + } + } else { + e.x = int64(xVal) + } + if yIsReg { + e.y = int(regs[yVal]) + } else { + e.y = yVal + } + panic(e) +} + +func panicBounds32X(pc uintptr, regs *[16]int32) { // called from panicExtend on 32-bit archs + f := findfunc(pc) + v := pcdatavalue(f, abi.PCDATA_PanicBounds, pc-1) + + code, signed, xIsReg, yIsReg, xVal, yVal := abi.BoundsDecode(int(v)) + + if code == abi.BoundsIndex { + panicCheck1(pc, "index out of range") + } else { + panicCheck1(pc, "slice bounds out of range") + } + + var e boundsError + e.code = code + e.signed = signed + if xIsReg { + // Our 4-bit register numbers are actually 2 2-bit register numbers. + lo := xVal & 3 + hi := xVal >> 2 + e.x = int64(regs[hi])<<32 + int64(uint32(regs[lo])) + } else { + e.x = int64(xVal) + } + if yIsReg { + e.y = int(regs[yVal]) + } else { + e.y = yVal + } + panic(e) +} + var shiftError = error(errorString("negative shift amount")) //go:yeswritebarrierrec @@ -771,6 +864,7 @@ func gopanic(e any) { var p _panic p.arg = e + p.gopanicFP = unsafe.Pointer(sys.GetCallerSP()) runningPanicDefers.Add(1) @@ -865,10 +959,6 @@ func (p *_panic) nextDefer() (func(), bool) { } } - // The assembler adjusts p.argp in wrapper functions that shouldn't - // be visible to recover(), so we need to restore it each iteration. - p.argp = add(p.startSP, sys.MinFrameSize) - for { for p.deferBitsPtr != nil { bits := *p.deferBitsPtr @@ -993,27 +1083,89 @@ func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool { } // The implementation of the predeclared function recover. -// Cannot split the stack because it needs to reliably -// find the stack segment of its caller. -// -// TODO(rsc): Once we commit to CopyStackAlways, -// this doesn't need to be nosplit. -// -//go:nosplit -func gorecover(argp uintptr) any { - // Must be in a function running as part of a deferred call during the panic. - // Must be called from the topmost function of the call - // (the function used in the defer statement). - // p.argp is the argument pointer of that topmost deferred function call. - // Compare against argp reported by caller. 
- // If they match, the caller is the one who can recover. +func gorecover() any { gp := getg() p := gp._panic - if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) { - p.recovered = true - return p.arg + if p == nil || p.goexit || p.recovered { + return nil + } + + // Check to see if the function that called recover() was + // deferred directly from the panicking function. + // For code like: + // func foo() { + // defer bar() + // panic("panic") + // } + // func bar() { + // recover() + // } + // Normally the stack would look like this: + // foo + // runtime.gopanic + // bar + // runtime.gorecover + // + // However, if the function we deferred requires a wrapper + // of some sort, we need to ignore the wrapper. In that case, + // the stack looks like: + // foo + // runtime.gopanic + // wrapper + // bar + // runtime.gorecover + // And we should also successfully recover. + // + // Finally, in the weird case "defer recover()", the stack looks like: + // foo + // runtime.gopanic + // wrapper + // runtime.gorecover + // And we should not recover in that case. + // + // So our criteria is, there must be exactly one non-wrapper + // frame between gopanic and gorecover. + // + // We don't recover this: + // defer func() { func() { recover() }() } + // because there are 2 non-wrapper frames. + // + // We don't recover this: + // defer recover() + // because there are 0 non-wrapper frames. + canRecover := false + systemstack(func() { + var u unwinder + u.init(gp, 0) + u.next() // skip systemstack_switch + u.next() // skip gorecover + nonWrapperFrames := 0 + loop: + for ; u.valid(); u.next() { + for iu, f := newInlineUnwinder(u.frame.fn, u.symPC()); f.valid(); f = iu.next(f) { + sf := iu.srcFunc(f) + switch sf.funcID { + case abi.FuncIDWrapper: + continue + case abi.FuncID_gopanic: + if u.frame.fp == uintptr(p.gopanicFP) && nonWrapperFrames > 0 { + canRecover = true + } + break loop + default: + nonWrapperFrames++ + if nonWrapperFrames > 1 { + break loop + } + } + } + } + }) + if !canRecover { + return nil } - return nil + p.recovered = true + return p.arg } //go:linkname sync_throw sync.throw diff --git a/src/runtime/panic32.go b/src/runtime/panic32.go index cd34485a96..9dd4c0eb2e 100644 --- a/src/runtime/panic32.go +++ b/src/runtime/panic32.go @@ -7,6 +7,7 @@ package runtime import ( + "internal/abi" "internal/runtime/sys" ) @@ -16,77 +17,77 @@ import ( // failures in the comparisons for s[x], 0 <= x < y (y == len(s)) func goPanicExtendIndex(hi int, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "index out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsIndex}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsIndex}) } func goPanicExtendIndexU(hi uint, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "index out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsIndex}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsIndex}) } // failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s)) func goPanicExtendSliceAlen(hi int, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSliceAlen}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSliceAlen}) } func goPanicExtendSliceAlenU(hi uint, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of 
range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSliceAlen}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSliceAlen}) } func goPanicExtendSliceAcap(hi int, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSliceAcap}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSliceAcap}) } func goPanicExtendSliceAcapU(hi uint, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSliceAcap}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSliceAcap}) } // failures in the comparisons for s[x:y], 0 <= x <= y func goPanicExtendSliceB(hi int, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSliceB}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSliceB}) } func goPanicExtendSliceBU(hi uint, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSliceB}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSliceB}) } // failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s)) func goPanicExtendSlice3Alen(hi int, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSlice3Alen}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSlice3Alen}) } func goPanicExtendSlice3AlenU(hi uint, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSlice3Alen}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSlice3Alen}) } func goPanicExtendSlice3Acap(hi int, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSlice3Acap}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSlice3Acap}) } func goPanicExtendSlice3AcapU(hi uint, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSlice3Acap}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSlice3Acap}) } // failures in the comparisons for s[:x:y], 0 <= x <= y func goPanicExtendSlice3B(hi int, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSlice3B}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSlice3B}) } func goPanicExtendSlice3BU(hi uint, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSlice3B}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSlice3B}) } // failures in the comparisons for s[x:y:], 0 <= x <= y func 
goPanicExtendSlice3C(hi int, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSlice3C}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: abi.BoundsSlice3C}) } func goPanicExtendSlice3CU(hi uint, lo uint, y int) { panicCheck1(sys.GetCallerPC(), "slice bounds out of range") - panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSlice3C}) + panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: abi.BoundsSlice3C}) } // Implemented in assembly, as they take arguments in registers. diff --git a/src/runtime/pinner.go b/src/runtime/pinner.go index 543bfdb7a4..424dd065ef 100644 --- a/src/runtime/pinner.go +++ b/src/runtime/pinner.go @@ -108,7 +108,7 @@ func pinnerGetPtr(i *any) unsafe.Pointer { if etyp == nil { panic(errorString("runtime.Pinner: argument is nil")) } - if kind := etyp.Kind_ & abi.KindMask; kind != abi.Pointer && kind != abi.UnsafePointer { + if kind := etyp.Kind(); kind != abi.Pointer && kind != abi.UnsafePointer { panic(errorString("runtime.Pinner: argument is not a pointer: " + toRType(etyp).string())) } if inUserArenaChunk(uintptr(e.data)) { diff --git a/src/runtime/plugin.go b/src/runtime/plugin.go index 4b6821b1fb..49cf13cb64 100644 --- a/src/runtime/plugin.go +++ b/src/runtime/plugin.go @@ -88,7 +88,7 @@ func plugin_lastmoduleinit() (path string, syms map[string]any, initTasks []*ini (*valp)[0] = unsafe.Pointer(t) name := symName.Name() - if t.Kind_&abi.KindMask == abi.Func { + if t.Kind() == abi.Func { name = "." + name } syms[name] = val diff --git a/src/runtime/proc.go b/src/runtime/proc.go index bee3b26c0e..25d39d9ba3 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -862,10 +862,10 @@ func schedinit() { ticks.init() // run as early as possible moduledataverify() stackinit() + randinit() // must run before mallocinit, alginit, mcommoninit mallocinit() godebug := getGodebugEarly() cpuinit(godebug) // must run before alginit - randinit() // must run before alginit, mcommoninit alginit() // maps, hash, rand must not be used before this call mcommoninit(gp.m, -1) modulesinit() // provides activeModules @@ -6200,10 +6200,6 @@ func checkdead() { // This is a variable for testing purposes. It normally doesn't change. var forcegcperiod int64 = 2 * 60 * 1e9 -// needSysmonWorkaround is true if the workaround for -// golang.org/issue/42515 is needed on NetBSD. -var needSysmonWorkaround bool = false - // haveSysmon indicates whether there is sysmon thread support. // // No threads on wasm yet, so no sysmon. @@ -6312,26 +6308,6 @@ func sysmon() { netpollAdjustWaiters(delta) } } - if GOOS == "netbsd" && needSysmonWorkaround { - // netpoll is responsible for waiting for timer - // expiration, so we typically don't have to worry - // about starting an M to service timers. (Note that - // sleep for timeSleepUntil above simply ensures sysmon - // starts running again when that timer expiration may - // cause Go code to run again). - // - // However, netbsd has a kernel bug that sometimes - // misses netpollBreak wake-ups, which can lead to - // unbounded delays servicing timers. If we detect this - // overrun, then startm to get something to handle the - // timer. - // - // See issue 42515 and - // https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=50094. 
- if next := timeSleepUntil(); next < now { - startm(nil, false, false) - } - } // Check if we need to update GOMAXPROCS at most once per second. if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now { sysmonUpdateGOMAXPROCS() diff --git a/src/runtime/race.go b/src/runtime/race.go index 7e7bca76ac..2cd4e3a9a2 100644 --- a/src/runtime/race.go +++ b/src/runtime/race.go @@ -156,7 +156,7 @@ const raceenabled = true // callerpc is a return PC of the function that calls this function, // pc is start PC of the function that calls this function. func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { - kind := t.Kind_ & abi.KindMask + kind := t.Kind() if kind == abi.Array || kind == abi.Struct { // for composite objects we have to read every address // because a write might happen to any subobject. @@ -174,7 +174,7 @@ func race_ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) { } func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { - kind := t.Kind_ & abi.KindMask + kind := t.Kind() if kind == abi.Array || kind == abi.Struct { // for composite objects we have to write every address // because a write might happen to any subobject. diff --git a/src/runtime/race/testdata/rangefunc_test.go b/src/runtime/race/testdata/rangefunc_test.go index 453c0733ed..986395bfb9 100644 --- a/src/runtime/race/testdata/rangefunc_test.go +++ b/src/runtime/race/testdata/rangefunc_test.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build goexperiment.rangefunc - package race_test import ( diff --git a/src/runtime/rt0_openbsd_mips64.s b/src/runtime/rt0_openbsd_mips64.s deleted file mode 100644 index 82a8dfaba6..0000000000 --- a/src/runtime/rt0_openbsd_mips64.s +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -TEXT _rt0_mips64_openbsd(SB),NOSPLIT,$0 - JMP _main<>(SB) - -TEXT _rt0_mips64le_openbsd(SB),NOSPLIT,$0 - JMP _main<>(SB) - -TEXT _main<>(SB),NOSPLIT|NOFRAME,$0 - // In a statically linked binary, the stack contains argc, - // argv as argc string pointers followed by a NULL, envv as a - // sequence of string pointers followed by a NULL, and auxv. - // There is no TLS base pointer. -#ifdef GOARCH_mips64 - MOVW 4(R29), R4 // argc, big-endian ABI places int32 at offset 4 -#else - MOVW 0(R29), R4 // argc -#endif - ADDV $8, R29, R5 // argv - JMP main(SB) - -TEXT main(SB),NOSPLIT|NOFRAME,$0 - // in external linking, glibc jumps to main with argc in R4 - // and argv in R5 - - // initialize REGSB = PC&0xffffffff00000000 - BGEZAL R0, 1(PC) - SRLV $32, R31, RSB - SLLV $32, RSB - - MOVV $runtime·rt0_go(SB), R1 - JMP (R1) diff --git a/src/runtime/runtime-gdb.py b/src/runtime/runtime-gdb.py index 6d99515176..345a59605e 100644 --- a/src/runtime/runtime-gdb.py +++ b/src/runtime/runtime-gdb.py @@ -160,14 +160,7 @@ class MapTypePrinter: return str(self.val.type) def children(self): - fields = [f.name for f in self.val.type.strip_typedefs().target().fields()] - if 'buckets' in fields: - yield from self.old_map_children() - else: - yield from self.swiss_map_children() - - def swiss_map_children(self): - SwissMapGroupSlots = 8 # see internal/abi:SwissMapGroupSlots + MapGroupSlots = 8 # see internal/abi:MapGroupSlots cnt = 0 # Yield keys and elements in group. 
@@ -175,7 +168,7 @@ class MapTypePrinter: def group_slots(group): ctrl = group['ctrl'] - for i in xrange(SwissMapGroupSlots): + for i in xrange(MapGroupSlots): c = (ctrl >> (8*i)) & 0xff if (c & 0x80) != 0: # Empty or deleted @@ -186,7 +179,7 @@ class MapTypePrinter: yield str(cnt+1), group['slots'][i]['elem'] # The linker DWARF generation - # (cmd/link/internal/ld.(*dwctxt).synthesizemaptypesSwiss) records + # (cmd/link/internal/ld.(*dwctxt).synthesizemaptypes) records # dirPtr as a **table[K,V], but it may actually be two different types: # # For "full size" maps (dirLen > 0), dirPtr is actually a pointer to @@ -249,7 +242,7 @@ class MapTypePrinter: length = table['groups']['lengthMask'] + 1 # The linker DWARF generation - # (cmd/link/internal/ld.(*dwctxt).synthesizemaptypesSwiss) records + # (cmd/link/internal/ld.(*dwctxt).synthesizemaptypes) records # groups.data as a *group[K,V], but it is actually a pointer to # variable length array *[length]group[K,V]. # @@ -270,40 +263,6 @@ class MapTypePrinter: yield from group_slots(group) - def old_map_children(self): - MapBucketCount = 8 # see internal/abi:OldMapBucketCount - B = self.val['B'] - buckets = self.val['buckets'] - oldbuckets = self.val['oldbuckets'] - flags = self.val['flags'] - inttype = self.val['hash0'].type - cnt = 0 - for bucket in xrange(2 ** int(B)): - bp = buckets + bucket - if oldbuckets: - oldbucket = bucket & (2 ** (B - 1) - 1) - oldbp = oldbuckets + oldbucket - oldb = oldbp.dereference() - if (oldb['overflow'].cast(inttype) & 1) == 0: # old bucket not evacuated yet - if bucket >= 2 ** (B - 1): - continue # already did old bucket - bp = oldbp - while bp: - b = bp.dereference() - for i in xrange(MapBucketCount): - if b['tophash'][i] != 0: - k = b['keys'][i] - v = b['values'][i] - if flags & 1: - k = k.dereference() - if flags & 2: - v = v.dereference() - yield str(cnt), k - yield str(cnt + 1), v - cnt += 2 - bp = b['overflow'] - - class ChanTypePrinter: """Pretty print chan[T] types. diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go index 47c1fe5851..e81efadeb3 100644 --- a/src/runtime/runtime-gdb_test.go +++ b/src/runtime/runtime-gdb_test.go @@ -8,8 +8,6 @@ import ( "bytes" "flag" "fmt" - "internal/abi" - "internal/goexperiment" "internal/testenv" "os" "os/exec" @@ -155,9 +153,6 @@ func checkPtraceScope(t *testing.T) { } } -// NOTE: the maps below are allocated larger than abi.MapBucketCount -// to ensure that they are not "optimized out". - var helloSource = ` import "fmt" import "runtime" @@ -166,19 +161,21 @@ var gslice []string var smallmapvar map[string]string func main() { smallmapvar = make(map[string]string) - mapvar := make(map[string]string, ` + strconv.FormatInt(abi.OldMapBucketCount+9, 10) + `) - slicemap := make(map[string][]string,` + strconv.FormatInt(abi.OldMapBucketCount+3, 10) + `) - chanint := make(chan int, 10) - chanstr := make(chan string, 10) - chanint <- 99 + // NOTE: the maps below are allocated large to ensure that they are not + // "optimized out". 
+ mapvar := make(map[string]string, 10) + slicemap := make(map[string][]string, 10) + chanint := make(chan int, 10) + chanstr := make(chan string, 10) + chanint <- 99 chanint <- 11 - chanstr <- "spongepants" - chanstr <- "squarebob" + chanstr <- "spongepants" + chanstr <- "squarebob" smallmapvar["abc"] = "def" mapvar["abc"] = "def" mapvar["ghi"] = "jkl" slicemap["a"] = []string{"b","c","d"} - slicemap["e"] = []string{"f","g","h"} + slicemap["e"] = []string{"f","g","h"} strvar := "abc" ptrvar := &strvar slicevar := make([]string, 0, 16) @@ -638,20 +635,10 @@ func TestGdbAutotmpTypes(t *testing.T) { types := []string{ "[]main.astruct", "main.astruct", - } - if goexperiment.SwissMap { - types = append(types, []string{ - "groupReference<string,main.astruct>", - "table<string,main.astruct>", - "map<string,main.astruct>", - "map<string,main.astruct> * map[string]main.astruct", - }...) - } else { - types = append(types, []string{ - "bucket<string,main.astruct>", - "hash<string,main.astruct>", - "hash<string,main.astruct> * map[string]main.astruct", - }...) + "groupReference<string,main.astruct>", + "table<string,main.astruct>", + "map<string,main.astruct>", + "map<string,main.astruct> * map[string]main.astruct", } for _, name := range types { if !strings.Contains(sgot, name) { diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index 1e2de52989..b5d2dcefad 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -597,9 +597,7 @@ type m struct { freelink *m // on sched.freem trace mTraceState - // these are here because they are too large to be on the stack - // of low-level NOSPLIT functions. - libcall libcall + // These are here to avoid using the G stack so the stack can move during the call. libcallpc uintptr // for cpu profiler libcallsp uintptr libcallg guintptr @@ -1009,14 +1007,13 @@ type _defer struct { // // A _panic value must only ever live on the stack. // -// The argp and link fields are stack pointers, but don't need special +// The gopanicFP and link fields are stack pointers, but don't need special // handling during stack growth: because they are pointer-typed and // _panic values only live on the stack, regular stack pointer // adjustment takes care of them. type _panic struct { - argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink - arg any // argument to panic - link *_panic // link to earlier panic + arg any // argument to panic + link *_panic // link to earlier panic // startPC and startSP track where _panic.start was called. 
startPC uintptr @@ -1039,6 +1036,8 @@ type _panic struct { repanicked bool // whether this panic repanicked goexit bool deferreturn bool + + gopanicFP unsafe.Pointer // frame pointer of the gopanic frame } // savedOpenDeferState tracks the extra state from _panic that's diff --git a/src/runtime/set_vma_name_linux.go b/src/runtime/set_vma_name_linux.go index 100c2bfeca..9b6654f332 100644 --- a/src/runtime/set_vma_name_linux.go +++ b/src/runtime/set_vma_name_linux.go @@ -8,7 +8,7 @@ package runtime import ( "internal/runtime/atomic" - "internal/runtime/syscall" + "internal/runtime/syscall/linux" "unsafe" ) @@ -24,7 +24,7 @@ func setVMAName(start unsafe.Pointer, length uintptr, name string) { n := copy(sysName[:], " Go: ") copy(sysName[n:79], name) // leave final byte zero - _, _, err := syscall.Syscall6(syscall.SYS_PRCTL, syscall.PR_SET_VMA, syscall.PR_SET_VMA_ANON_NAME, uintptr(start), length, uintptr(unsafe.Pointer(&sysName[0])), 0) + _, _, err := linux.Syscall6(linux.SYS_PRCTL, linux.PR_SET_VMA, linux.PR_SET_VMA_ANON_NAME, uintptr(start), length, uintptr(unsafe.Pointer(&sysName[0])), 0) if err == _EINVAL { prSetVMAUnsupported.Store(true) } diff --git a/src/runtime/signal_mips64x.go b/src/runtime/signal_mips64x.go index cee1bf7a1b..eea2169408 100644 --- a/src/runtime/signal_mips64x.go +++ b/src/runtime/signal_mips64x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (linux || openbsd) && (mips64 || mips64le) +//go:build linux && (mips64 || mips64le) package runtime diff --git a/src/runtime/signal_openbsd_mips64.go b/src/runtime/signal_openbsd_mips64.go deleted file mode 100644 index 54ed523c7b..0000000000 --- a/src/runtime/signal_openbsd_mips64.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package runtime - -import ( - "unsafe" -) - -type sigctxt struct { - info *siginfo - ctxt unsafe.Pointer -} - -//go:nosplit -//go:nowritebarrierrec -func (c *sigctxt) regs() *sigcontext { - return (*sigcontext)(c.ctxt) -} - -func (c *sigctxt) r0() uint64 { return c.regs().sc_regs[0] } -func (c *sigctxt) r1() uint64 { return c.regs().sc_regs[1] } -func (c *sigctxt) r2() uint64 { return c.regs().sc_regs[2] } -func (c *sigctxt) r3() uint64 { return c.regs().sc_regs[3] } -func (c *sigctxt) r4() uint64 { return c.regs().sc_regs[4] } -func (c *sigctxt) r5() uint64 { return c.regs().sc_regs[5] } -func (c *sigctxt) r6() uint64 { return c.regs().sc_regs[6] } -func (c *sigctxt) r7() uint64 { return c.regs().sc_regs[7] } -func (c *sigctxt) r8() uint64 { return c.regs().sc_regs[8] } -func (c *sigctxt) r9() uint64 { return c.regs().sc_regs[9] } -func (c *sigctxt) r10() uint64 { return c.regs().sc_regs[10] } -func (c *sigctxt) r11() uint64 { return c.regs().sc_regs[11] } -func (c *sigctxt) r12() uint64 { return c.regs().sc_regs[12] } -func (c *sigctxt) r13() uint64 { return c.regs().sc_regs[13] } -func (c *sigctxt) r14() uint64 { return c.regs().sc_regs[14] } -func (c *sigctxt) r15() uint64 { return c.regs().sc_regs[15] } -func (c *sigctxt) r16() uint64 { return c.regs().sc_regs[16] } -func (c *sigctxt) r17() uint64 { return c.regs().sc_regs[17] } -func (c *sigctxt) r18() uint64 { return c.regs().sc_regs[18] } -func (c *sigctxt) r19() uint64 { return c.regs().sc_regs[19] } -func (c *sigctxt) r20() uint64 { return c.regs().sc_regs[20] } -func (c *sigctxt) r21() uint64 { return c.regs().sc_regs[21] } -func (c *sigctxt) r22() uint64 { return c.regs().sc_regs[22] } -func (c *sigctxt) r23() uint64 { return c.regs().sc_regs[23] } -func (c *sigctxt) r24() uint64 { return c.regs().sc_regs[24] } -func (c *sigctxt) r25() uint64 { return c.regs().sc_regs[25] } -func (c *sigctxt) r26() uint64 { return c.regs().sc_regs[26] } -func (c *sigctxt) r27() uint64 { return c.regs().sc_regs[27] } -func (c *sigctxt) r28() uint64 { return c.regs().sc_regs[28] } -func (c *sigctxt) r29() uint64 { return c.regs().sc_regs[29] } -func (c *sigctxt) r30() uint64 { return c.regs().sc_regs[30] } -func (c *sigctxt) r31() uint64 { return c.regs().sc_regs[31] } -func (c *sigctxt) sp() uint64 { return c.regs().sc_regs[29] } - -//go:nosplit -//go:nowritebarrierrec -func (c *sigctxt) pc() uint64 { return c.regs().sc_pc } - -func (c *sigctxt) link() uint64 { return c.regs().sc_regs[31] } -func (c *sigctxt) lo() uint64 { return c.regs().mullo } -func (c *sigctxt) hi() uint64 { return c.regs().mulhi } - -func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) } -func (c *sigctxt) sigaddr() uint64 { - return *(*uint64)(add(unsafe.Pointer(c.info), 16)) -} - -func (c *sigctxt) set_r28(x uint64) { c.regs().sc_regs[28] = x } -func (c *sigctxt) set_r30(x uint64) { c.regs().sc_regs[30] = x } -func (c *sigctxt) set_pc(x uint64) { c.regs().sc_pc = x } -func (c *sigctxt) set_sp(x uint64) { c.regs().sc_regs[29] = x } -func (c *sigctxt) set_link(x uint64) { c.regs().sc_regs[31] = x } - -func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) } -func (c *sigctxt) set_sigaddr(x uint64) { - *(*uint64)(add(unsafe.Pointer(c.info), 16)) = x -} diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go index 7d7734433e..07778c8ebe 100644 --- a/src/runtime/signal_windows.go +++ b/src/runtime/signal_windows.go @@ -18,24 +18,24 @@ const ( ) func preventErrorDialogs() { - errormode := stdcall0(_GetErrorMode) - 
stdcall1(_SetErrorMode, errormode|_SEM_FAILCRITICALERRORS|_SEM_NOGPFAULTERRORBOX|_SEM_NOOPENFILEERRORBOX) + errormode := stdcall(_GetErrorMode) + stdcall(_SetErrorMode, errormode|_SEM_FAILCRITICALERRORS|_SEM_NOGPFAULTERRORBOX|_SEM_NOOPENFILEERRORBOX) // Disable WER fault reporting UI. // Do this even if WER is disabled as a whole, // as WER might be enabled later with setTraceback("wer") // and we still want the fault reporting UI to be disabled if this happens. var werflags uintptr - stdcall2(_WerGetFlags, currentProcess, uintptr(unsafe.Pointer(&werflags))) - stdcall1(_WerSetFlags, werflags|_WER_FAULT_REPORTING_NO_UI) + stdcall(_WerGetFlags, currentProcess, uintptr(unsafe.Pointer(&werflags))) + stdcall(_WerSetFlags, werflags|_WER_FAULT_REPORTING_NO_UI) } // enableWER re-enables Windows error reporting without fault reporting UI. func enableWER() { // re-enable Windows Error Reporting - errormode := stdcall0(_GetErrorMode) + errormode := stdcall(_GetErrorMode) if errormode&_SEM_NOGPFAULTERRORBOX != 0 { - stdcall1(_SetErrorMode, errormode^_SEM_NOGPFAULTERRORBOX) + stdcall(_SetErrorMode, errormode^_SEM_NOGPFAULTERRORBOX) } } @@ -47,14 +47,14 @@ func sehtramp() func sigresume() func initExceptionHandler() { - stdcall2(_AddVectoredExceptionHandler, 1, abi.FuncPCABI0(exceptiontramp)) + stdcall(_AddVectoredExceptionHandler, 1, abi.FuncPCABI0(exceptiontramp)) if GOARCH == "386" { // use SetUnhandledExceptionFilter for windows-386. // note: SetUnhandledExceptionFilter handler won't be called, if debugging. - stdcall1(_SetUnhandledExceptionFilter, abi.FuncPCABI0(lastcontinuetramp)) + stdcall(_SetUnhandledExceptionFilter, abi.FuncPCABI0(lastcontinuetramp)) } else { - stdcall2(_AddVectoredContinueHandler, 1, abi.FuncPCABI0(firstcontinuetramp)) - stdcall2(_AddVectoredContinueHandler, 0, abi.FuncPCABI0(lastcontinuetramp)) + stdcall(_AddVectoredContinueHandler, 1, abi.FuncPCABI0(firstcontinuetramp)) + stdcall(_AddVectoredContinueHandler, 0, abi.FuncPCABI0(lastcontinuetramp)) } } @@ -279,11 +279,11 @@ func sehhandler(_ *exceptionrecord, _ uint64, _ *context, dctxt *_DISPATCHER_CON ctxt := dctxt.ctx() var base, sp uintptr for { - entry := stdcall3(_RtlLookupFunctionEntry, ctxt.ip(), uintptr(unsafe.Pointer(&base)), 0) + entry := stdcall(_RtlLookupFunctionEntry, ctxt.ip(), uintptr(unsafe.Pointer(&base)), 0) if entry == 0 { break } - stdcall8(_RtlVirtualUnwind, 0, base, ctxt.ip(), entry, uintptr(unsafe.Pointer(ctxt)), 0, uintptr(unsafe.Pointer(&sp)), 0) + stdcall(_RtlVirtualUnwind, 0, base, ctxt.ip(), entry, uintptr(unsafe.Pointer(ctxt)), 0, uintptr(unsafe.Pointer(&sp)), 0) if sp < gp.stack.lo || gp.stack.hi <= sp { break } @@ -467,7 +467,7 @@ func dieFromException(info *exceptionrecord, r *context) { } } const FAIL_FAST_GENERATE_EXCEPTION_ADDRESS = 0x1 - stdcall3(_RaiseFailFastException, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(r)), FAIL_FAST_GENERATE_EXCEPTION_ADDRESS) + stdcall(_RaiseFailFastException, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(r)), FAIL_FAST_GENERATE_EXCEPTION_ADDRESS) } // gsignalStack is unused on Windows. 
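Editor's note: the signal_windows.go hunk above belongs to the "runtime: deduplicate Windows stdcall" change in the merge list — the fixed-arity helpers (stdcall0, stdcall1, stdcall2, ..., stdcall8) collapse into one variadic stdcall. A toy, self-contained sketch of that consolidation pattern; the names here are illustrative stand-ins, not the runtime's internals, which must also marshal arguments for the Windows calling convention:

```go
package main

import "fmt"

// call stands in for the runtime's new variadic stdcall helper: a single
// entry point forwarding any number of uintptr arguments, replacing a
// family of fixed-arity wrappers (stdcall0, stdcall1, stdcall2, ...).
func call(fn func([]uintptr) uintptr, args ...uintptr) uintptr {
	return fn(args)
}

func main() {
	sum := func(a []uintptr) uintptr {
		var s uintptr
		for _, v := range a {
			s += v
		}
		return s
	}
	fmt.Println(call(sum))          // previously a stdcall0-style call
	fmt.Println(call(sum, 1, 2, 3)) // previously a stdcall3-style call
}
```

The payoff is visible in the hunk: call sites no longer encode an argument count in the helper name, so stdcall2(_WerGetFlags, ...) and stdcall8(_RtlVirtualUnwind, ...) both become plain stdcall.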
diff --git a/src/runtime/slice.go b/src/runtime/slice.go index 79d3f6c0de..e31d5dccb2 100644 --- a/src/runtime/slice.go +++ b/src/runtime/slice.go @@ -397,5 +397,5 @@ func bytealg_MakeNoZero(len int) []byte { panicmakeslicelen() } cap := roundupsize(uintptr(len), true) - return unsafe.Slice((*byte)(mallocgc(uintptr(cap), nil, false)), cap)[:len] + return unsafe.Slice((*byte)(mallocgc(cap, nil, false)), cap)[:len] } diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index 866c46a83d..56f2a00d76 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -647,8 +647,15 @@ func moduledataverify1(datap *moduledata) { min := datap.textAddr(datap.ftab[0].entryoff) max := datap.textAddr(datap.ftab[nftab].entryoff) - if datap.minpc != min || datap.maxpc != max { - println("minpc=", hex(datap.minpc), "min=", hex(min), "maxpc=", hex(datap.maxpc), "max=", hex(max)) + minpc := datap.minpc + maxpc := datap.maxpc + if GOARCH == "wasm" { + // On Wasm, the func table contains the function index, whereas + // the "PC" is function index << 16 + block index. + maxpc = alignUp(maxpc, 1<<16) // round up for end PC + } + if minpc != min || maxpc != max { + println("minpc=", hex(minpc), "min=", hex(min), "maxpc=", hex(maxpc), "max=", hex(max)) throw("minpc or maxpc invalid") } @@ -694,6 +701,11 @@ func (md *moduledata) textAddr(off32 uint32) uintptr { throw("runtime: text offset out of range") } } + if GOARCH == "wasm" { + // On Wasm, a text offset (e.g. in the method table) is function index, whereas + // the "PC" is function index << 16 + block index. + res <<= 16 + } return res } @@ -704,8 +716,17 @@ func (md *moduledata) textAddr(off32 uint32) uintptr { // //go:nosplit func (md *moduledata) textOff(pc uintptr) (uint32, bool) { - res := uint32(pc - md.text) + off := pc - md.text + if GOARCH == "wasm" { + // On Wasm, the func table contains the function index, whereas + // the "PC" is function index << 16 + block index. + off >>= 16 + } + res := uint32(off) if len(md.textsectmap) > 1 { + if GOARCH == "wasm" { + fatal("unexpected multiple text sections on Wasm") + } for i, sect := range md.textsectmap { if sect.baseaddr > pc { // pc is not in any section. @@ -904,6 +925,11 @@ func findfunc(pc uintptr) funcInfo { } x := uintptr(pcOff) + datap.text - datap.minpc // TODO: are datap.text and datap.minpc always equal? + if GOARCH == "wasm" { + // On Wasm, pcOff is the function index, whereas + // the "PC" is function index << 16 + block index. + x = uintptr(pcOff)<<16 + datap.text - datap.minpc + } b := x / abi.FuncTabBucketSize i := x % abi.FuncTabBucketSize / (abi.FuncTabBucketSize / nsub) diff --git a/src/runtime/sys_aix_ppc64.s b/src/runtime/sys_aix_ppc64.s index 66081977b1..a0ef7e111a 100644 --- a/src/runtime/sys_aix_ppc64.s +++ b/src/runtime/sys_aix_ppc64.s @@ -130,15 +130,15 @@ TEXT sigtramp<>(SB),NOSPLIT|NOFRAME|TOPFRAME,$0 // Save m->libcall. We need to do this because we // might get interrupted by a signal in runtime·asmcgocall. - MOVD (m_libcall+libcall_fn)(R6), R7 + MOVD (m_mOS+mOS_libcall+libcall_fn)(R6), R7 MOVD R7, 96(R1) - MOVD (m_libcall+libcall_args)(R6), R7 + MOVD (m_mOS+mOS_libcall+libcall_args)(R6), R7 MOVD R7, 104(R1) - MOVD (m_libcall+libcall_n)(R6), R7 + MOVD (m_mOS+mOS_libcall+libcall_n)(R6), R7 MOVD R7, 112(R1) - MOVD (m_libcall+libcall_r1)(R6), R7 + MOVD (m_mOS+mOS_libcall+libcall_r1)(R6), R7 MOVD R7, 120(R1) - MOVD (m_libcall+libcall_r2)(R6), R7 + MOVD (m_mOS+mOS_libcall+libcall_r2)(R6), R7 MOVD R7, 128(R1) // save errno, it might be EINTR; stuff we do here might reset it. 
@@ -162,15 +162,15 @@ sigtramp: // restore libcall MOVD 96(R1), R7 - MOVD R7, (m_libcall+libcall_fn)(R6) + MOVD R7, (m_mOS+mOS_libcall+libcall_fn)(R6) MOVD 104(R1), R7 - MOVD R7, (m_libcall+libcall_args)(R6) + MOVD R7, (m_mOS+mOS_libcall+libcall_args)(R6) MOVD 112(R1), R7 - MOVD R7, (m_libcall+libcall_n)(R6) + MOVD R7, (m_mOS+mOS_libcall+libcall_n)(R6) MOVD 120(R1), R7 - MOVD R7, (m_libcall+libcall_r1)(R6) + MOVD R7, (m_mOS+mOS_libcall+libcall_r1)(R6) MOVD 128(R1), R7 - MOVD R7, (m_libcall+libcall_r2)(R6) + MOVD R7, (m_mOS+mOS_libcall+libcall_r2)(R6) // restore errno MOVD (m_mOS+mOS_perrno)(R6), R7 diff --git a/src/runtime/sys_libc.go b/src/runtime/sys_libc.go index 72d8991559..214e879319 100644 --- a/src/runtime/sys_libc.go +++ b/src/runtime/sys_libc.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin || (openbsd && !mips64) +//go:build darwin || openbsd package runtime diff --git a/src/runtime/sys_openbsd.go b/src/runtime/sys_openbsd.go index c4b8489612..df503d24c6 100644 --- a/src/runtime/sys_openbsd.go +++ b/src/runtime/sys_openbsd.go @@ -2,12 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build openbsd && !mips64 - package runtime import ( "internal/abi" + "internal/runtime/atomic" "unsafe" ) @@ -61,6 +60,412 @@ func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32 } func pthread_create_trampoline() +//go:nosplit +//go:cgo_unsafe_args +func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32 { + ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(thrsleep_trampoline)), unsafe.Pointer(&ident)) + KeepAlive(tsp) + KeepAlive(abort) + return ret +} +func thrsleep_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func thrwakeup(ident uintptr, n int32) int32 { + return libcCall(unsafe.Pointer(abi.FuncPCABI0(thrwakeup_trampoline)), unsafe.Pointer(&ident)) +} +func thrwakeup_trampoline() + +//go:nosplit +func osyield() { + libcCall(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil)) +} +func sched_yield_trampoline() + +//go:nosplit +func osyield_no_g() { + asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil)) +} + +// This is exported via linkname to assembly in runtime/cgo. +// +//go:linkname exit +//go:nosplit +//go:cgo_unsafe_args +func exit(code int32) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(exit_trampoline)), unsafe.Pointer(&code)) +} +func exit_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func getthrid() (tid int32) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(getthrid_trampoline)), unsafe.Pointer(&tid)) + return +} +func getthrid_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func raiseproc(sig uint32) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(raiseproc_trampoline)), unsafe.Pointer(&sig)) +} +func raiseproc_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func thrkill(tid int32, sig int) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(thrkill_trampoline)), unsafe.Pointer(&tid)) +} +func thrkill_trampoline() + +// mmap is used to do low-level memory allocation via mmap. Don't allow stack +// splits, since this function (used by sysAlloc) is called in a lot of low-level +// parts of the runtime and callers often assume it won't acquire any locks. 
+// +//go:nosplit +func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) { + args := struct { + addr unsafe.Pointer + n uintptr + prot, flags, fd int32 + off uint32 + ret1 unsafe.Pointer + ret2 int + }{addr, n, prot, flags, fd, off, nil, 0} + libcCall(unsafe.Pointer(abi.FuncPCABI0(mmap_trampoline)), unsafe.Pointer(&args)) + KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address. + return args.ret1, args.ret2 +} +func mmap_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func munmap(addr unsafe.Pointer, n uintptr) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(munmap_trampoline)), unsafe.Pointer(&addr)) + KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address. +} +func munmap_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func madvise(addr unsafe.Pointer, n uintptr, flags int32) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(madvise_trampoline)), unsafe.Pointer(&addr)) + KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address. +} +func madvise_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func open(name *byte, mode, perm int32) (ret int32) { + ret = libcCall(unsafe.Pointer(abi.FuncPCABI0(open_trampoline)), unsafe.Pointer(&name)) + KeepAlive(name) + return +} +func open_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func closefd(fd int32) int32 { + return libcCall(unsafe.Pointer(abi.FuncPCABI0(close_trampoline)), unsafe.Pointer(&fd)) +} +func close_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func read(fd int32, p unsafe.Pointer, n int32) int32 { + ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(read_trampoline)), unsafe.Pointer(&fd)) + KeepAlive(p) + return ret +} +func read_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func write1(fd uintptr, p unsafe.Pointer, n int32) int32 { + ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(write_trampoline)), unsafe.Pointer(&fd)) + KeepAlive(p) + return ret +} +func write_trampoline() + +func pipe2(flags int32) (r, w int32, errno int32) { + var p [2]int32 + args := struct { + p unsafe.Pointer + flags int32 + }{noescape(unsafe.Pointer(&p)), flags} + errno = libcCall(unsafe.Pointer(abi.FuncPCABI0(pipe2_trampoline)), unsafe.Pointer(&args)) + return p[0], p[1], errno +} +func pipe2_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func setitimer(mode int32, new, old *itimerval) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(setitimer_trampoline)), unsafe.Pointer(&mode)) + KeepAlive(new) + KeepAlive(old) +} +func setitimer_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func usleep(usec uint32) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec)) +} +func usleep_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func usleep_no_g(usec uint32) { + asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec)) +} + +//go:nosplit +//go:cgo_unsafe_args +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 { + ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctl_trampoline)), unsafe.Pointer(&mib)) + KeepAlive(mib) + KeepAlive(out) + KeepAlive(size) + KeepAlive(dst) + return ret +} +func sysctl_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func fcntl(fd, cmd, arg int32) (ret int32, errno int32) { + args := struct { + fd, cmd, arg int32 + ret, errno int32 + }{fd, cmd, arg, 0, 0} + libcCall(unsafe.Pointer(abi.FuncPCABI0(fcntl_trampoline)), unsafe.Pointer(&args)) + return args.ret, args.errno +} +func fcntl_trampoline() + 
+//go:nosplit +func nanotime1() int64 { + var ts timespec + args := struct { + clock_id int32 + tp unsafe.Pointer + }{_CLOCK_MONOTONIC, unsafe.Pointer(&ts)} + if errno := libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args)); errno < 0 { + // Avoid growing the nosplit stack. + systemstack(func() { + println("runtime: errno", -errno) + throw("clock_gettime failed") + }) + } + return ts.tv_sec*1e9 + int64(ts.tv_nsec) +} +func clock_gettime_trampoline() + +//go:nosplit +func walltime() (int64, int32) { + var ts timespec + args := struct { + clock_id int32 + tp unsafe.Pointer + }{_CLOCK_REALTIME, unsafe.Pointer(&ts)} + if errno := libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args)); errno < 0 { + // Avoid growing the nosplit stack. + systemstack(func() { + println("runtime: errno", -errno) + throw("clock_gettime failed") + }) + } + return ts.tv_sec, int32(ts.tv_nsec) +} + +//go:nosplit +//go:cgo_unsafe_args +func kqueue() int32 { + return libcCall(unsafe.Pointer(abi.FuncPCABI0(kqueue_trampoline)), nil) +} +func kqueue_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 { + ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(kevent_trampoline)), unsafe.Pointer(&kq)) + KeepAlive(ch) + KeepAlive(ev) + KeepAlive(ts) + return ret +} +func kevent_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func sigaction(sig uint32, new *sigactiont, old *sigactiont) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaction_trampoline)), unsafe.Pointer(&sig)) + KeepAlive(new) + KeepAlive(old) +} +func sigaction_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func sigprocmask(how uint32, new *sigset, old *sigset) { + // sigprocmask is called from sigsave, which is called from needm. + // As such, we have to be able to run with no g here. + asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(sigprocmask_trampoline)), unsafe.Pointer(&how)) + KeepAlive(new) + KeepAlive(old) +} +func sigprocmask_trampoline() + +//go:nosplit +//go:cgo_unsafe_args +func sigaltstack(new *stackt, old *stackt) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaltstack_trampoline)), unsafe.Pointer(&new)) + KeepAlive(new) + KeepAlive(old) +} +func sigaltstack_trampoline() + +// Not used on OpenBSD, but must be defined. +func exitThread(wait *atomic.Uint32) { + throw("exitThread") +} + +//go:nosplit +//go:cgo_unsafe_args +func issetugid() (ret int32) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(issetugid_trampoline)), unsafe.Pointer(&ret)) + return +} +func issetugid_trampoline() + +// The X versions of syscall expect the libc call to return a 64-bit result. +// Otherwise (the non-X version) expects a 32-bit result. +// This distinction is required because an error is indicated by returning -1, +// and we need to know whether to check 32 or 64 bits of the result. +// (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.) + +// golang.org/x/sys linknames syscall_syscall +// (in addition to standard package syscall). +// Do not remove or change the type signature. 
+// +//go:linkname syscall_syscall syscall.syscall +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { + entersyscall() + libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscall() + +//go:linkname syscall_syscallX syscall.syscallX +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { + entersyscall() + libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscallX() + +// golang.org/x/sys linknames syscall.syscall6 +// (in addition to standard package syscall). +// Do not remove or change the type signature. +// +//go:linkname syscall_syscall6 syscall.syscall6 +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { + entersyscall() + libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscall6() + +//go:linkname syscall_syscall6X syscall.syscall6X +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { + entersyscall() + libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscall6X() + +// golang.org/x/sys linknames syscall.syscall10 +// (in addition to standard package syscall). +// Do not remove or change the type signature. +// +//go:linkname syscall_syscall10 syscall.syscall10 +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) { + entersyscall() + libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscall10() + +//go:linkname syscall_syscall10X syscall.syscall10X +//go:nosplit +//go:cgo_unsafe_args +func syscall_syscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) { + entersyscall() + libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn)) + exitsyscall() + return +} +func syscall10X() + +// golang.org/x/sys linknames syscall_rawSyscall +// (in addition to standard package syscall). +// Do not remove or change the type signature. +// +//go:linkname syscall_rawSyscall syscall.rawSyscall +//go:nosplit +//go:cgo_unsafe_args +func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn)) + return +} + +// golang.org/x/sys linknames syscall_rawSyscall6 +// (in addition to standard package syscall). +// Do not remove or change the type signature. 
+// +//go:linkname syscall_rawSyscall6 syscall.rawSyscall6 +//go:nosplit +//go:cgo_unsafe_args +func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn)) + return +} + +//go:linkname syscall_rawSyscall6X syscall.rawSyscall6X +//go:nosplit +//go:cgo_unsafe_args +func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn)) + return +} + +//go:linkname syscall_rawSyscall10X syscall.rawSyscall10X +//go:nosplit +//go:cgo_unsafe_args +func syscall_rawSyscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) { + libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn)) + return +} + // Tell the linker that the libc_* functions are to be found // in a system library, with the libc_ prefix missing. @@ -71,5 +476,40 @@ func pthread_create_trampoline() //go:cgo_import_dynamic libc_pthread_create pthread_create "libpthread.so" //go:cgo_import_dynamic libc_pthread_sigmask pthread_sigmask "libpthread.so" +//go:cgo_import_dynamic libc_thrsleep __thrsleep "libc.so" +//go:cgo_import_dynamic libc_thrwakeup __thrwakeup "libc.so" +//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.so" + +//go:cgo_import_dynamic libc_errno __errno "libc.so" +//go:cgo_import_dynamic libc_exit exit "libc.so" +//go:cgo_import_dynamic libc_getthrid getthrid "libc.so" +//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.so" +//go:cgo_import_dynamic libc_thrkill thrkill "libc.so" + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" +//go:cgo_import_dynamic libc_munmap munmap "libc.so" +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + +//go:cgo_import_dynamic libc_open open "libc.so" +//go:cgo_import_dynamic libc_close close "libc.so" +//go:cgo_import_dynamic libc_read read "libc.so" +//go:cgo_import_dynamic libc_write write "libc.so" +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" +//go:cgo_import_dynamic libc_setitimer setitimer "libc.so" +//go:cgo_import_dynamic libc_usleep usleep "libc.so" +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" +//go:cgo_import_dynamic libc_getpid getpid "libc.so" +//go:cgo_import_dynamic libc_kill kill "libc.so" +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + +//go:cgo_import_dynamic libc_sigaction sigaction "libc.so" +//go:cgo_import_dynamic libc_sigaltstack sigaltstack "libc.so" + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + //go:cgo_import_dynamic _ _ "libpthread.so" //go:cgo_import_dynamic _ _ "libc.so" diff --git a/src/runtime/sys_openbsd1.go b/src/runtime/sys_openbsd1.go deleted file mode 100644 index d852e3c58a..0000000000 --- a/src/runtime/sys_openbsd1.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build openbsd && !mips64 - -package runtime - -import ( - "internal/abi" - "unsafe" -) - -//go:nosplit -//go:cgo_unsafe_args -func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32 { - ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(thrsleep_trampoline)), unsafe.Pointer(&ident)) - KeepAlive(tsp) - KeepAlive(abort) - return ret -} -func thrsleep_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func thrwakeup(ident uintptr, n int32) int32 { - return libcCall(unsafe.Pointer(abi.FuncPCABI0(thrwakeup_trampoline)), unsafe.Pointer(&ident)) -} -func thrwakeup_trampoline() - -//go:nosplit -func osyield() { - libcCall(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil)) -} -func sched_yield_trampoline() - -//go:nosplit -func osyield_no_g() { - asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(sched_yield_trampoline)), unsafe.Pointer(nil)) -} - -//go:cgo_import_dynamic libc_thrsleep __thrsleep "libc.so" -//go:cgo_import_dynamic libc_thrwakeup __thrwakeup "libc.so" -//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.so" - -//go:cgo_import_dynamic _ _ "libc.so" diff --git a/src/runtime/sys_openbsd2.go b/src/runtime/sys_openbsd2.go deleted file mode 100644 index 8f5242018d..0000000000 --- a/src/runtime/sys_openbsd2.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build openbsd && !mips64 - -package runtime - -import ( - "internal/abi" - "internal/runtime/atomic" - "unsafe" -) - -// This is exported via linkname to assembly in runtime/cgo. -// -//go:linkname exit -//go:nosplit -//go:cgo_unsafe_args -func exit(code int32) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(exit_trampoline)), unsafe.Pointer(&code)) -} -func exit_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func getthrid() (tid int32) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(getthrid_trampoline)), unsafe.Pointer(&tid)) - return -} -func getthrid_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func raiseproc(sig uint32) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(raiseproc_trampoline)), unsafe.Pointer(&sig)) -} -func raiseproc_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func thrkill(tid int32, sig int) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(thrkill_trampoline)), unsafe.Pointer(&tid)) -} -func thrkill_trampoline() - -// mmap is used to do low-level memory allocation via mmap. Don't allow stack -// splits, since this function (used by sysAlloc) is called in a lot of low-level -// parts of the runtime and callers often assume it won't acquire any locks. -// -//go:nosplit -func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) { - args := struct { - addr unsafe.Pointer - n uintptr - prot, flags, fd int32 - off uint32 - ret1 unsafe.Pointer - ret2 int - }{addr, n, prot, flags, fd, off, nil, 0} - libcCall(unsafe.Pointer(abi.FuncPCABI0(mmap_trampoline)), unsafe.Pointer(&args)) - KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address. - return args.ret1, args.ret2 -} -func mmap_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func munmap(addr unsafe.Pointer, n uintptr) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(munmap_trampoline)), unsafe.Pointer(&addr)) - KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address. 
-} -func munmap_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func madvise(addr unsafe.Pointer, n uintptr, flags int32) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(madvise_trampoline)), unsafe.Pointer(&addr)) - KeepAlive(addr) // Just for consistency. Hopefully addr is not a Go address. -} -func madvise_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func open(name *byte, mode, perm int32) (ret int32) { - ret = libcCall(unsafe.Pointer(abi.FuncPCABI0(open_trampoline)), unsafe.Pointer(&name)) - KeepAlive(name) - return -} -func open_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func closefd(fd int32) int32 { - return libcCall(unsafe.Pointer(abi.FuncPCABI0(close_trampoline)), unsafe.Pointer(&fd)) -} -func close_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func read(fd int32, p unsafe.Pointer, n int32) int32 { - ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(read_trampoline)), unsafe.Pointer(&fd)) - KeepAlive(p) - return ret -} -func read_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func write1(fd uintptr, p unsafe.Pointer, n int32) int32 { - ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(write_trampoline)), unsafe.Pointer(&fd)) - KeepAlive(p) - return ret -} -func write_trampoline() - -func pipe2(flags int32) (r, w int32, errno int32) { - var p [2]int32 - args := struct { - p unsafe.Pointer - flags int32 - }{noescape(unsafe.Pointer(&p)), flags} - errno = libcCall(unsafe.Pointer(abi.FuncPCABI0(pipe2_trampoline)), unsafe.Pointer(&args)) - return p[0], p[1], errno -} -func pipe2_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func setitimer(mode int32, new, old *itimerval) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(setitimer_trampoline)), unsafe.Pointer(&mode)) - KeepAlive(new) - KeepAlive(old) -} -func setitimer_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func usleep(usec uint32) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec)) -} -func usleep_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func usleep_no_g(usec uint32) { - asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(usleep_trampoline)), unsafe.Pointer(&usec)) -} - -//go:nosplit -//go:cgo_unsafe_args -func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 { - ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(sysctl_trampoline)), unsafe.Pointer(&mib)) - KeepAlive(mib) - KeepAlive(out) - KeepAlive(size) - KeepAlive(dst) - return ret -} -func sysctl_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func fcntl(fd, cmd, arg int32) (ret int32, errno int32) { - args := struct { - fd, cmd, arg int32 - ret, errno int32 - }{fd, cmd, arg, 0, 0} - libcCall(unsafe.Pointer(abi.FuncPCABI0(fcntl_trampoline)), unsafe.Pointer(&args)) - return args.ret, args.errno -} -func fcntl_trampoline() - -//go:nosplit -func nanotime1() int64 { - var ts timespec - args := struct { - clock_id int32 - tp unsafe.Pointer - }{_CLOCK_MONOTONIC, unsafe.Pointer(&ts)} - if errno := libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args)); errno < 0 { - // Avoid growing the nosplit stack. 
- systemstack(func() { - println("runtime: errno", -errno) - throw("clock_gettime failed") - }) - } - return ts.tv_sec*1e9 + int64(ts.tv_nsec) -} -func clock_gettime_trampoline() - -//go:nosplit -func walltime() (int64, int32) { - var ts timespec - args := struct { - clock_id int32 - tp unsafe.Pointer - }{_CLOCK_REALTIME, unsafe.Pointer(&ts)} - if errno := libcCall(unsafe.Pointer(abi.FuncPCABI0(clock_gettime_trampoline)), unsafe.Pointer(&args)); errno < 0 { - // Avoid growing the nosplit stack. - systemstack(func() { - println("runtime: errno", -errno) - throw("clock_gettime failed") - }) - } - return ts.tv_sec, int32(ts.tv_nsec) -} - -//go:nosplit -//go:cgo_unsafe_args -func kqueue() int32 { - return libcCall(unsafe.Pointer(abi.FuncPCABI0(kqueue_trampoline)), nil) -} -func kqueue_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 { - ret := libcCall(unsafe.Pointer(abi.FuncPCABI0(kevent_trampoline)), unsafe.Pointer(&kq)) - KeepAlive(ch) - KeepAlive(ev) - KeepAlive(ts) - return ret -} -func kevent_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func sigaction(sig uint32, new *sigactiont, old *sigactiont) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaction_trampoline)), unsafe.Pointer(&sig)) - KeepAlive(new) - KeepAlive(old) -} -func sigaction_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func sigprocmask(how uint32, new *sigset, old *sigset) { - // sigprocmask is called from sigsave, which is called from needm. - // As such, we have to be able to run with no g here. - asmcgocall_no_g(unsafe.Pointer(abi.FuncPCABI0(sigprocmask_trampoline)), unsafe.Pointer(&how)) - KeepAlive(new) - KeepAlive(old) -} -func sigprocmask_trampoline() - -//go:nosplit -//go:cgo_unsafe_args -func sigaltstack(new *stackt, old *stackt) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(sigaltstack_trampoline)), unsafe.Pointer(&new)) - KeepAlive(new) - KeepAlive(old) -} -func sigaltstack_trampoline() - -// Not used on OpenBSD, but must be defined. -func exitThread(wait *atomic.Uint32) { - throw("exitThread") -} - -//go:nosplit -//go:cgo_unsafe_args -func issetugid() (ret int32) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(issetugid_trampoline)), unsafe.Pointer(&ret)) - return -} -func issetugid_trampoline() - -// Tell the linker that the libc_* functions are to be found -// in a system library, with the libc_ prefix missing. 
- -//go:cgo_import_dynamic libc_errno __errno "libc.so" -//go:cgo_import_dynamic libc_exit exit "libc.so" -//go:cgo_import_dynamic libc_getthrid getthrid "libc.so" -//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.so" -//go:cgo_import_dynamic libc_thrkill thrkill "libc.so" - -//go:cgo_import_dynamic libc_mmap mmap "libc.so" -//go:cgo_import_dynamic libc_munmap munmap "libc.so" -//go:cgo_import_dynamic libc_madvise madvise "libc.so" - -//go:cgo_import_dynamic libc_open open "libc.so" -//go:cgo_import_dynamic libc_close close "libc.so" -//go:cgo_import_dynamic libc_read read "libc.so" -//go:cgo_import_dynamic libc_write write "libc.so" -//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" - -//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" -//go:cgo_import_dynamic libc_setitimer setitimer "libc.so" -//go:cgo_import_dynamic libc_usleep usleep "libc.so" -//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" -//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" -//go:cgo_import_dynamic libc_getpid getpid "libc.so" -//go:cgo_import_dynamic libc_kill kill "libc.so" -//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" -//go:cgo_import_dynamic libc_kevent kevent "libc.so" - -//go:cgo_import_dynamic libc_sigaction sigaction "libc.so" -//go:cgo_import_dynamic libc_sigaltstack sigaltstack "libc.so" - -//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" - -//go:cgo_import_dynamic _ _ "libc.so" diff --git a/src/runtime/sys_openbsd3.go b/src/runtime/sys_openbsd3.go deleted file mode 100644 index de09ec5e25..0000000000 --- a/src/runtime/sys_openbsd3.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build openbsd && !mips64 - -package runtime - -import ( - "internal/abi" - "unsafe" -) - -// The X versions of syscall expect the libc call to return a 64-bit result. -// Otherwise (the non-X version) expects a 32-bit result. -// This distinction is required because an error is indicated by returning -1, -// and we need to know whether to check 32 or 64 bits of the result. -// (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.) - -// golang.org/x/sys linknames syscall_syscall -// (in addition to standard package syscall). -// Do not remove or change the type signature. -// -//go:linkname syscall_syscall syscall.syscall -//go:nosplit -//go:cgo_unsafe_args -func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - entersyscall() - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn)) - exitsyscall() - return -} -func syscall() - -//go:linkname syscall_syscallX syscall.syscallX -//go:nosplit -//go:cgo_unsafe_args -func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - entersyscall() - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&fn)) - exitsyscall() - return -} -func syscallX() - -// golang.org/x/sys linknames syscall.syscall6 -// (in addition to standard package syscall). -// Do not remove or change the type signature. 
-// -//go:linkname syscall_syscall6 syscall.syscall6 -//go:nosplit -//go:cgo_unsafe_args -func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - entersyscall() - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn)) - exitsyscall() - return -} -func syscall6() - -//go:linkname syscall_syscall6X syscall.syscall6X -//go:nosplit -//go:cgo_unsafe_args -func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - entersyscall() - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn)) - exitsyscall() - return -} -func syscall6X() - -// golang.org/x/sys linknames syscall.syscall10 -// (in addition to standard package syscall). -// Do not remove or change the type signature. -// -//go:linkname syscall_syscall10 syscall.syscall10 -//go:nosplit -//go:cgo_unsafe_args -func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) { - entersyscall() - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10)), unsafe.Pointer(&fn)) - exitsyscall() - return -} -func syscall10() - -//go:linkname syscall_syscall10X syscall.syscall10X -//go:nosplit -//go:cgo_unsafe_args -func syscall_syscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) { - entersyscall() - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn)) - exitsyscall() - return -} -func syscall10X() - -// golang.org/x/sys linknames syscall_rawSyscall -// (in addition to standard package syscall). -// Do not remove or change the type signature. -// -//go:linkname syscall_rawSyscall syscall.rawSyscall -//go:nosplit -//go:cgo_unsafe_args -func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn)) - return -} - -// golang.org/x/sys linknames syscall_rawSyscall6 -// (in addition to standard package syscall). -// Do not remove or change the type signature. -// -//go:linkname syscall_rawSyscall6 syscall.rawSyscall6 -//go:nosplit -//go:cgo_unsafe_args -func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn)) - return -} - -//go:linkname syscall_rawSyscall6X syscall.rawSyscall6X -//go:nosplit -//go:cgo_unsafe_args -func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn)) - return -} - -//go:linkname syscall_rawSyscall10X syscall.rawSyscall10X -//go:nosplit -//go:cgo_unsafe_args -func syscall_rawSyscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) { - libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall10X)), unsafe.Pointer(&fn)) - return -} diff --git a/src/runtime/sys_openbsd_mips64.s b/src/runtime/sys_openbsd_mips64.s deleted file mode 100644 index 7ac0db0480..0000000000 --- a/src/runtime/sys_openbsd_mips64.s +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// -// System calls and other sys.stuff for mips64, OpenBSD -// /usr/src/sys/kern/syscalls.master for syscall numbers. 
-// - -#include "go_asm.h" -#include "go_tls.h" -#include "textflag.h" - -#define CLOCK_REALTIME $0 -#define CLOCK_MONOTONIC $3 - -// Exit the entire program (like C exit) -TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0 - MOVW code+0(FP), R4 // arg 1 - status - MOVV $1, R2 // sys_exit - SYSCALL - BEQ R7, 3(PC) - MOVV $0, R2 // crash on syscall failure - MOVV R2, (R2) - RET - -// func exitThread(wait *atomic.Uint32) -TEXT runtime·exitThread(SB),NOSPLIT,$0 - MOVV wait+0(FP), R4 // arg 1 - notdead - MOVV $302, R2 // sys___threxit - SYSCALL - MOVV $0, R2 // crash on syscall failure - MOVV R2, (R2) - JMP 0(PC) - -TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0 - MOVV name+0(FP), R4 // arg 1 - path - MOVW mode+8(FP), R5 // arg 2 - mode - MOVW perm+12(FP), R6 // arg 3 - perm - MOVV $5, R2 // sys_open - SYSCALL - BEQ R7, 2(PC) - MOVW $-1, R2 - MOVW R2, ret+16(FP) - RET - -TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0 - MOVW fd+0(FP), R4 // arg 1 - fd - MOVV $6, R2 // sys_close - SYSCALL - BEQ R7, 2(PC) - MOVW $-1, R2 - MOVW R2, ret+8(FP) - RET - -TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0 - MOVW fd+0(FP), R4 // arg 1 - fd - MOVV p+8(FP), R5 // arg 2 - buf - MOVW n+16(FP), R6 // arg 3 - nbyte - MOVV $3, R2 // sys_read - SYSCALL - BEQ R7, 2(PC) - SUBVU R2, R0, R2 // caller expects negative errno - MOVW R2, ret+24(FP) - RET - -// func pipe2(flags int32) (r, w int32, errno int32) -TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20 - MOVV $r+8(FP), R4 - MOVW flags+0(FP), R5 - MOVV $101, R2 // sys_pipe2 - SYSCALL - BEQ R7, 2(PC) - SUBVU R2, R0, R2 // caller expects negative errno - MOVW R2, errno+16(FP) - RET - -TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0 - MOVV fd+0(FP), R4 // arg 1 - fd - MOVV p+8(FP), R5 // arg 2 - buf - MOVW n+16(FP), R6 // arg 3 - nbyte - MOVV $4, R2 // sys_write - SYSCALL - BEQ R7, 2(PC) - SUBVU R2, R0, R2 // caller expects negative errno - MOVW R2, ret+24(FP) - RET - -TEXT runtime·usleep(SB),NOSPLIT,$24-4 - MOVWU usec+0(FP), R3 - MOVV R3, R5 - MOVW $1000000, R4 - DIVVU R4, R3 - MOVV LO, R3 - MOVV R3, 8(R29) // tv_sec - MOVW $1000, R4 - MULVU R3, R4 - MOVV LO, R4 - SUBVU R4, R5 - MOVV R5, 16(R29) // tv_nsec - - ADDV $8, R29, R4 // arg 1 - rqtp - MOVV $0, R5 // arg 2 - rmtp - MOVV $91, R2 // sys_nanosleep - SYSCALL - RET - -TEXT runtime·getthrid(SB),NOSPLIT,$0-4 - MOVV $299, R2 // sys_getthrid - SYSCALL - MOVW R2, ret+0(FP) - RET - -TEXT runtime·thrkill(SB),NOSPLIT,$0-16 - MOVW tid+0(FP), R4 // arg 1 - tid - MOVV sig+8(FP), R5 // arg 2 - signum - MOVW $0, R6 // arg 3 - tcb - MOVV $119, R2 // sys_thrkill - SYSCALL - RET - -TEXT runtime·raiseproc(SB),NOSPLIT,$0 - MOVV $20, R4 // sys_getpid - SYSCALL - MOVV R2, R4 // arg 1 - pid - MOVW sig+0(FP), R5 // arg 2 - signum - MOVV $122, R2 // sys_kill - SYSCALL - RET - -TEXT runtime·mmap(SB),NOSPLIT,$0 - MOVV addr+0(FP), R4 // arg 1 - addr - MOVV n+8(FP), R5 // arg 2 - len - MOVW prot+16(FP), R6 // arg 3 - prot - MOVW flags+20(FP), R7 // arg 4 - flags - MOVW fd+24(FP), R8 // arg 5 - fd - MOVW $0, R9 // arg 6 - pad - MOVW off+28(FP), R10 // arg 7 - offset - MOVV $197, R2 // sys_mmap - SYSCALL - MOVV $0, R4 - BEQ R7, 3(PC) - MOVV R2, R4 // if error, move to R4 - MOVV $0, R2 - MOVV R2, p+32(FP) - MOVV R4, err+40(FP) - RET - -TEXT runtime·munmap(SB),NOSPLIT,$0 - MOVV addr+0(FP), R4 // arg 1 - addr - MOVV n+8(FP), R5 // arg 2 - len - MOVV $73, R2 // sys_munmap - SYSCALL - BEQ R7, 3(PC) - MOVV $0, R2 // crash on syscall failure - MOVV R2, (R2) - RET - -TEXT runtime·madvise(SB),NOSPLIT,$0 - MOVV addr+0(FP), R4 // arg 1 - addr - MOVV n+8(FP), R5 // arg 2 - len - MOVW 
flags+16(FP), R6 // arg 2 - flags - MOVV $75, R2 // sys_madvise - SYSCALL - BEQ R7, 2(PC) - MOVW $-1, R2 - MOVW R2, ret+24(FP) - RET - -TEXT runtime·setitimer(SB),NOSPLIT,$0 - MOVW mode+0(FP), R4 // arg 1 - mode - MOVV new+8(FP), R5 // arg 2 - new value - MOVV old+16(FP), R6 // arg 3 - old value - MOVV $69, R2 // sys_setitimer - SYSCALL - RET - -// func walltime() (sec int64, nsec int32) -TEXT runtime·walltime(SB), NOSPLIT, $32 - MOVW CLOCK_REALTIME, R4 // arg 1 - clock_id - MOVV $8(R29), R5 // arg 2 - tp - MOVV $87, R2 // sys_clock_gettime - SYSCALL - - MOVV 8(R29), R4 // sec - MOVV 16(R29), R5 // nsec - MOVV R4, sec+0(FP) - MOVW R5, nsec+8(FP) - - RET - -// int64 nanotime1(void) so really -// void nanotime1(int64 *nsec) -TEXT runtime·nanotime1(SB),NOSPLIT,$32 - MOVW CLOCK_MONOTONIC, R4 // arg 1 - clock_id - MOVV $8(R29), R5 // arg 2 - tp - MOVV $87, R2 // sys_clock_gettime - SYSCALL - - MOVV 8(R29), R3 // sec - MOVV 16(R29), R5 // nsec - - MOVV $1000000000, R4 - MULVU R4, R3 - MOVV LO, R3 - ADDVU R5, R3 - MOVV R3, ret+0(FP) - RET - -TEXT runtime·sigaction(SB),NOSPLIT,$0 - MOVW sig+0(FP), R4 // arg 1 - signum - MOVV new+8(FP), R5 // arg 2 - new sigaction - MOVV old+16(FP), R6 // arg 3 - old sigaction - MOVV $46, R2 // sys_sigaction - SYSCALL - BEQ R7, 3(PC) - MOVV $3, R2 // crash on syscall failure - MOVV R2, (R2) - RET - -TEXT runtime·obsdsigprocmask(SB),NOSPLIT,$0 - MOVW how+0(FP), R4 // arg 1 - mode - MOVW new+4(FP), R5 // arg 2 - new - MOVV $48, R2 // sys_sigprocmask - SYSCALL - BEQ R7, 3(PC) - MOVV $3, R2 // crash on syscall failure - MOVV R2, (R2) - MOVW R2, ret+8(FP) - RET - -TEXT runtime·sigfwd(SB),NOSPLIT,$0-32 - MOVW sig+8(FP), R4 - MOVV info+16(FP), R5 - MOVV ctx+24(FP), R6 - MOVV fn+0(FP), R25 // Must use R25, needed for PIC code. - CALL (R25) - RET - -TEXT runtime·sigtramp(SB),NOSPLIT|TOPFRAME,$192 - // initialize REGSB = PC&0xffffffff00000000 - BGEZAL R0, 1(PC) - SRLV $32, R31, RSB - SLLV $32, RSB - - // this might be called in external code context, - // where g is not set. - MOVB runtime·iscgo(SB), R1 - BEQ R1, 2(PC) - JAL runtime·load_g(SB) - - MOVW R4, 8(R29) - MOVV R5, 16(R29) - MOVV R6, 24(R29) - MOVV $runtime·sigtrampgo(SB), R1 - JAL (R1) - RET - -// int32 tfork(void *param, uintptr psize, M *mp, G *gp, void (*fn)(void)); -TEXT runtime·tfork(SB),NOSPLIT,$0 - - // Copy mp, gp and fn off parent stack for use by child. - MOVV mm+16(FP), R16 - MOVV gg+24(FP), R17 - MOVV fn+32(FP), R18 - - MOVV param+0(FP), R4 // arg 1 - param - MOVV psize+8(FP), R5 // arg 2 - psize - MOVV $8, R2 // sys___tfork - SYSCALL - - // Return if syscall failed. - BEQ R7, 4(PC) - SUBVU R2, R0, R2 // caller expects negative errno - MOVW R2, ret+40(FP) - RET - - // In parent, return. - BEQ R2, 3(PC) - MOVW $0, ret+40(FP) - RET - - // Initialise m, g. - MOVV R17, g - MOVV R16, g_m(g) - - // Call fn. - CALL (R18) - - // fn should never return. 
- MOVV $2, R8 // crash if reached - MOVV R8, (R8) - RET - -TEXT runtime·sigaltstack(SB),NOSPLIT,$0 - MOVV new+0(FP), R4 // arg 1 - new sigaltstack - MOVV old+8(FP), R5 // arg 2 - old sigaltstack - MOVV $288, R2 // sys_sigaltstack - SYSCALL - BEQ R7, 3(PC) - MOVV $0, R8 // crash on syscall failure - MOVV R8, (R8) - RET - -TEXT runtime·osyield(SB),NOSPLIT,$0 - MOVV $298, R2 // sys_sched_yield - SYSCALL - RET - -TEXT runtime·thrsleep(SB),NOSPLIT,$0 - MOVV ident+0(FP), R4 // arg 1 - ident - MOVW clock_id+8(FP), R5 // arg 2 - clock_id - MOVV tsp+16(FP), R6 // arg 3 - tsp - MOVV lock+24(FP), R7 // arg 4 - lock - MOVV abort+32(FP), R8 // arg 5 - abort - MOVV $94, R2 // sys___thrsleep - SYSCALL - MOVW R2, ret+40(FP) - RET - -TEXT runtime·thrwakeup(SB),NOSPLIT,$0 - MOVV ident+0(FP), R4 // arg 1 - ident - MOVW n+8(FP), R5 // arg 2 - n - MOVV $301, R2 // sys___thrwakeup - SYSCALL - MOVW R2, ret+16(FP) - RET - -TEXT runtime·sysctl(SB),NOSPLIT,$0 - MOVV mib+0(FP), R4 // arg 1 - mib - MOVW miblen+8(FP), R5 // arg 2 - miblen - MOVV out+16(FP), R6 // arg 3 - out - MOVV size+24(FP), R7 // arg 4 - size - MOVV dst+32(FP), R8 // arg 5 - dest - MOVV ndst+40(FP), R9 // arg 6 - newlen - MOVV $202, R2 // sys___sysctl - SYSCALL - BEQ R7, 2(PC) - SUBVU R2, R0, R2 // caller expects negative errno - MOVW R2, ret+48(FP) - RET - -// int32 runtime·kqueue(void); -TEXT runtime·kqueue(SB),NOSPLIT,$0 - MOVV $269, R2 // sys_kqueue - SYSCALL - BEQ R7, 2(PC) - SUBVU R2, R0, R2 // caller expects negative errno - MOVW R2, ret+0(FP) - RET - -// int32 runtime·kevent(int kq, Kevent *changelist, int nchanges, Kevent *eventlist, int nevents, Timespec *timeout); -TEXT runtime·kevent(SB),NOSPLIT,$0 - MOVW kq+0(FP), R4 // arg 1 - kq - MOVV ch+8(FP), R5 // arg 2 - changelist - MOVW nch+16(FP), R6 // arg 3 - nchanges - MOVV ev+24(FP), R7 // arg 4 - eventlist - MOVW nev+32(FP), R8 // arg 5 - nevents - MOVV ts+40(FP), R9 // arg 6 - timeout - MOVV $72, R2 // sys_kevent - SYSCALL - BEQ R7, 2(PC) - SUBVU R2, R0, R2 // caller expects negative errno - MOVW R2, ret+48(FP) - RET - -// func fcntl(fd, cmd, arg int32) (int32, int32) -TEXT runtime·fcntl(SB),NOSPLIT,$0 - MOVW fd+0(FP), R4 // fd - MOVW cmd+4(FP), R5 // cmd - MOVW arg+8(FP), R6 // arg - MOVV $92, R2 // sys_fcntl - SYSCALL - MOVV $0, R4 - BEQ R7, noerr - MOVV R2, R4 - MOVW $-1, R2 -noerr: - MOVW R2, ret+16(FP) - MOVW R4, errno+20(FP) - RET - -// func issetugid() int32 -TEXT runtime·issetugid(SB),NOSPLIT,$0 - MOVV $253, R2 // sys_issetugid - SYSCALL - MOVW R2, ret+0(FP) - RET diff --git a/src/runtime/sys_solaris_amd64.s b/src/runtime/sys_solaris_amd64.s index 7a80020ba3..9235cad391 100644 --- a/src/runtime/sys_solaris_amd64.s +++ b/src/runtime/sys_solaris_amd64.s @@ -155,7 +155,7 @@ allgood: // save m->libcall MOVQ g_m(R10), BP - LEAQ m_libcall(BP), R11 + LEAQ (m_mOS+mOS_libcall)(BP), R11 MOVQ libcall_fn(R11), R10 MOVQ R10, 72(SP) MOVQ libcall_args(R11), R10 @@ -197,7 +197,7 @@ allgood: MOVQ g(BX), BP MOVQ g_m(BP), BP // restore libcall - LEAQ m_libcall(BP), R11 + LEAQ (m_mOS+mOS_libcall)(BP), R11 MOVQ 72(SP), R10 MOVQ R10, libcall_fn(R11) MOVQ 80(SP), R10 diff --git a/src/runtime/sys_windows_386.s b/src/runtime/sys_windows_386.s index e71fda78ae..4030e4c38b 100644 --- a/src/runtime/sys_windows_386.s +++ b/src/runtime/sys_windows_386.s @@ -11,49 +11,6 @@ #define TEB_TlsSlots 0xE10 #define TEB_ArbitraryPtr 0x14 -TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0 - JMP runtime·asmstdcall(SB) - -// void runtime·asmstdcall(void *c); -TEXT runtime·asmstdcall(SB),NOSPLIT,$0 - MOVL 
fn+0(FP), BX - MOVL SP, BP // save stack pointer - - // SetLastError(0). - MOVL $0, 0x34(FS) - - MOVL libcall_n(BX), CX - - // Fast version, do not store args on the stack. - CMPL CX, $0 - JE docall - - // Copy args to the stack. - MOVL CX, AX - SALL $2, AX - SUBL AX, SP // room for args - MOVL SP, DI - MOVL libcall_args(BX), SI - CLD - REP; MOVSL - -docall: - // Call stdcall or cdecl function. - // DI SI BP BX are preserved, SP is not - CALL libcall_fn(BX) - MOVL BP, SP - - // Return result. - MOVL fn+0(FP), BX - MOVL AX, libcall_r1(BX) - MOVL DX, libcall_r2(BX) - - // GetLastError(). - MOVL 0x34(FS), AX - MOVL AX, libcall_err(BX) - - RET - // faster get/set last error TEXT runtime·getlasterror(SB),NOSPLIT,$0 MOVL 0x34(FS), AX diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s index 56a2dc0bcf..e438599910 100644 --- a/src/runtime/sys_windows_amd64.s +++ b/src/runtime/sys_windows_amd64.s @@ -12,85 +12,6 @@ #define TEB_TlsSlots 0x1480 #define TEB_ArbitraryPtr 0x28 -TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0 - MOVQ AX, CX - JMP runtime·asmstdcall(SB) - -// void runtime·asmstdcall(void *c); -TEXT runtime·asmstdcall(SB),NOSPLIT,$16 - MOVQ SP, AX - ANDQ $~15, SP // alignment as per Windows requirement - MOVQ AX, 8(SP) - MOVQ CX, 0(SP) // asmcgocall will put first argument into CX. - - MOVQ libcall_fn(CX), AX - MOVQ libcall_args(CX), SI - MOVQ libcall_n(CX), CX - - // SetLastError(0). - MOVQ 0x30(GS), DI - MOVL $0, 0x68(DI) - - SUBQ $(const_maxArgs*8), SP // room for args - - // Fast version, do not store args on the stack. - CMPL CX, $0; JE _0args - CMPL CX, $1; JE _1args - CMPL CX, $2; JE _2args - CMPL CX, $3; JE _3args - CMPL CX, $4; JE _4args - - // Check we have enough room for args. - CMPL CX, $const_maxArgs - JLE 2(PC) - INT $3 // not enough room -> crash - - // Copy args to the stack. - MOVQ SP, DI - CLD - REP; MOVSQ - MOVQ SP, SI - - // Load first 4 args into correspondent registers. - // Floating point arguments are passed in the XMM - // registers. Set them here in case any of the arguments - // are floating point values. For details see - // https://learn.microsoft.com/en-us/cpp/build/x64-calling-convention?view=msvc-170 -_4args: - MOVQ 24(SI), R9 - MOVQ R9, X3 -_3args: - MOVQ 16(SI), R8 - MOVQ R8, X2 -_2args: - MOVQ 8(SI), DX - MOVQ DX, X1 -_1args: - MOVQ 0(SI), CX - MOVQ CX, X0 -_0args: - - // Call stdcall function. - CALL AX - - ADDQ $(const_maxArgs*8), SP - - // Return result. - MOVQ 0(SP), CX - MOVQ 8(SP), SP - MOVQ AX, libcall_r1(CX) - // Floating point return values are returned in XMM0. Setting r2 to this - // value in case this call returned a floating point value. For details, - // see https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention - MOVQ X0, libcall_r2(CX) - - // GetLastError(). - MOVQ 0x30(GS), DI - MOVL 0x68(DI), AX - MOVQ AX, libcall_err(CX) - - RET - // faster get/set last error TEXT runtime·getlasterror(SB),NOSPLIT,$0 MOVQ 0x30(GS), AX diff --git a/src/runtime/sys_windows_arm.s b/src/runtime/sys_windows_arm.s index 99f33cf07d..c7f2369e57 100644 --- a/src/runtime/sys_windows_arm.s +++ b/src/runtime/sys_windows_arm.s @@ -9,76 +9,6 @@ // Note: For system ABI, R0-R3 are args, R4-R11 are callee-save. 
-TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0 - B runtime·asmstdcall(SB) - -// void runtime·asmstdcall(void *c); -TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0 - MOVM.DB.W [R4, R5, R14], (R13) // push {r4, r5, lr} - MOVW R0, R4 // put libcall * in r4 - MOVW R13, R5 // save stack pointer in r5 - - // SetLastError(0) - MOVW $0, R0 - MRC 15, 0, R1, C13, C0, 2 - MOVW R0, 0x34(R1) - - MOVW 8(R4), R12 // libcall->args - - // Do we have more than 4 arguments? - MOVW 4(R4), R0 // libcall->n - SUB.S $4, R0, R2 - BLE loadregs - - // Reserve stack space for remaining args - SUB R2<<2, R13 - BIC $0x7, R13 // alignment for ABI - - // R0: count of arguments - // R1: - // R2: loop counter, from 0 to (n-4) - // R3: scratch - // R4: pointer to libcall struct - // R12: libcall->args - MOVW $0, R2 -stackargs: - ADD $4, R2, R3 // r3 = args[4 + i] - MOVW R3<<2(R12), R3 - MOVW R3, R2<<2(R13) // stack[i] = r3 - - ADD $1, R2 // i++ - SUB $4, R0, R3 // while (i < (n - 4)) - CMP R3, R2 - BLT stackargs - -loadregs: - CMP $3, R0 - MOVW.GT 12(R12), R3 - - CMP $2, R0 - MOVW.GT 8(R12), R2 - - CMP $1, R0 - MOVW.GT 4(R12), R1 - - CMP $0, R0 - MOVW.GT 0(R12), R0 - - BIC $0x7, R13 // alignment for ABI - MOVW 0(R4), R12 // branch to libcall->fn - BL (R12) - - MOVW R5, R13 // free stack space - MOVW R0, 12(R4) // save return value to libcall->r1 - MOVW R1, 16(R4) - - // GetLastError - MRC 15, 0, R1, C13, C0, 2 - MOVW 0x34(R1), R0 - MOVW R0, 20(R4) // store in libcall->err - - MOVM.IA.W (R13), [R4, R5, R15] - TEXT runtime·getlasterror(SB),NOSPLIT,$0 MRC 15, 0, R0, C13, C0, 2 MOVW 0x34(R0), R0 diff --git a/src/runtime/sys_windows_arm64.s b/src/runtime/sys_windows_arm64.s index 1f6d411b07..da3cb7e546 100644 --- a/src/runtime/sys_windows_arm64.s +++ b/src/runtime/sys_windows_arm64.s @@ -19,88 +19,6 @@ // // load_g and save_g (in tls_arm64.s) clobber R27 (REGTMP) and R0. -TEXT runtime·asmstdcall_trampoline<ABIInternal>(SB),NOSPLIT,$0 - B runtime·asmstdcall(SB) - -// void runtime·asmstdcall(void *c); -TEXT runtime·asmstdcall(SB),NOSPLIT,$16 - STP (R19, R20), 16(RSP) // save old R19, R20 - MOVD R0, R19 // save libcall pointer - MOVD RSP, R20 // save stack pointer - - // SetLastError(0) - MOVD $0, TEB_error(R18_PLATFORM) - MOVD libcall_args(R19), R12 // libcall->args - - // Do we have more than 8 arguments? 
- MOVD libcall_n(R19), R0 - CMP $0, R0; BEQ _0args - CMP $1, R0; BEQ _1args - CMP $2, R0; BEQ _2args - CMP $3, R0; BEQ _3args - CMP $4, R0; BEQ _4args - CMP $5, R0; BEQ _5args - CMP $6, R0; BEQ _6args - CMP $7, R0; BEQ _7args - CMP $8, R0; BEQ _8args - - // Reserve stack space for remaining args - SUB $8, R0, R2 - ADD $1, R2, R3 // make even number of words for stack alignment - AND $~1, R3 - LSL $3, R3 - SUB R3, RSP - - // R4: size of stack arguments (n-8)*8 - // R5: &args[8] - // R6: loop counter, from 0 to (n-8)*8 - // R7: scratch - // R8: copy of RSP - (R2)(RSP) assembles as (R2)(ZR) - SUB $8, R0, R4 - LSL $3, R4 - ADD $(8*8), R12, R5 - MOVD $0, R6 - MOVD RSP, R8 -stackargs: - MOVD (R6)(R5), R7 - MOVD R7, (R6)(R8) - ADD $8, R6 - CMP R6, R4 - BNE stackargs - -_8args: - MOVD (7*8)(R12), R7 -_7args: - MOVD (6*8)(R12), R6 -_6args: - MOVD (5*8)(R12), R5 -_5args: - MOVD (4*8)(R12), R4 -_4args: - MOVD (3*8)(R12), R3 -_3args: - MOVD (2*8)(R12), R2 -_2args: - MOVD (1*8)(R12), R1 -_1args: - MOVD (0*8)(R12), R0 -_0args: - - MOVD libcall_fn(R19), R12 // branch to libcall->fn - BL (R12) - - MOVD R20, RSP // free stack space - MOVD R0, libcall_r1(R19) // save return value to libcall->r1 - // TODO(rsc) floating point like amd64 in libcall->r2? - - // GetLastError - MOVD TEB_error(R18_PLATFORM), R0 - MOVD R0, libcall_err(R19) - - // Restore callee-saved registers. - LDP 16(RSP), (R19, R20) - RET - TEXT runtime·getlasterror(SB),NOSPLIT,$0 MOVD TEB_error(R18_PLATFORM), R0 MOVD R0, ret+0(FP) diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go index 85b1b8c902..b3c3d8c0d5 100644 --- a/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go @@ -7,6 +7,7 @@ package runtime import ( "internal/abi" "internal/goarch" + "internal/runtime/syscall/windows" "unsafe" ) @@ -103,7 +104,7 @@ func (p *abiDesc) assignArg(t *_type) { // registers and the stack. panic("compileCallback: argument size is larger than uintptr") } - if k := t.Kind_ & abi.KindMask; GOARCH != "386" && (k == abi.Float32 || k == abi.Float64) { + if k := t.Kind(); GOARCH != "386" && (k == abi.Float32 || k == abi.Float64) { // In fastcall, floating-point arguments in // the first four positions are passed in // floating-point registers, which we don't @@ -174,7 +175,7 @@ func (p *abiDesc) assignArg(t *_type) { // // Returns whether the assignment succeeded. func (p *abiDesc) tryRegAssignArg(t *_type, offset uintptr) bool { - switch k := t.Kind_ & abi.KindMask; k { + switch k := t.Kind(); k { case abi.Bool, abi.Int, abi.Int8, abi.Int16, abi.Int32, abi.Uint, abi.Uint8, abi.Uint16, abi.Uint32, abi.Uintptr, abi.Pointer, abi.UnsafePointer: // Assign a register for all these types. return p.assignReg(t.Size_, offset) @@ -269,7 +270,7 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) { cdecl = false } - if fn._type == nil || (fn._type.Kind_&abi.KindMask) != abi.Func { + if fn._type == nil || fn._type.Kind() != abi.Func { panic("compileCallback: expected function with one uintptr-sized result") } ft := (*functype)(unsafe.Pointer(fn._type)) @@ -290,7 +291,7 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) { if ft.OutSlice()[0].Size_ != goarch.PtrSize { panic("compileCallback: expected function with one uintptr-sized result") } - if k := ft.OutSlice()[0].Kind_ & abi.KindMask; k == abi.Float32 || k == abi.Float64 { + if k := ft.OutSlice()[0].Kind(); k == abi.Float32 || k == abi.Float64 { // In cdecl and stdcall, float results are returned in // ST(0). In fastcall, they're returned in XMM0. 
diff --git a/src/runtime/syscall_windows_test.go b/src/runtime/syscall_windows_test.go
index ad9bfb464b..6a9b165d62 100644
--- a/src/runtime/syscall_windows_test.go
+++ b/src/runtime/syscall_windows_test.go
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"internal/abi"
 	"internal/race"
+	"internal/runtime/syscall/windows"
 	"internal/syscall/windows/sysdll"
 	"internal/testenv"
 	"io"
@@ -776,7 +777,7 @@ func TestSyscallN(t *testing.T) {
 		t.Skipf("skipping test: GOARCH=%s", runtime.GOARCH)
 	}
 
-	for arglen := 0; arglen <= runtime.MaxArgs; arglen++ {
+	for arglen := 0; arglen <= windows.MaxArgs; arglen++ {
 		arglen := arglen
 		t.Run(fmt.Sprintf("arg-%d", arglen), func(t *testing.T) {
 			t.Parallel()
diff --git a/src/runtime/testdata/testprog/coro.go b/src/runtime/testdata/testprog/coro.go
index 032215b801..5f3d302987 100644
--- a/src/runtime/testdata/testprog/coro.go
+++ b/src/runtime/testdata/testprog/coro.go
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.rangefunc
-
 package main
 
 import (
diff --git a/src/runtime/testdata/testprog/gc.go b/src/runtime/testdata/testprog/gc.go
index 5dc85fbb62..bbe1453401 100644
--- a/src/runtime/testdata/testprog/gc.go
+++ b/src/runtime/testdata/testprog/gc.go
@@ -395,6 +395,9 @@ func gcMemoryLimit(gcPercent int) {
 	// somewhat heavily here) this bound is kept loose. In practice the Go runtime
 	// should do considerably better than this bound.
 	bound := int64(myLimit + 16<<20)
+	if runtime.GOOS == "darwin" {
+		bound += 16 << 20 // Be more lax on Darwin, see issue 73136.
+	}
 	start := time.Now()
 	for time.Since(start) < 200*time.Millisecond {
 		metrics.Read(m[:])
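
The gc.go change above loosens the memory-limit bound on Darwin. The test's underlying pattern, reading a runtime/metrics sample and comparing it to a limit plus slack, looks roughly like this sketch (the limit value is made up for illustration):

```go
package main

import (
	"fmt"
	"runtime"
	"runtime/metrics"
)

func main() {
	const myLimit = 64 << 20 // hypothetical limit, for illustration only
	bound := int64(myLimit + 16<<20)
	if runtime.GOOS == "darwin" {
		bound += 16 << 20 // extra slack on Darwin, mirroring the change above
	}
	s := []metrics.Sample{{Name: "/memory/classes/total:bytes"}}
	metrics.Read(s)
	fmt.Printf("total = %d bytes, bound = %d\n", s[0].Value.Uint64(), bound)
}
```
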
diff --git a/src/runtime/testdata/testprogcgo/coro.go b/src/runtime/testdata/testprogcgo/coro.go
index e0cb945112..93be92cb7a 100644
--- a/src/runtime/testdata/testprogcgo/coro.go
+++ b/src/runtime/testdata/testprogcgo/coro.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build goexperiment.rangefunc && !windows
+//go:build !windows
 
 package main
 
diff --git a/src/runtime/traceallocfree.go b/src/runtime/traceallocfree.go
index 70e48ea3a6..b1b6c63462 100644
--- a/src/runtime/traceallocfree.go
+++ b/src/runtime/traceallocfree.go
@@ -37,7 +37,7 @@ func traceSnapshotMemory(gen uintptr) {
 	}
 
 	// Emit info.
-	w.varint(uint64(trace.minPageHeapAddr))
+	w.varint(trace.minPageHeapAddr)
 	w.varint(uint64(pageSize))
 	w.varint(uint64(gc.MinHeapAlign))
 	w.varint(uint64(fixedStack))
diff --git a/src/runtime/tracebuf.go b/src/runtime/tracebuf.go
index 08a1d46838..5adaede424 100644
--- a/src/runtime/tracebuf.go
+++ b/src/runtime/tracebuf.go
@@ -183,7 +183,7 @@ func (w traceWriter) refill() traceWriter {
 	// Tolerate a nil mp.
 	mID := ^uint64(0)
 	if w.mp != nil {
-		mID = uint64(w.mp.procid)
+		mID = w.mp.procid
 	}
 
 	// Write the buffer's header.
@@ -194,7 +194,7 @@ func (w traceWriter) refill() traceWriter {
 		w.byte(byte(w.exp))
 	}
 	w.varint(uint64(w.gen))
-	w.varint(uint64(mID))
+	w.varint(mID)
 	w.varint(uint64(ts))
 	w.traceBuf.lenPos = w.varintReserve()
 	return w
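
The w.varint calls in the trace writer above emit unsigned LEB128 values, which is why the redundant uint64 conversions can simply be dropped once the operands are already uint64. A standalone sketch of that encoding (the runtime's internal writer differs in buffering details):

```go
package main

import "fmt"

// appendVarint encodes v as an unsigned LEB128 varint: seven bits per
// byte, low-order group first, high bit set on all but the final byte.
func appendVarint(buf []byte, v uint64) []byte {
	for v >= 0x80 {
		buf = append(buf, byte(v)|0x80)
		v >>= 7
	}
	return append(buf, byte(v))
}

func main() {
	fmt.Printf("% x\n", appendVarint(nil, 300)) // ac 02
}
```
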
diff --git a/src/runtime/tracecpu.go b/src/runtime/tracecpu.go
index 092c707f83..e64ca32cdf 100644
--- a/src/runtime/tracecpu.go
+++ b/src/runtime/tracecpu.go
@@ -258,7 +258,7 @@ func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) {
 	if gp != nil {
 		hdr[1] = gp.goid
 	}
-	hdr[2] = uint64(mp.procid)
+	hdr[2] = mp.procid
 
 	// Allow only one writer at a time
 	for !trace.signalLock.CompareAndSwap(0, 1) {
diff --git a/src/runtime/traceevent.go b/src/runtime/traceevent.go
index 263847be2e..b0bc4c017d 100644
--- a/src/runtime/traceevent.go
+++ b/src/runtime/traceevent.go
@@ -42,7 +42,7 @@ func (tl traceLocker) eventWriter(goStatus tracev2.GoStatus, procStatus tracev2.
 		tl.writer().writeProcStatus(uint64(pp.id), procStatus, pp.trace.inSweep).end()
 	}
 	if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
-		tl.writer().writeGoStatus(uint64(gp.goid), int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
+		tl.writer().writeGoStatus(gp.goid, int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
 	}
 	return traceEventWriter{tl}
 }
diff --git a/src/runtime/traceruntime.go b/src/runtime/traceruntime.go
index a2775a3427..06e36fd802 100644
--- a/src/runtime/traceruntime.go
+++ b/src/runtime/traceruntime.go
@@ -457,7 +457,7 @@ func (tl traceLocker) GoPreempt() {
 
 // GoStop emits a GoStop event with the provided reason.
 func (tl traceLocker) GoStop(reason traceGoStopReason) {
-	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(0))
+	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, trace.goStopReasons[tl.gen%2][reason], tl.stack(0))
 }
 
 // GoPark emits a GoBlock event with the provided reason.
@@ -465,7 +465,7 @@ func (tl traceLocker) GoStop(reason traceGoStopReason) {
 // TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
 // that we have both, and waitReason is way more descriptive.
 func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
-	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
+	tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, trace.goBlockReasons[tl.gen%2][reason], tl.stack(skip))
 }
 
 // GoUnpark emits a GoUnblock event.
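
GoStop and GoBlock, touched above, are the events a user-level trace records when a goroutine stops or parks. A minimal capture using only the public runtime/trace API; the blocking channel receive below should appear as a GoBlock/GoUnblock pair when viewed with go tool trace:

```go
package main

import (
	"log"
	"os"
	"runtime/trace"
	"time"
)

func main() {
	f, err := os.Create("trace.out")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := trace.Start(f); err != nil {
		log.Fatal(err)
	}
	defer trace.Stop()

	ch := make(chan int)
	go func() { time.Sleep(10 * time.Millisecond); ch <- 1 }()
	<-ch // parks this goroutine until the send arrives
}
```
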
diff --git a/src/runtime/tracestack.go b/src/runtime/tracestack.go
index 76d6b05048..51f3c29445 100644
--- a/src/runtime/tracestack.go
+++ b/src/runtime/tracestack.go
@@ -190,7 +190,7 @@ func dumpStacksRec(node *traceMapNode, w traceWriter, stackBuf []uintptr) traceW
 
 	// Emit stack event.
 	w.byte(byte(tracev2.EvStack))
-	w.varint(uint64(node.id))
+	w.varint(node.id)
 	w.varint(uint64(len(frames)))
 	for _, frame := range frames {
 		w.varint(uint64(frame.PC))
diff --git a/src/runtime/tracetype.go b/src/runtime/tracetype.go
index f54f812578..613fc88202 100644
--- a/src/runtime/tracetype.go
+++ b/src/runtime/tracetype.go
@@ -64,7 +64,7 @@ func dumpTypesRec(node *traceMapNode, w traceWriter) traceWriter {
 	}
 
 	// Emit type.
-	w.varint(uint64(node.id))
+	w.varint(node.id)
 	w.varint(uint64(uintptr(unsafe.Pointer(typ))))
 	w.varint(uint64(typ.Size()))
 	w.varint(uint64(typ.PtrBytes))
diff --git a/src/runtime/type.go b/src/runtime/type.go
index c11c866cd8..9009119464 100644
--- a/src/runtime/type.go
+++ b/src/runtime/type.go
@@ -9,7 +9,6 @@ package runtime
 import (
 	"internal/abi"
 	"internal/goarch"
-	"internal/goexperiment"
 	"internal/runtime/atomic"
 	"unsafe"
 )
@@ -69,7 +68,7 @@ func (t rtype) pkgpath() string {
 	if u := t.uncommon(); u != nil {
 		return t.nameOff(u.PkgPath).Name()
 	}
-	switch t.Kind_ & abi.KindMask {
+	switch t.Kind() {
 	case abi.Struct:
 		st := (*structtype)(unsafe.Pointer(t.Type))
 		return st.PkgPath.Name()
@@ -522,8 +521,8 @@ func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
 	if t == v {
 		return true
 	}
-	kind := t.Kind_ & abi.KindMask
-	if kind != v.Kind_&abi.KindMask {
+	kind := t.Kind()
+	if kind != v.Kind() {
 		return false
 	}
 	rt, rv := toRType(t), toRType(v)
@@ -605,13 +604,8 @@ func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool {
 		}
 		return true
 	case abi.Map:
-		if goexperiment.SwissMap {
-			mt := (*abi.SwissMapType)(unsafe.Pointer(t))
-			mv := (*abi.SwissMapType)(unsafe.Pointer(v))
-			return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
-		}
-		mt := (*abi.OldMapType)(unsafe.Pointer(t))
-		mv := (*abi.OldMapType)(unsafe.Pointer(v))
+		mt := (*abi.MapType)(unsafe.Pointer(t))
+		mv := (*abi.MapType)(unsafe.Pointer(v))
 		return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen)
 	case abi.Pointer:
 		pt := (*ptrtype)(unsafe.Pointer(t))
diff --git a/src/runtime/typekind.go b/src/runtime/typekind.go
deleted file mode 100644
index 4920a7cf14..0000000000
--- a/src/runtime/typekind.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "internal/abi"
-
-// isDirectIface reports whether t is stored directly in an interface value.
-func isDirectIface(t *_type) bool {
-	return t.Kind_&abi.KindDirectIface != 0
-}
diff --git a/src/runtime/vdso_test.go b/src/runtime/vdso_test.go
index b0f5fbe728..cb70a040d6 100644
--- a/src/runtime/vdso_test.go
+++ b/src/runtime/vdso_test.go
@@ -62,7 +62,7 @@ func TestUsingVDSO(t *testing.T) {
 		t.Logf("%s", out)
 	}
 	if err != nil {
-		if err := err.(*exec.ExitError); err != nil && err.Sys().(syscall.WaitStatus).Signaled() {
+		if err, ok := err.(*exec.ExitError); ok && err.Sys().(syscall.WaitStatus).Signaled() {
 			if !bytes.Contains(out, []byte("+++ killed by")) {
 				// strace itself occasionally crashes.
 				// Here, it exited with a signal, but
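
The deleted typekind.go defined isDirectIface, which reported whether a type's values are stored directly in the interface word (its Kind_ carries abi.KindDirectIface). Runtime internals are not reachable from user code, but the same distinction is loosely observable through reflect; an illustration only, not the runtime's actual check:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Pointer-shaped values are the canonical direct case; multi-word
	// aggregates are stored indirectly, via a pointer to a copy.
	var p *int
	var s struct{ a, b int }
	fmt.Println(reflect.TypeOf(p).Kind()) // ptr: stored directly in the interface word
	fmt.Println(reflect.TypeOf(s).Kind()) // struct: stored indirectly
}
```
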
