aboutsummaryrefslogtreecommitdiff
path: root/src/runtime
diff options
context:
space:
mode:
authorCherry Mui <cherryyz@google.com>2025-09-16 10:22:34 -0400
committerCherry Mui <cherryyz@google.com>2025-09-16 10:22:34 -0400
commitbdd30e25caa0b69e335ba1f1f48566924850fa4b (patch)
tree3f7dba7bae3eaf8de272679aa7a00bf85585cd16 /src/runtime
parent0e590a505d7f1050ac60df4b52c414cfc618239d (diff)
parentca0e03560df7279bc307da08db7237beb32b0d99 (diff)
downloadgo-bdd30e25caa0b69e335ba1f1f48566924850fa4b.tar.xz
[dev.simd] all: merge master (ca0e035) into dev.simd
Conflicts: - src/internal/goexperiment/flags.go Merge List: + 2025-09-15 ca0e03560d cmd/link: remove support for windows/arm relocations + 2025-09-15 17a0fabc43 cmd/link: remove support for darwin/386 relocations + 2025-09-15 eb7c67fdc9 cmd/internal/obj/loong64: use the MOVVP instruction to optimize prologue + 2025-09-15 6b8d507508 cmd/internal/obj/riscv: implement vector segment load/store instructions + 2025-09-15 7ddbf4d820 cmd/asm: add double precision comparision testcases for riscv64 + 2025-09-15 c39abe0658 runtime: fix TestSehUnwind + 2025-09-15 e3ed0fbe6a all: replace strings.Split with strings.SplitSeq + 2025-09-15 10bfddc91d all: remove redundant words in comment + 2025-09-15 2469e92d8c cmd/compile: combine doubling with shift on riscv64 + 2025-09-15 aa83aee7de net/http: clarify panic conditions in Handle, HandleFunc, AddInsecureBypassPattern + 2025-09-15 b9e2977f1d crypto/internal/cryptotest: use linux-amd64_avx512 builder for SHA-NI + 2025-09-15 8105d0ccc2 cmd/go,crypto/internal/fips140: prevent using FIPS 140-3 mode with purego tag + 2025-09-15 7f70ca8726 crypto/internal/cryptotest: add MustSupportFIPS140 + 2025-09-15 9e71d8a9f7 cmd/internal/testdir: re-enable default all codegen flag on linux-amd64 + 2025-09-15 004858ccdd all: replace os.Getenv("GO_BUILDER_NAME") with testenv.Builder in tests + 2025-09-15 dbde15800c cmd: vendor x/tools@9fccddc + 2025-09-15 8ace10dad2 os: add (*Process).WithHandle + 2025-09-15 3573227fe3 os: add and use errProcessReleased + 2025-09-15 68c6a73380 internal/syscall/unix: add KernelVersionGE + 2025-09-15 e603e9834e cmd/link: support race mode with MSVC clang + 2025-09-15 e5ee1f2600 test/codegen: check zerobase for newobject on 0-sized types + 2025-09-15 77b93d41d5 net/http: add comments for transport gzip reader + 2025-09-15 30d510ca2d cmd/compile,cmd/gofmt: use reflect.TypeFor + 2025-09-15 8320fe8f0e runtime: deduplicate syscall assembly for darwin + 2025-09-14 080882a928 net: use context.AfterFunc in connect + 2025-09-12 
ac803b5949 cmd/go/internal/work: copy vet tool's stdout to our stdout + 2025-09-12 889e71c2ac runtime: move Windows types and consts to internal/runtime/syscall/windows + 2025-09-12 cc8a6780ac vendor: update x/tools to 3adf0e9, and other repos + 2025-09-12 911455fe18 cmd/link: don't count tbss section in TestFlagD + 2025-09-12 f1fd13016a cmd/compile: optimize abi.Type.GCData loads + 2025-09-12 dc960d0bfe cmd/compile, reflect: further allow inlining of TypeFor + 2025-09-12 7acb0d0446 runtime: fix syscall9 on darwin/arm64 + 2025-09-12 60c1ee9183 internal/goexperiment: add a sizespecializedmalloc goexperiment setting + 2025-09-12 c70713da82 cmd/link: support MSVC clang + 2025-09-12 9271bbbb80 internal/testenv: update Builder docs with LUCI builder names + 2025-09-12 a4e25c3d65 net,internal/poll: skip TestAllocs when race is enabled on Windows + 2025-09-12 dd8276657f cmd/asm, cmd/internal/obj: add riscv64 generic CSR ops + 2025-09-11 f37d75472d runtime: move mksizeclasses.go to runtime/_mkmalloc + 2025-09-11 73676e3223 cmd/go: run cgo and cgo compiles in their own actions + 2025-09-11 0e1b98993e testing: exit B.Loop early upon saturation + 2025-09-11 84e9ab3984 cmd/go/internal/work: remove deps[1]="fmt" vet hack Change-Id: I1424228bcd9291c9ff29f6ae843d5b90652f237e
Diffstat (limited to 'src/runtime')
-rw-r--r--src/runtime/_mkmalloc/mksizeclasses.go364
-rw-r--r--src/runtime/defs_windows.go101
-rw-r--r--src/runtime/defs_windows_386.go100
-rw-r--r--src/runtime/defs_windows_amd64.go128
-rw-r--r--src/runtime/defs_windows_arm64.go121
-rw-r--r--src/runtime/export_windows_test.go22
-rw-r--r--src/runtime/netpoll_windows.go30
-rw-r--r--src/runtime/os_windows.go92
-rw-r--r--src/runtime/os_windows_arm64.go4
-rw-r--r--src/runtime/pprof/pprof_test.go10
-rw-r--r--src/runtime/runtime-seh_windows_test.go32
-rw-r--r--src/runtime/signal_windows.go164
-rw-r--r--src/runtime/signal_windows_386.go28
-rw-r--r--src/runtime/signal_windows_amd64.go36
-rw-r--r--src/runtime/signal_windows_arm64.go49
-rw-r--r--src/runtime/sys_darwin.go136
-rw-r--r--src/runtime/sys_darwin_amd64.s190
-rw-r--r--src/runtime/sys_darwin_arm64.s180
-rw-r--r--src/runtime/syscall_test.go28
-rw-r--r--src/runtime/testdata/testsyscall/testsyscall.go65
-rw-r--r--src/runtime/testdata/testsyscall/testsyscall.s55
-rw-r--r--src/runtime/testdata/testsyscall/testsyscallc/testsyscallc.go48
22 files changed, 968 insertions, 1015 deletions
diff --git a/src/runtime/_mkmalloc/mksizeclasses.go b/src/runtime/_mkmalloc/mksizeclasses.go
new file mode 100644
index 0000000000..a8d2d2db1e
--- /dev/null
+++ b/src/runtime/_mkmalloc/mksizeclasses.go
@@ -0,0 +1,364 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Generate tables for small malloc size classes.
+//
+// See malloc.go for overview.
+//
+// The size classes are chosen so that rounding an allocation
+// request up to the next size class wastes at most 12.5% (1.125x).
+//
+// Each size class has its own page count that gets allocated
+// and chopped up when new objects of the size class are needed.
+// That page count is chosen so that chopping up the run of
+// pages into objects of the given size wastes at most 12.5% (1.125x)
+// of the memory. It is not necessary that the cutoff here be
+// the same as above.
+//
+// The two sources of waste multiply, so the worst possible case
+// for the above constraints would be that allocations of some
+// size might have a 26.6% (1.266x) overhead.
+// In practice, only one of the wastes comes into play for a
+// given size (sizes < 512 waste mainly on the round-up,
+// sizes > 512 waste mainly on the page chopping).
+// For really small sizes, alignment constraints force the
+// overhead higher.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "io"
+ "log"
+ "math"
+ "math/bits"
+ "os"
+)
+
+// Generate internal/runtime/gc/msize.go
+
+var stdout = flag.Bool("stdout", false, "write to stdout instead of sizeclasses.go")
+
+func main() {
+ flag.Parse()
+
+ var b bytes.Buffer
+ fmt.Fprintln(&b, "// Code generated by mksizeclasses.go; DO NOT EDIT.")
+ fmt.Fprintln(&b, "//go:generate go -C ../../../runtime/_mkmalloc run mksizeclasses.go")
+ fmt.Fprintln(&b)
+ fmt.Fprintln(&b, "package gc")
+ classes := makeClasses()
+
+ printComment(&b, classes)
+
+ printClasses(&b, classes)
+
+ out, err := format.Source(b.Bytes())
+ if err != nil {
+ log.Fatal(err)
+ }
+ if *stdout {
+ _, err = os.Stdout.Write(out)
+ } else {
+ err = os.WriteFile("../../internal/runtime/gc/sizeclasses.go", out, 0666)
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+const (
+ // Constants that we use and will transfer to the runtime.
+ minHeapAlign = 8
+ maxSmallSize = 32 << 10
+ smallSizeDiv = 8
+ smallSizeMax = 1024
+ largeSizeDiv = 128
+ pageShift = 13
+
+ // Derived constants.
+ pageSize = 1 << pageShift
+)
+
+type class struct {
+ size int // max size
+ npages int // number of pages
+}
+
+func powerOfTwo(x int) bool {
+ return x != 0 && x&(x-1) == 0
+}
+
+func makeClasses() []class {
+ var classes []class
+
+ classes = append(classes, class{}) // class #0 is a dummy entry
+
+ align := minHeapAlign
+ for size := align; size <= maxSmallSize; size += align {
+ if powerOfTwo(size) { // bump alignment once in a while
+ if size >= 2048 {
+ align = 256
+ } else if size >= 128 {
+ align = size / 8
+ } else if size >= 32 {
+ align = 16 // heap bitmaps assume 16 byte alignment for allocations >= 32 bytes.
+ }
+ }
+ if !powerOfTwo(align) {
+ panic("incorrect alignment")
+ }
+
+ // Make the allocnpages big enough that
+ // the leftover is less than 1/8 of the total,
+ // so wasted space is at most 12.5%.
+ allocsize := pageSize
+ for allocsize%size > allocsize/8 {
+ allocsize += pageSize
+ }
+ npages := allocsize / pageSize
+
+ // If the previous sizeclass chose the same
+ // allocation size and fit the same number of
+ // objects into the page, we might as well
+ // use just this size instead of having two
+ // different sizes.
+ if len(classes) > 1 && npages == classes[len(classes)-1].npages && allocsize/size == allocsize/classes[len(classes)-1].size {
+ classes[len(classes)-1].size = size
+ continue
+ }
+ classes = append(classes, class{size: size, npages: npages})
+ }
+
+ // Increase object sizes if we can fit the same number of larger objects
+ // into the same number of pages. For example, we choose size 8448 above
+ // with 6 objects in 7 pages. But we can well use object size 9472,
+ // which is also 6 objects in 7 pages but +1024 bytes (+12.12%).
+ // We need to preserve at least largeSizeDiv alignment otherwise
+ // sizeToClass won't work.
+ for i := range classes {
+ if i == 0 {
+ continue
+ }
+ c := &classes[i]
+ psize := c.npages * pageSize
+ new_size := (psize / (psize / c.size)) &^ (largeSizeDiv - 1)
+ if new_size > c.size {
+ c.size = new_size
+ }
+ }
+
+ if len(classes) != 68 {
+ panic("number of size classes has changed")
+ }
+
+ for i := range classes {
+ computeDivMagic(&classes[i])
+ }
+
+ return classes
+}
+
+// computeDivMagic checks that the division required to compute object
+// index from span offset can be computed using 32-bit multiplication.
+// n / c.size is implemented as (n * (^uint32(0)/uint32(c.size) + 1)) >> 32
+// for all 0 <= n <= c.npages * pageSize
+func computeDivMagic(c *class) {
+ // divisor
+ d := c.size
+ if d == 0 {
+ return
+ }
+
+ // maximum input value for which the formula needs to work.
+ max := c.npages * pageSize
+
+ // As reported in [1], if n and d are unsigned N-bit integers, we
+ // can compute n / d as ⌊n * c / 2^F⌋, where c is ⌈2^F / d⌉ and F is
+ // computed with:
+ //
+ // Algorithm 2: Algorithm to select the number of fractional bits
+ // and the scaled approximate reciprocal in the case of unsigned
+ // integers.
+ //
+ // if d is a power of two then
+ // Let F ← log₂(d) and c = 1.
+ // else
+ // Let F ← N + L where L is the smallest integer
+ // such that d ≤ (2^(N+L) mod d) + 2^L.
+ // end if
+ //
+ // [1] "Faster Remainder by Direct Computation: Applications to
+ // Compilers and Software Libraries" Daniel Lemire, Owen Kaser,
+ // Nathan Kurz arXiv:1902.01961
+ //
+ // To minimize the risk of introducing errors, we implement the
+ // algorithm exactly as stated, rather than trying to adapt it to
+ // fit typical Go idioms.
+ N := bits.Len(uint(max))
+ var F int
+ if powerOfTwo(d) {
+ F = int(math.Log2(float64(d)))
+ if d != 1<<F {
+ panic("imprecise log2")
+ }
+ } else {
+ for L := 0; ; L++ {
+ if d <= ((1<<(N+L))%d)+(1<<L) {
+ F = N + L
+ break
+ }
+ }
+ }
+
+ // Also, noted in the paper, F is the smallest number of fractional
+ // bits required. We use 32 bits, because it works for all size
+ // classes and is fast on all CPU architectures that we support.
+ if F > 32 {
+ fmt.Printf("d=%d max=%d N=%d F=%d\n", c.size, max, N, F)
+ panic("size class requires more than 32 bits of precision")
+ }
+
+ // Brute force double-check with the exact computation that will be
+ // done by the runtime.
+ m := ^uint32(0)/uint32(c.size) + 1
+ for n := 0; n <= max; n++ {
+ if uint32((uint64(n)*uint64(m))>>32) != uint32(n/c.size) {
+ fmt.Printf("d=%d max=%d m=%d n=%d\n", d, max, m, n)
+ panic("bad 32-bit multiply magic")
+ }
+ }
+}
+
+func printComment(w io.Writer, classes []class) {
+ fmt.Fprintf(w, "// %-5s %-9s %-10s %-7s %-10s %-9s %-9s\n", "class", "bytes/obj", "bytes/span", "objects", "tail waste", "max waste", "min align")
+ prevSize := 0
+ var minAligns [pageShift + 1]int
+ for i, c := range classes {
+ if i == 0 {
+ continue
+ }
+ spanSize := c.npages * pageSize
+ objects := spanSize / c.size
+ tailWaste := spanSize - c.size*(spanSize/c.size)
+ maxWaste := float64((c.size-prevSize-1)*objects+tailWaste) / float64(spanSize)
+ alignBits := bits.TrailingZeros(uint(c.size))
+ if alignBits > pageShift {
+ // object alignment is capped at page alignment
+ alignBits = pageShift
+ }
+ for i := range minAligns {
+ if i > alignBits {
+ minAligns[i] = 0
+ } else if minAligns[i] == 0 {
+ minAligns[i] = c.size
+ }
+ }
+ prevSize = c.size
+ fmt.Fprintf(w, "// %5d %9d %10d %7d %10d %8.2f%% %9d\n", i, c.size, spanSize, objects, tailWaste, 100*maxWaste, 1<<alignBits)
+ }
+ fmt.Fprintf(w, "\n")
+
+ fmt.Fprintf(w, "// %-9s %-4s %-12s\n", "alignment", "bits", "min obj size")
+ for bits, size := range minAligns {
+ if size == 0 {
+ break
+ }
+ if bits+1 < len(minAligns) && size == minAligns[bits+1] {
+ continue
+ }
+ fmt.Fprintf(w, "// %9d %4d %12d\n", 1<<bits, bits, size)
+ }
+ fmt.Fprintf(w, "\n")
+}
+
+func maxObjsPerSpan(classes []class) int {
+ most := 0
+ for _, c := range classes[1:] {
+ n := c.npages * pageSize / c.size
+ most = max(most, n)
+ }
+ return most
+}
+
+func maxNPages(classes []class) int {
+ most := 0
+ for _, c := range classes[1:] {
+ most = max(most, c.npages)
+ }
+ return most
+}
+
+func printClasses(w io.Writer, classes []class) {
+ fmt.Fprintln(w, "const (")
+ fmt.Fprintf(w, "MinHeapAlign = %d\n", minHeapAlign)
+ fmt.Fprintf(w, "MaxSmallSize = %d\n", maxSmallSize)
+ fmt.Fprintf(w, "SmallSizeDiv = %d\n", smallSizeDiv)
+ fmt.Fprintf(w, "SmallSizeMax = %d\n", smallSizeMax)
+ fmt.Fprintf(w, "LargeSizeDiv = %d\n", largeSizeDiv)
+ fmt.Fprintf(w, "NumSizeClasses = %d\n", len(classes))
+ fmt.Fprintf(w, "PageShift = %d\n", pageShift)
+ fmt.Fprintf(w, "MaxObjsPerSpan = %d\n", maxObjsPerSpan(classes))
+ fmt.Fprintf(w, "MaxSizeClassNPages = %d\n", maxNPages(classes))
+ fmt.Fprintln(w, ")")
+
+ fmt.Fprint(w, "var SizeClassToSize = [NumSizeClasses]uint16 {")
+ for _, c := range classes {
+ fmt.Fprintf(w, "%d,", c.size)
+ }
+ fmt.Fprintln(w, "}")
+
+ fmt.Fprint(w, "var SizeClassToNPages = [NumSizeClasses]uint8 {")
+ for _, c := range classes {
+ fmt.Fprintf(w, "%d,", c.npages)
+ }
+ fmt.Fprintln(w, "}")
+
+ fmt.Fprint(w, "var SizeClassToDivMagic = [NumSizeClasses]uint32 {")
+ for _, c := range classes {
+ if c.size == 0 {
+ fmt.Fprintf(w, "0,")
+ continue
+ }
+ fmt.Fprintf(w, "^uint32(0)/%d+1,", c.size)
+ }
+ fmt.Fprintln(w, "}")
+
+ // map from size to size class, for small sizes.
+ sc := make([]int, smallSizeMax/smallSizeDiv+1)
+ for i := range sc {
+ size := i * smallSizeDiv
+ for j, c := range classes {
+ if c.size >= size {
+ sc[i] = j
+ break
+ }
+ }
+ }
+ fmt.Fprint(w, "var SizeToSizeClass8 = [SmallSizeMax/SmallSizeDiv+1]uint8 {")
+ for _, v := range sc {
+ fmt.Fprintf(w, "%d,", v)
+ }
+ fmt.Fprintln(w, "}")
+
+ // map from size to size class, for large sizes.
+ sc = make([]int, (maxSmallSize-smallSizeMax)/largeSizeDiv+1)
+ for i := range sc {
+ size := smallSizeMax + i*largeSizeDiv
+ for j, c := range classes {
+ if c.size >= size {
+ sc[i] = j
+ break
+ }
+ }
+ }
+ fmt.Fprint(w, "var SizeToSizeClass128 = [(MaxSmallSize-SmallSizeMax)/LargeSizeDiv+1]uint8 {")
+ for _, v := range sc {
+ fmt.Fprintf(w, "%d,", v)
+ }
+ fmt.Fprintln(w, "}")
+}
diff --git a/src/runtime/defs_windows.go b/src/runtime/defs_windows.go
deleted file mode 100644
index 2f09afbe1f..0000000000
--- a/src/runtime/defs_windows.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Windows architecture-independent definitions.
-
-package runtime
-
-const (
- _PROT_NONE = 0
- _PROT_READ = 1
- _PROT_WRITE = 2
- _PROT_EXEC = 4
-
- _MAP_ANON = 1
- _MAP_PRIVATE = 2
-
- _DUPLICATE_SAME_ACCESS = 0x2
- _THREAD_PRIORITY_HIGHEST = 0x2
-
- _SIGINT = 0x2
- _SIGTERM = 0xF
- _CTRL_C_EVENT = 0x0
- _CTRL_BREAK_EVENT = 0x1
- _CTRL_CLOSE_EVENT = 0x2
- _CTRL_LOGOFF_EVENT = 0x5
- _CTRL_SHUTDOWN_EVENT = 0x6
-
- _EXCEPTION_ACCESS_VIOLATION = 0xc0000005
- _EXCEPTION_IN_PAGE_ERROR = 0xc0000006
- _EXCEPTION_BREAKPOINT = 0x80000003
- _EXCEPTION_ILLEGAL_INSTRUCTION = 0xc000001d
- _EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d
- _EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e
- _EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f
- _EXCEPTION_FLT_OVERFLOW = 0xc0000091
- _EXCEPTION_FLT_UNDERFLOW = 0xc0000093
- _EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094
- _EXCEPTION_INT_OVERFLOW = 0xc0000095
-
- _INFINITE = 0xffffffff
- _WAIT_TIMEOUT = 0x102
-
- _EXCEPTION_CONTINUE_EXECUTION = -0x1
- _EXCEPTION_CONTINUE_SEARCH = 0x0
- _EXCEPTION_CONTINUE_SEARCH_SEH = 0x1
-)
-
-type systeminfo struct {
- anon0 [4]byte
- dwpagesize uint32
- lpminimumapplicationaddress *byte
- lpmaximumapplicationaddress *byte
- dwactiveprocessormask uintptr
- dwnumberofprocessors uint32
- dwprocessortype uint32
- dwallocationgranularity uint32
- wprocessorlevel uint16
- wprocessorrevision uint16
-}
-
-type exceptionpointers struct {
- record *exceptionrecord
- context *context
-}
-
-type exceptionrecord struct {
- exceptioncode uint32
- exceptionflags uint32
- exceptionrecord *exceptionrecord
- exceptionaddress uintptr
- numberparameters uint32
- exceptioninformation [15]uintptr
-}
-
-type overlapped struct {
- internal uintptr
- internalhigh uintptr
- anon0 [8]byte
- hevent *byte
-}
-
-type memoryBasicInformation struct {
- baseAddress uintptr
- allocationBase uintptr
- allocationProtect uint32
- regionSize uintptr
- state uint32
- protect uint32
- type_ uint32
-}
-
-// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/ns-wdm-_osversioninfow
-type _OSVERSIONINFOW struct {
- osVersionInfoSize uint32
- majorVersion uint32
- minorVersion uint32
- buildNumber uint32
- platformId uint32
- csdVersion [128]uint16
-}
diff --git a/src/runtime/defs_windows_386.go b/src/runtime/defs_windows_386.go
deleted file mode 100644
index 12cd442eb5..0000000000
--- a/src/runtime/defs_windows_386.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/goarch"
- "unsafe"
-)
-
-const _CONTEXT_CONTROL = 0x10001
-
-type floatingsavearea struct {
- controlword uint32
- statusword uint32
- tagword uint32
- erroroffset uint32
- errorselector uint32
- dataoffset uint32
- dataselector uint32
- registerarea [80]uint8
- cr0npxstate uint32
-}
-
-type context struct {
- contextflags uint32
- dr0 uint32
- dr1 uint32
- dr2 uint32
- dr3 uint32
- dr6 uint32
- dr7 uint32
- floatsave floatingsavearea
- seggs uint32
- segfs uint32
- seges uint32
- segds uint32
- edi uint32
- esi uint32
- ebx uint32
- edx uint32
- ecx uint32
- eax uint32
- ebp uint32
- eip uint32
- segcs uint32
- eflags uint32
- esp uint32
- segss uint32
- extendedregisters [512]uint8
-}
-
-func (c *context) ip() uintptr { return uintptr(c.eip) }
-func (c *context) sp() uintptr { return uintptr(c.esp) }
-
-// 386 does not have link register, so this returns 0.
-func (c *context) lr() uintptr { return 0 }
-func (c *context) set_lr(x uintptr) {}
-
-func (c *context) set_ip(x uintptr) { c.eip = uint32(x) }
-func (c *context) set_sp(x uintptr) { c.esp = uint32(x) }
-
-// 386 does not have frame pointer register.
-func (c *context) set_fp(x uintptr) {}
-
-func (c *context) pushCall(targetPC, resumePC uintptr) {
- sp := c.sp() - goarch.StackAlign
- *(*uintptr)(unsafe.Pointer(sp)) = resumePC
- c.set_sp(sp)
- c.set_ip(targetPC)
-}
-
-func prepareContextForSigResume(c *context) {
- c.edx = c.esp
- c.ecx = c.eip
-}
-
-func dumpregs(r *context) {
- print("eax ", hex(r.eax), "\n")
- print("ebx ", hex(r.ebx), "\n")
- print("ecx ", hex(r.ecx), "\n")
- print("edx ", hex(r.edx), "\n")
- print("edi ", hex(r.edi), "\n")
- print("esi ", hex(r.esi), "\n")
- print("ebp ", hex(r.ebp), "\n")
- print("esp ", hex(r.esp), "\n")
- print("eip ", hex(r.eip), "\n")
- print("eflags ", hex(r.eflags), "\n")
- print("cs ", hex(r.segcs), "\n")
- print("fs ", hex(r.segfs), "\n")
- print("gs ", hex(r.seggs), "\n")
-}
-
-// _DISPATCHER_CONTEXT is not defined on 386.
-type _DISPATCHER_CONTEXT struct{}
-
-func (c *_DISPATCHER_CONTEXT) ctx() *context {
- return nil
-}
diff --git a/src/runtime/defs_windows_amd64.go b/src/runtime/defs_windows_amd64.go
deleted file mode 100644
index 9bb7ee80ad..0000000000
--- a/src/runtime/defs_windows_amd64.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/goarch"
- "unsafe"
-)
-
-const _CONTEXT_CONTROL = 0x100001
-
-type m128a struct {
- low uint64
- high int64
-}
-
-type context struct {
- p1home uint64
- p2home uint64
- p3home uint64
- p4home uint64
- p5home uint64
- p6home uint64
- contextflags uint32
- mxcsr uint32
- segcs uint16
- segds uint16
- seges uint16
- segfs uint16
- seggs uint16
- segss uint16
- eflags uint32
- dr0 uint64
- dr1 uint64
- dr2 uint64
- dr3 uint64
- dr6 uint64
- dr7 uint64
- rax uint64
- rcx uint64
- rdx uint64
- rbx uint64
- rsp uint64
- rbp uint64
- rsi uint64
- rdi uint64
- r8 uint64
- r9 uint64
- r10 uint64
- r11 uint64
- r12 uint64
- r13 uint64
- r14 uint64
- r15 uint64
- rip uint64
- anon0 [512]byte
- vectorregister [26]m128a
- vectorcontrol uint64
- debugcontrol uint64
- lastbranchtorip uint64
- lastbranchfromrip uint64
- lastexceptiontorip uint64
- lastexceptionfromrip uint64
-}
-
-func (c *context) ip() uintptr { return uintptr(c.rip) }
-func (c *context) sp() uintptr { return uintptr(c.rsp) }
-
-// AMD64 does not have link register, so this returns 0.
-func (c *context) lr() uintptr { return 0 }
-func (c *context) set_lr(x uintptr) {}
-
-func (c *context) set_ip(x uintptr) { c.rip = uint64(x) }
-func (c *context) set_sp(x uintptr) { c.rsp = uint64(x) }
-func (c *context) set_fp(x uintptr) { c.rbp = uint64(x) }
-
-func (c *context) pushCall(targetPC, resumePC uintptr) {
- sp := c.sp() - goarch.StackAlign
- *(*uintptr)(unsafe.Pointer(sp)) = resumePC
- c.set_sp(sp)
- c.set_ip(targetPC)
-}
-
-func prepareContextForSigResume(c *context) {
- c.r8 = c.rsp
- c.r9 = c.rip
-}
-
-func dumpregs(r *context) {
- print("rax ", hex(r.rax), "\n")
- print("rbx ", hex(r.rbx), "\n")
- print("rcx ", hex(r.rcx), "\n")
- print("rdx ", hex(r.rdx), "\n")
- print("rdi ", hex(r.rdi), "\n")
- print("rsi ", hex(r.rsi), "\n")
- print("rbp ", hex(r.rbp), "\n")
- print("rsp ", hex(r.rsp), "\n")
- print("r8 ", hex(r.r8), "\n")
- print("r9 ", hex(r.r9), "\n")
- print("r10 ", hex(r.r10), "\n")
- print("r11 ", hex(r.r11), "\n")
- print("r12 ", hex(r.r12), "\n")
- print("r13 ", hex(r.r13), "\n")
- print("r14 ", hex(r.r14), "\n")
- print("r15 ", hex(r.r15), "\n")
- print("rip ", hex(r.rip), "\n")
- print("rflags ", hex(r.eflags), "\n")
- print("cs ", hex(r.segcs), "\n")
- print("fs ", hex(r.segfs), "\n")
- print("gs ", hex(r.seggs), "\n")
-}
-
-type _DISPATCHER_CONTEXT struct {
- controlPc uint64
- imageBase uint64
- functionEntry uintptr
- establisherFrame uint64
- targetIp uint64
- context *context
- languageHandler uintptr
- handlerData uintptr
-}
-
-func (c *_DISPATCHER_CONTEXT) ctx() *context {
- return c.context
-}
diff --git a/src/runtime/defs_windows_arm64.go b/src/runtime/defs_windows_arm64.go
deleted file mode 100644
index 077bed24e2..0000000000
--- a/src/runtime/defs_windows_arm64.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "internal/goarch"
- "unsafe"
-)
-
-// NOTE(rsc): _CONTEXT_CONTROL is actually 0x400001 and should include PC, SP, and LR.
-// However, empirically, LR doesn't come along on Windows 10
-// unless you also set _CONTEXT_INTEGER (0x400002).
-// Without LR, we skip over the next-to-bottom function in profiles
-// when the bottom function is frameless.
-// So we set both here, to make a working _CONTEXT_CONTROL.
-const _CONTEXT_CONTROL = 0x400003
-
-type neon128 struct {
- low uint64
- high int64
-}
-
-// See https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-arm64_nt_context
-type context struct {
- contextflags uint32
- cpsr uint32
- x [31]uint64 // fp is x[29], lr is x[30]
- xsp uint64
- pc uint64
- v [32]neon128
- fpcr uint32
- fpsr uint32
- bcr [8]uint32
- bvr [8]uint64
- wcr [2]uint32
- wvr [2]uint64
-}
-
-func (c *context) ip() uintptr { return uintptr(c.pc) }
-func (c *context) sp() uintptr { return uintptr(c.xsp) }
-func (c *context) lr() uintptr { return uintptr(c.x[30]) }
-
-func (c *context) set_ip(x uintptr) { c.pc = uint64(x) }
-func (c *context) set_sp(x uintptr) { c.xsp = uint64(x) }
-func (c *context) set_lr(x uintptr) { c.x[30] = uint64(x) }
-func (c *context) set_fp(x uintptr) { c.x[29] = uint64(x) }
-
-func (c *context) pushCall(targetPC, resumePC uintptr) {
- // Push LR. The injected call is responsible
- // for restoring LR. gentraceback is aware of
- // this extra slot. See sigctxt.pushCall in
- // signal_arm64.go.
- sp := c.sp() - goarch.StackAlign
- c.set_sp(sp)
- *(*uint64)(unsafe.Pointer(sp)) = uint64(c.lr())
- c.set_lr(resumePC)
- c.set_ip(targetPC)
-}
-
-func prepareContextForSigResume(c *context) {
- c.x[0] = c.xsp
- c.x[1] = c.pc
-}
-
-func dumpregs(r *context) {
- print("r0 ", hex(r.x[0]), "\n")
- print("r1 ", hex(r.x[1]), "\n")
- print("r2 ", hex(r.x[2]), "\n")
- print("r3 ", hex(r.x[3]), "\n")
- print("r4 ", hex(r.x[4]), "\n")
- print("r5 ", hex(r.x[5]), "\n")
- print("r6 ", hex(r.x[6]), "\n")
- print("r7 ", hex(r.x[7]), "\n")
- print("r8 ", hex(r.x[8]), "\n")
- print("r9 ", hex(r.x[9]), "\n")
- print("r10 ", hex(r.x[10]), "\n")
- print("r11 ", hex(r.x[11]), "\n")
- print("r12 ", hex(r.x[12]), "\n")
- print("r13 ", hex(r.x[13]), "\n")
- print("r14 ", hex(r.x[14]), "\n")
- print("r15 ", hex(r.x[15]), "\n")
- print("r16 ", hex(r.x[16]), "\n")
- print("r17 ", hex(r.x[17]), "\n")
- print("r18 ", hex(r.x[18]), "\n")
- print("r19 ", hex(r.x[19]), "\n")
- print("r20 ", hex(r.x[20]), "\n")
- print("r21 ", hex(r.x[21]), "\n")
- print("r22 ", hex(r.x[22]), "\n")
- print("r23 ", hex(r.x[23]), "\n")
- print("r24 ", hex(r.x[24]), "\n")
- print("r25 ", hex(r.x[25]), "\n")
- print("r26 ", hex(r.x[26]), "\n")
- print("r27 ", hex(r.x[27]), "\n")
- print("r28 ", hex(r.x[28]), "\n")
- print("r29 ", hex(r.x[29]), "\n")
- print("lr ", hex(r.x[30]), "\n")
- print("sp ", hex(r.xsp), "\n")
- print("pc ", hex(r.pc), "\n")
- print("cpsr ", hex(r.cpsr), "\n")
-}
-
-func stackcheck() {
- // TODO: not implemented on ARM
-}
-
-type _DISPATCHER_CONTEXT struct {
- controlPc uint64
- imageBase uint64
- functionEntry uintptr
- establisherFrame uint64
- targetIp uint64
- context *context
- languageHandler uintptr
- handlerData uintptr
-}
-
-func (c *_DISPATCHER_CONTEXT) ctx() *context {
- return c.context
-}
diff --git a/src/runtime/export_windows_test.go b/src/runtime/export_windows_test.go
index caaf2dae51..f44e24bbac 100644
--- a/src/runtime/export_windows_test.go
+++ b/src/runtime/export_windows_test.go
@@ -7,7 +7,7 @@
package runtime
import (
- "internal/runtime/sys"
+ "internal/runtime/syscall/windows"
"unsafe"
)
@@ -17,23 +17,11 @@ var (
)
func NumberOfProcessors() int32 {
- var info systeminfo
+ var info windows.SystemInfo
stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
- return int32(info.dwnumberofprocessors)
+ return int32(info.NumberOfProcessors)
}
-type ContextStub struct {
- context
-}
-
-func (c ContextStub) GetPC() uintptr {
- return c.ip()
-}
-
-func NewContextStub() *ContextStub {
- var ctx context
- ctx.set_ip(sys.GetCallerPC())
- ctx.set_sp(sys.GetCallerSP())
- ctx.set_fp(getcallerfp())
- return &ContextStub{ctx}
+func GetCallerFp() uintptr {
+ return getcallerfp()
}
diff --git a/src/runtime/netpoll_windows.go b/src/runtime/netpoll_windows.go
index 93137e4709..db5d043506 100644
--- a/src/runtime/netpoll_windows.go
+++ b/src/runtime/netpoll_windows.go
@@ -7,13 +7,10 @@ package runtime
import (
"internal/goarch"
"internal/runtime/atomic"
+ "internal/runtime/syscall/windows"
"unsafe"
)
-const _DWORD_MAX = 0xffffffff
-
-const _INVALID_HANDLE_VALUE = ^uintptr(0)
-
// Sources are used to identify the event that created an overlapped entry.
// The source values are arbitrary. There is no risk of collision with user
// defined values because the only way to set the key of an overlapped entry
@@ -59,7 +56,7 @@ func unpackNetpollSource(key uintptr) uint8 {
// Keep these in sync.
type pollOperation struct {
// used by windows
- _ overlapped
+ _ windows.Overlapped
// used by netpoll
pd *pollDesc
mode int32
@@ -90,19 +87,19 @@ func pollOperationFromOverlappedEntry(e *overlappedEntry) *pollOperation {
// https://learn.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-overlapped_entry
type overlappedEntry struct {
key uintptr
- ov *overlapped
+ ov *windows.Overlapped
internal uintptr
qty uint32
}
var (
- iocphandle uintptr = _INVALID_HANDLE_VALUE // completion port io handle
+ iocphandle uintptr = windows.INVALID_HANDLE_VALUE // completion port io handle
netpollWakeSig atomic.Uint32 // used to avoid duplicate calls of netpollBreak
)
func netpollinit() {
- iocphandle = stdcall(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX)
+ iocphandle = stdcall(_CreateIoCompletionPort, windows.INVALID_HANDLE_VALUE, 0, 0, windows.DWORD_MAX)
if iocphandle == 0 {
println("runtime: CreateIoCompletionPort failed (errno=", getlasterror(), ")")
throw("runtime: netpollinit failed")
@@ -152,7 +149,7 @@ func netpollBreak() {
// delay == 0: does not block, just polls
// delay > 0: block for up to that many nanoseconds
func netpoll(delay int64) (gList, int32) {
- if iocphandle == _INVALID_HANDLE_VALUE {
+ if iocphandle == windows.INVALID_HANDLE_VALUE {
return gList{}, 0
}
@@ -182,7 +179,7 @@ func netpoll(delay int64) (gList, int32) {
}
}
if delay < 0 {
- wait = _INFINITE
+ wait = windows.INFINITE
} else if delay == 0 {
wait = 0
} else if delay < 1e6 {
@@ -200,7 +197,7 @@ func netpoll(delay int64) (gList, int32) {
if stdcall(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 {
mp.blocked = false
errno := getlasterror()
- if errno == _WAIT_TIMEOUT {
+ if errno == windows.WAIT_TIMEOUT {
return gList{}, 0
}
println("runtime: GetQueuedCompletionStatusEx failed (errno=", errno, ")")
@@ -243,11 +240,6 @@ func netpoll(delay int64) (gList, int32) {
// netpollQueueTimer queues a timer to wake up the poller after the given delay.
// It returns true if the timer expired during this call.
func netpollQueueTimer(delay int64) (signaled bool) {
- const (
- STATUS_SUCCESS = 0x00000000
- STATUS_PENDING = 0x00000103
- STATUS_CANCELLED = 0xC0000120
- )
mp := getg().m
// A wait completion packet can only be associated with one timer at a time,
// so we need to cancel the previous one if it exists. This wouldn't be necessary
@@ -258,11 +250,11 @@ func netpollQueueTimer(delay int64) (signaled bool) {
// another thread, so defer the cancellation until it is really necessary.
errno := stdcall(_NtCancelWaitCompletionPacket, mp.waitIocpHandle, 1)
switch errno {
- case STATUS_CANCELLED:
+ case windows.STATUS_CANCELLED:
// STATUS_CANCELLED is returned when the associated timer has already expired,
// in which automatically cancels the wait completion packet.
fallthrough
- case STATUS_SUCCESS:
+ case windows.STATUS_SUCCESS:
dt := -delay / 100 // relative sleep (negative), 100ns units
if stdcall(_SetWaitableTimer, mp.waitIocpTimer, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0) == 0 {
println("runtime: SetWaitableTimer failed; errno=", getlasterror())
@@ -273,7 +265,7 @@ func netpollQueueTimer(delay int64) (signaled bool) {
println("runtime: NtAssociateWaitCompletionPacket failed; errno=", errno)
throw("runtime: netpoll failed")
}
- case STATUS_PENDING:
+ case windows.STATUS_PENDING:
// STATUS_PENDING is returned if the wait operation can't be canceled yet.
// This can happen if this thread was woken up by another event, such as a netpollBreak,
// and the timer expired just while calling NtCancelWaitCompletionPacket, in which case
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index ab4e165bae..f47419cf7d 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -323,7 +323,7 @@ func monitorSuspendResume() {
func getCPUCount() int32 {
var mask, sysmask uintptr
- ret := stdcall(_GetProcessAffinityMask, currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
+ ret := stdcall(_GetProcessAffinityMask, windows.CurrentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
if ret != 0 {
n := 0
maskbits := int(unsafe.Sizeof(mask) * 8)
@@ -337,22 +337,17 @@ func getCPUCount() int32 {
}
}
// use GetSystemInfo if GetProcessAffinityMask fails
- var info systeminfo
+ var info windows.SystemInfo
stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
- return int32(info.dwnumberofprocessors)
+ return int32(info.NumberOfProcessors)
}
func getPageSize() uintptr {
- var info systeminfo
+ var info windows.SystemInfo
stdcall(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
- return uintptr(info.dwpagesize)
+ return uintptr(info.PageSize)
}
-const (
- currentProcess = ^uintptr(0) // -1 = current process
- currentThread = ^uintptr(1) // -2 = current thread
-)
-
// in sys_windows_386.s and sys_windows_amd64.s:
func getlasterror() uint32
@@ -405,18 +400,11 @@ var haveHighResSleep = false
// resolution timer. createHighResTimer returns new timer
// handle or 0, if CreateWaitableTimerEx failed.
func createHighResTimer() uintptr {
- const (
- // As per @jstarks, see
- // https://github.com/golang/go/issues/8687#issuecomment-656259353
- _CREATE_WAITABLE_TIMER_HIGH_RESOLUTION = 0x00000002
-
- _SYNCHRONIZE = 0x00100000
- _TIMER_QUERY_STATE = 0x0001
- _TIMER_MODIFY_STATE = 0x0002
- )
+ // As per @jstarks, see
+ // https://github.com/golang/go/issues/8687#issuecomment-656259353
return stdcall(_CreateWaitableTimerExW, 0, 0,
- _CREATE_WAITABLE_TIMER_HIGH_RESOLUTION,
- _SYNCHRONIZE|_TIMER_QUERY_STATE|_TIMER_MODIFY_STATE)
+ windows.CREATE_WAITABLE_TIMER_HIGH_RESOLUTION,
+ windows.SYNCHRONIZE|windows.TIMER_QUERY_STATE|windows.TIMER_MODIFY_STATE)
}
func initHighResTimer() {
@@ -454,10 +442,10 @@ func initLongPathSupport() {
)
// Check that we're ≥ 10.0.15063.
- info := _OSVERSIONINFOW{}
- info.osVersionInfoSize = uint32(unsafe.Sizeof(info))
+ info := windows.OSVERSIONINFOW{}
+ info.OSVersionInfoSize = uint32(unsafe.Sizeof(info))
stdcall(_RtlGetVersion, uintptr(unsafe.Pointer(&info)))
- if info.majorVersion < 10 || (info.majorVersion == 10 && info.minorVersion == 0 && info.buildNumber < 15063) {
+ if info.MajorVersion < 10 || (info.MajorVersion == 10 && info.MinorVersion == 0 && info.BuildNumber < 15063) {
return
}
@@ -493,7 +481,7 @@ func osinit() {
// of dedicated threads -- GUI, IO, computational, etc. Go processes use
// equivalent threads that all do a mix of GUI, IO, computations, etc.
// In such context dynamic priority boosting does nothing but harm, so we turn it off.
- stdcall(_SetProcessPriorityBoost, currentProcess, 1)
+ stdcall(_SetProcessPriorityBoost, windows.CurrentProcess, 1)
}
//go:nosplit
@@ -671,7 +659,7 @@ func semasleep(ns int64) int32 {
var result uintptr
if ns < 0 {
- result = stdcall(_WaitForSingleObject, getg().m.waitsema, uintptr(_INFINITE))
+ result = stdcall(_WaitForSingleObject, getg().m.waitsema, uintptr(windows.INFINITE))
} else {
start := nanotime()
elapsed := int64(0)
@@ -828,7 +816,7 @@ func sigblock(exiting bool) {
// Called on the new thread, cannot allocate Go memory.
func minit() {
var thandle uintptr
- if stdcall(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+ if stdcall(_DuplicateHandle, windows.CurrentProcess, windows.CurrentThread, windows.CurrentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, windows.DUPLICATE_SAME_ACCESS) == 0 {
print("runtime.minit: duplicatehandle failed; errno=", getlasterror(), "\n")
throw("runtime.minit: duplicatehandle failed")
}
@@ -863,7 +851,7 @@ func minit() {
// Query the true stack base from the OS. Currently we're
// running on a small assumed stack.
- var mbi memoryBasicInformation
+ var mbi windows.MemoryBasicInformation
res := stdcall(_VirtualQuery, uintptr(unsafe.Pointer(&mbi)), uintptr(unsafe.Pointer(&mbi)), unsafe.Sizeof(mbi))
if res == 0 {
print("runtime: VirtualQuery failed; errno=", getlasterror(), "\n")
@@ -875,7 +863,7 @@ func minit() {
// calling C functions that don't have stack checks and for
// lastcontinuehandler. We shouldn't be anywhere near this
// bound anyway.
- base := mbi.allocationBase + 16<<10
+ base := mbi.AllocationBase + 16<<10
// Sanity check the stack bounds.
g0 := getg()
if base > g0.stack.hi || g0.stack.hi-base > 64<<20 {
@@ -1000,7 +988,7 @@ func osyield() {
//go:nosplit
func usleep_no_g(us uint32) {
timeout := uintptr(us) / 1000 // ms units
- stdcall_no_g(_WaitForSingleObject, _INVALID_HANDLE_VALUE, timeout)
+ stdcall_no_g(_WaitForSingleObject, windows.INVALID_HANDLE_VALUE, timeout)
}
//go:nosplit
@@ -1013,9 +1001,9 @@ func usleep(us uint32) {
h = getg().m.highResTimer
dt := -10 * int64(us) // relative sleep (negative), 100ns units
stdcall(_SetWaitableTimer, h, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0)
- timeout = _INFINITE
+ timeout = windows.INFINITE
} else {
- h = _INVALID_HANDLE_VALUE
+ h = windows.INVALID_HANDLE_VALUE
timeout = uintptr(us) / 1000 // ms units
}
stdcall(_WaitForSingleObject, h, timeout)
@@ -1026,16 +1014,16 @@ func ctrlHandler(_type uint32) uintptr {
var s uint32
switch _type {
- case _CTRL_C_EVENT, _CTRL_BREAK_EVENT:
- s = _SIGINT
- case _CTRL_CLOSE_EVENT, _CTRL_LOGOFF_EVENT, _CTRL_SHUTDOWN_EVENT:
- s = _SIGTERM
+ case windows.CTRL_C_EVENT, windows.CTRL_BREAK_EVENT:
+ s = windows.SIGINT
+ case windows.CTRL_CLOSE_EVENT, windows.CTRL_LOGOFF_EVENT, windows.CTRL_SHUTDOWN_EVENT:
+ s = windows.SIGTERM
default:
return 0
}
if sigsend(s) {
- if s == _SIGTERM {
+ if s == windows.SIGTERM {
// Windows terminates the process after this handler returns.
// Block indefinitely to give signal handlers a chance to clean up,
// but make sure to be properly parked first, so the rest of the
@@ -1054,16 +1042,16 @@ var profiletimer uintptr
func profilem(mp *m, thread uintptr) {
// Align Context to 16 bytes.
- var c *context
+ var c *windows.Context
var cbuf [unsafe.Sizeof(*c) + 15]byte
- c = (*context)(unsafe.Pointer((uintptr(unsafe.Pointer(&cbuf[15]))) &^ 15))
+ c = (*windows.Context)(unsafe.Pointer((uintptr(unsafe.Pointer(&cbuf[15]))) &^ 15))
- c.contextflags = _CONTEXT_CONTROL
+ c.ContextFlags = windows.CONTEXT_CONTROL
stdcall(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
- gp := gFromSP(mp, c.sp())
+ gp := gFromSP(mp, c.SP())
- sigprof(c.ip(), c.sp(), c.lr(), gp, mp)
+ sigprof(c.PC(), c.SP(), c.LR(), gp, mp)
}
func gFromSP(mp *m, sp uintptr) *g {
@@ -1080,10 +1068,10 @@ func gFromSP(mp *m, sp uintptr) *g {
}
func profileLoop() {
- stdcall(_SetThreadPriority, currentThread, _THREAD_PRIORITY_HIGHEST)
+ stdcall(_SetThreadPriority, windows.CurrentThread, windows.THREAD_PRIORITY_HIGHEST)
for {
- stdcall(_WaitForSingleObject, profiletimer, _INFINITE)
+ stdcall(_WaitForSingleObject, profiletimer, windows.INFINITE)
first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
for mp := first; mp != nil; mp = mp.alllink {
if mp == getg().m {
@@ -1101,7 +1089,7 @@ func profileLoop() {
}
// Acquire our own handle to the thread.
var thread uintptr
- if stdcall(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+ if stdcall(_DuplicateHandle, windows.CurrentProcess, mp.thread, windows.CurrentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, windows.DUPLICATE_SAME_ACCESS) == 0 {
print("runtime: duplicatehandle failed; errno=", getlasterror(), "\n")
throw("duplicatehandle failed")
}
@@ -1183,17 +1171,17 @@ func preemptM(mp *m) {
return
}
var thread uintptr
- if stdcall(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+ if stdcall(_DuplicateHandle, windows.CurrentProcess, mp.thread, windows.CurrentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, windows.DUPLICATE_SAME_ACCESS) == 0 {
print("runtime.preemptM: duplicatehandle failed; errno=", getlasterror(), "\n")
throw("runtime.preemptM: duplicatehandle failed")
}
unlock(&mp.threadLock)
// Prepare thread context buffer. This must be aligned to 16 bytes.
- var c *context
+ var c *windows.Context
var cbuf [unsafe.Sizeof(*c) + 15]byte
- c = (*context)(unsafe.Pointer((uintptr(unsafe.Pointer(&cbuf[15]))) &^ 15))
- c.contextflags = _CONTEXT_CONTROL
+ c = (*windows.Context)(unsafe.Pointer((uintptr(unsafe.Pointer(&cbuf[15]))) &^ 15))
+ c.ContextFlags = windows.CONTEXT_CONTROL
// Serialize thread suspension. SuspendThread is asynchronous,
// so it's otherwise possible for two threads to suspend each
@@ -1227,12 +1215,12 @@ func preemptM(mp *m) {
unlock(&suspendLock)
// Does it want a preemption and is it safe to preempt?
- gp := gFromSP(mp, c.sp())
+ gp := gFromSP(mp, c.SP())
if gp != nil && wantAsyncPreempt(gp) {
- if ok, resumePC := isAsyncSafePoint(gp, c.ip(), c.sp(), c.lr()); ok {
+ if ok, resumePC := isAsyncSafePoint(gp, c.PC(), c.SP(), c.LR()); ok {
// Inject call to asyncPreempt
targetPC := abi.FuncPCABI0(asyncPreempt)
- c.pushCall(targetPC, resumePC)
+ c.PushCall(targetPC, resumePC)
stdcall(_SetThreadContext, thread, uintptr(unsafe.Pointer(c)))
}
}
diff --git a/src/runtime/os_windows_arm64.go b/src/runtime/os_windows_arm64.go
index bd80c08b0e..62caea7c2c 100644
--- a/src/runtime/os_windows_arm64.go
+++ b/src/runtime/os_windows_arm64.go
@@ -12,3 +12,7 @@ func cputicks() int64 {
stdcall(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
return counter
}
+
+func stackcheck() {
+ // TODO: not implemented
+}
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index 99c5155806..25a2f3b324 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -117,10 +117,6 @@ func TestCPUProfileMultithreadMagnitude(t *testing.T) {
t.Skip("issue 35057 is only confirmed on Linux")
}
- // Linux [5.9,5.16) has a kernel bug that can break CPU timers on newly
- // created threads, breaking our CPU accounting.
- major, minor := unix.KernelVersion()
- t.Logf("Running on Linux %d.%d", major, minor)
defer func() {
if t.Failed() {
t.Logf("Failure of this test may indicate that your system suffers from a known Linux kernel bug fixed on newer kernels. See https://golang.org/issue/49065.")
@@ -131,9 +127,9 @@ func TestCPUProfileMultithreadMagnitude(t *testing.T) {
// it enabled to potentially warn users that they are on a broken
// kernel.
if testenv.Builder() != "" && (runtime.GOARCH == "386" || runtime.GOARCH == "amd64") {
- have59 := major > 5 || (major == 5 && minor >= 9)
- have516 := major > 5 || (major == 5 && minor >= 16)
- if have59 && !have516 {
+ // Linux [5.9,5.16) has a kernel bug that can break CPU timers on newly
+ // created threads, breaking our CPU accounting.
+ if unix.KernelVersionGE(5, 9) && !unix.KernelVersionGE(5, 16) {
testenv.SkipFlaky(t, 49065)
}
}
diff --git a/src/runtime/runtime-seh_windows_test.go b/src/runtime/runtime-seh_windows_test.go
index ca92d7f178..e1cd0601bb 100644
--- a/src/runtime/runtime-seh_windows_test.go
+++ b/src/runtime/runtime-seh_windows_test.go
@@ -6,6 +6,7 @@ package runtime_test
import (
"internal/abi"
+ "internal/runtime/sys"
"internal/syscall/windows"
"runtime"
"slices"
@@ -47,7 +48,7 @@ func TestSehLookupFunctionEntry(t *testing.T) {
{"func in prologue", sehf1pc + 1, true},
{"anonymous func with frame", abi.FuncPCABIInternal(fnwithframe), true},
{"anonymous func without frame", abi.FuncPCABIInternal(fnwithoutframe), false},
- {"pc at func body", runtime.NewContextStub().GetPC(), true},
+ {"pc at func body", sys.GetCallerPC(), true},
}
for _, tt := range tests {
var base uintptr
@@ -64,23 +65,32 @@ func TestSehLookupFunctionEntry(t *testing.T) {
}
}
+//go:noinline
+func newCtx() *windows.Context {
+ var ctx windows.Context
+ ctx.SetPC(sys.GetCallerPC())
+ ctx.SetSP(sys.GetCallerSP())
+ ctx.SetFP(runtime.GetCallerFp())
+ return &ctx
+}
+
func sehCallers() []uintptr {
// We don't need a real context,
// RtlVirtualUnwind just needs a context with
// a valid pc, sp and fp (aka bp).
- ctx := runtime.NewContextStub()
+ ctx := newCtx()
pcs := make([]uintptr, 15)
var base, frame uintptr
var n int
for i := 0; i < len(pcs); i++ {
- fn := windows.RtlLookupFunctionEntry(ctx.GetPC(), &base, nil)
+ fn := windows.RtlLookupFunctionEntry(ctx.PC(), &base, nil)
if fn == nil {
break
}
- pcs[i] = ctx.GetPC()
+ pcs[i] = ctx.PC()
n++
- windows.RtlVirtualUnwind(0, base, ctx.GetPC(), fn, unsafe.Pointer(ctx), nil, &frame, nil)
+ windows.RtlVirtualUnwind(0, base, ctx.PC(), fn, unsafe.Pointer(ctx), nil, &frame, nil)
}
return pcs[:n]
}
@@ -116,6 +126,9 @@ func testSehCallersEqual(t *testing.T, pcs []uintptr, want []string) {
// These functions are skipped as they appear inconsistently depending
// whether inlining is on or off.
continue
+ case "runtime_test.sehCallers":
+ // This is an artifact of the implementation of sehCallers.
+ continue
}
got = append(got, name)
}
@@ -129,15 +142,14 @@ func TestSehUnwind(t *testing.T) {
t.Skip("skipping amd64-only test")
}
pcs := sehf3(false)
- testSehCallersEqual(t, pcs, []string{"runtime_test.sehCallers", "runtime_test.sehf4",
- "runtime_test.sehf3", "runtime_test.TestSehUnwind"})
+ testSehCallersEqual(t, pcs, []string{"runtime_test.sehf4", "runtime_test.sehf3", "runtime_test.TestSehUnwind"})
}
func TestSehUnwindPanic(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("skipping amd64-only test")
}
- want := []string{"runtime_test.sehCallers", "runtime_test.TestSehUnwindPanic.func1", "runtime.gopanic",
+ want := []string{"runtime_test.TestSehUnwindPanic.func1", "runtime.gopanic",
"runtime_test.sehf4", "runtime_test.sehf3", "runtime_test.TestSehUnwindPanic"}
defer func() {
if r := recover(); r == nil {
@@ -153,7 +165,7 @@ func TestSehUnwindDoublePanic(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("skipping amd64-only test")
}
- want := []string{"runtime_test.sehCallers", "runtime_test.TestSehUnwindDoublePanic.func1.1", "runtime.gopanic",
+ want := []string{"runtime_test.TestSehUnwindDoublePanic.func1.1", "runtime.gopanic",
"runtime_test.TestSehUnwindDoublePanic.func1", "runtime.gopanic", "runtime_test.TestSehUnwindDoublePanic"}
defer func() {
defer func() {
@@ -175,7 +187,7 @@ func TestSehUnwindNilPointerPanic(t *testing.T) {
if runtime.GOARCH != "amd64" {
t.Skip("skipping amd64-only test")
}
- want := []string{"runtime_test.sehCallers", "runtime_test.TestSehUnwindNilPointerPanic.func1", "runtime.gopanic",
+ want := []string{"runtime_test.TestSehUnwindNilPointerPanic.func1", "runtime.gopanic",
"runtime.sigpanic", "runtime_test.TestSehUnwindNilPointerPanic"}
defer func() {
if r := recover(); r == nil {
diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go
index f7628a0165..40547b8113 100644
--- a/src/runtime/signal_windows.go
+++ b/src/runtime/signal_windows.go
@@ -6,36 +6,29 @@ package runtime
import (
"internal/abi"
+ "internal/runtime/syscall/windows"
"unsafe"
)
-const (
- _SEM_FAILCRITICALERRORS = 0x0001
- _SEM_NOGPFAULTERRORBOX = 0x0002
- _SEM_NOOPENFILEERRORBOX = 0x8000
-
- _WER_FAULT_REPORTING_NO_UI = 0x0020
-)
-
func preventErrorDialogs() {
errormode := stdcall(_GetErrorMode)
- stdcall(_SetErrorMode, errormode|_SEM_FAILCRITICALERRORS|_SEM_NOGPFAULTERRORBOX|_SEM_NOOPENFILEERRORBOX)
+ stdcall(_SetErrorMode, errormode|windows.SEM_FAILCRITICALERRORS|windows.SEM_NOGPFAULTERRORBOX|windows.SEM_NOOPENFILEERRORBOX)
// Disable WER fault reporting UI.
// Do this even if WER is disabled as a whole,
// as WER might be enabled later with setTraceback("wer")
// and we still want the fault reporting UI to be disabled if this happens.
var werflags uintptr
- stdcall(_WerGetFlags, currentProcess, uintptr(unsafe.Pointer(&werflags)))
- stdcall(_WerSetFlags, werflags|_WER_FAULT_REPORTING_NO_UI)
+ stdcall(_WerGetFlags, windows.CurrentProcess, uintptr(unsafe.Pointer(&werflags)))
+ stdcall(_WerSetFlags, werflags|windows.WER_FAULT_REPORTING_NO_UI)
}
// enableWER re-enables Windows error reporting without fault reporting UI.
func enableWER() {
// re-enable Windows Error Reporting
errormode := stdcall(_GetErrorMode)
- if errormode&_SEM_NOGPFAULTERRORBOX != 0 {
- stdcall(_SetErrorMode, errormode^_SEM_NOGPFAULTERRORBOX)
+ if errormode&windows.SEM_NOGPFAULTERRORBOX != 0 {
+ stdcall(_SetErrorMode, errormode^windows.SEM_NOGPFAULTERRORBOX)
}
}
@@ -62,8 +55,8 @@ func initExceptionHandler() {
// by calling runtime.abort function.
//
//go:nosplit
-func isAbort(r *context) bool {
- pc := r.ip()
+func isAbort(r *windows.Context) bool {
+ pc := r.PC()
if GOARCH == "386" || GOARCH == "amd64" {
// In the case of an abort, the exception IP is one byte after
// the INT3 (this differs from UNIX OSes).
@@ -79,29 +72,29 @@ func isAbort(r *context) bool {
// because of a stack overflow.
//
//go:nosplit
-func isgoexception(info *exceptionrecord, r *context) bool {
+func isgoexception(info *windows.ExceptionRecord, r *windows.Context) bool {
// Only handle exception if executing instructions in Go binary
// (not Windows library code).
// TODO(mwhudson): needs to loop to support shared libs
- if r.ip() < firstmoduledata.text || firstmoduledata.etext < r.ip() {
+ if r.PC() < firstmoduledata.text || firstmoduledata.etext < r.PC() {
return false
}
// Go will only handle some exceptions.
- switch info.exceptioncode {
+ switch info.ExceptionCode {
default:
return false
- case _EXCEPTION_ACCESS_VIOLATION:
- case _EXCEPTION_IN_PAGE_ERROR:
- case _EXCEPTION_INT_DIVIDE_BY_ZERO:
- case _EXCEPTION_INT_OVERFLOW:
- case _EXCEPTION_FLT_DENORMAL_OPERAND:
- case _EXCEPTION_FLT_DIVIDE_BY_ZERO:
- case _EXCEPTION_FLT_INEXACT_RESULT:
- case _EXCEPTION_FLT_OVERFLOW:
- case _EXCEPTION_FLT_UNDERFLOW:
- case _EXCEPTION_BREAKPOINT:
- case _EXCEPTION_ILLEGAL_INSTRUCTION: // breakpoint arrives this way on arm64
+ case windows.EXCEPTION_ACCESS_VIOLATION:
+ case windows.EXCEPTION_IN_PAGE_ERROR:
+ case windows.EXCEPTION_INT_DIVIDE_BY_ZERO:
+ case windows.EXCEPTION_INT_OVERFLOW:
+ case windows.EXCEPTION_FLT_DENORMAL_OPERAND:
+ case windows.EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ case windows.EXCEPTION_FLT_INEXACT_RESULT:
+ case windows.EXCEPTION_FLT_OVERFLOW:
+ case windows.EXCEPTION_FLT_UNDERFLOW:
+ case windows.EXCEPTION_BREAKPOINT:
+ case windows.EXCEPTION_ILLEGAL_INSTRUCTION: // breakpoint arrives this way on arm64
}
return true
}
@@ -134,13 +127,13 @@ func sigFetchG() *g {
// It is nosplit for the same reason as exceptionhandler.
//
//go:nosplit
-func sigtrampgo(ep *exceptionpointers, kind int) int32 {
+func sigtrampgo(ep *windows.ExceptionPointers, kind int) int32 {
gp := sigFetchG()
if gp == nil {
- return _EXCEPTION_CONTINUE_SEARCH
+ return windows.EXCEPTION_CONTINUE_SEARCH
}
- var fn func(info *exceptionrecord, r *context, gp *g) int32
+ var fn func(info *windows.ExceptionRecord, r *windows.Context, gp *g) int32
switch kind {
case callbackVEH:
fn = exceptionhandler
@@ -169,12 +162,12 @@ func sigtrampgo(ep *exceptionpointers, kind int) int32 {
var ret int32
if gp != gp.m.g0 {
systemstack(func() {
- ret = fn(ep.record, ep.context, gp)
+ ret = fn(ep.Record, ep.Context, gp)
})
} else {
- ret = fn(ep.record, ep.context, gp)
+ ret = fn(ep.Record, ep.Context, gp)
}
- if ret == _EXCEPTION_CONTINUE_SEARCH {
+ if ret == windows.EXCEPTION_CONTINUE_SEARCH {
return ret
}
@@ -189,13 +182,13 @@ func sigtrampgo(ep *exceptionpointers, kind int) int32 {
// will not actually return to the original frame, so the registers
// are effectively dead. But this does mean we can't use the
// same mechanism for async preemption.
- if ep.context.ip() == abi.FuncPCABI0(sigresume) {
+ if ep.Context.PC() == abi.FuncPCABI0(sigresume) {
// sigresume has already been set up by a previous exception.
return ret
}
- prepareContextForSigResume(ep.context)
- ep.context.set_sp(gp.m.g0.sched.sp)
- ep.context.set_ip(abi.FuncPCABI0(sigresume))
+ prepareContextForSigResume(ep.Context)
+ ep.Context.SetSP(gp.m.g0.sched.sp)
+ ep.Context.SetPC(abi.FuncPCABI0(sigresume))
return ret
}
@@ -207,9 +200,9 @@ func sigtrampgo(ep *exceptionpointers, kind int) int32 {
// _EXCEPTION_BREAKPOINT, which is raised by abort() if we overflow the g0 stack.
//
//go:nosplit
-func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 {
+func exceptionhandler(info *windows.ExceptionRecord, r *windows.Context, gp *g) int32 {
if !isgoexception(info, r) {
- return _EXCEPTION_CONTINUE_SEARCH
+ return windows.EXCEPTION_CONTINUE_SEARCH
}
if gp.throwsplit || isAbort(r) {
@@ -226,10 +219,10 @@ func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 {
// Have to pass arguments out of band since
// augmenting the stack frame would break
// the unwinding code.
- gp.sig = info.exceptioncode
- gp.sigcode0 = info.exceptioninformation[0]
- gp.sigcode1 = info.exceptioninformation[1]
- gp.sigpc = r.ip()
+ gp.sig = info.ExceptionCode
+ gp.sigcode0 = info.ExceptionInformation[0]
+ gp.sigcode1 = info.ExceptionInformation[1]
+ gp.sigpc = r.PC()
// Only push runtime·sigpanic if r.ip() != 0.
// If r.ip() == 0, probably panicked because of a
@@ -244,13 +237,13 @@ func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 {
// The exception is not from asyncPreempt, so not to push a
// sigpanic call to make it look like that. Instead, just
// overwrite the PC. (See issue #35773)
- if r.ip() != 0 && r.ip() != abi.FuncPCABI0(asyncPreempt) {
- r.pushCall(abi.FuncPCABI0(sigpanic0), r.ip())
+ if r.PC() != 0 && r.PC() != abi.FuncPCABI0(asyncPreempt) {
+ r.PushCall(abi.FuncPCABI0(sigpanic0), r.PC())
} else {
// Not safe to push the call. Just clobber the frame.
- r.set_ip(abi.FuncPCABI0(sigpanic0))
+ r.SetPC(abi.FuncPCABI0(sigpanic0))
}
- return _EXCEPTION_CONTINUE_EXECUTION
+ return windows.EXCEPTION_CONTINUE_EXECUTION
}
// sehhandler is reached as part of the SEH chain.
@@ -258,11 +251,11 @@ func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 {
// It is nosplit for the same reason as exceptionhandler.
//
//go:nosplit
-func sehhandler(_ *exceptionrecord, _ uint64, _ *context, dctxt *_DISPATCHER_CONTEXT) int32 {
+func sehhandler(_ *windows.ExceptionRecord, _ uint64, _ *windows.Context, dctxt *windows.DISPATCHER_CONTEXT) int32 {
g0 := getg()
if g0 == nil || g0.m.curg == nil {
// No g available, nothing to do here.
- return _EXCEPTION_CONTINUE_SEARCH_SEH
+ return windows.EXCEPTION_CONTINUE_SEARCH_SEH
}
// The Windows SEH machinery will unwind the stack until it finds
// a frame with a handler for the exception or until the frame is
@@ -275,19 +268,19 @@ func sehhandler(_ *exceptionrecord, _ uint64, _ *context, dctxt *_DISPATCHER_CON
// To work around this, manually unwind the stack until the top of the goroutine
// stack is reached, and then pass the control back to Windows.
gp := g0.m.curg
- ctxt := dctxt.ctx()
+ ctxt := dctxt.Ctx()
var base, sp uintptr
for {
- entry := stdcall(_RtlLookupFunctionEntry, ctxt.ip(), uintptr(unsafe.Pointer(&base)), 0)
+ entry := stdcall(_RtlLookupFunctionEntry, ctxt.PC(), uintptr(unsafe.Pointer(&base)), 0)
if entry == 0 {
break
}
- stdcall(_RtlVirtualUnwind, 0, base, ctxt.ip(), entry, uintptr(unsafe.Pointer(ctxt)), 0, uintptr(unsafe.Pointer(&sp)), 0)
+ stdcall(_RtlVirtualUnwind, 0, base, ctxt.PC(), entry, uintptr(unsafe.Pointer(ctxt)), 0, uintptr(unsafe.Pointer(&sp)), 0)
if sp < gp.stack.lo || gp.stack.hi <= sp {
break
}
}
- return _EXCEPTION_CONTINUE_SEARCH_SEH
+ return windows.EXCEPTION_CONTINUE_SEARCH_SEH
}
// It seems Windows searches ContinueHandler's list even
@@ -298,11 +291,11 @@ func sehhandler(_ *exceptionrecord, _ uint64, _ *context, dctxt *_DISPATCHER_CON
// It is nosplit for the same reason as exceptionhandler.
//
//go:nosplit
-func firstcontinuehandler(info *exceptionrecord, r *context, gp *g) int32 {
+func firstcontinuehandler(info *windows.ExceptionRecord, r *windows.Context, gp *g) int32 {
if !isgoexception(info, r) {
- return _EXCEPTION_CONTINUE_SEARCH
+ return windows.EXCEPTION_CONTINUE_SEARCH
}
- return _EXCEPTION_CONTINUE_EXECUTION
+ return windows.EXCEPTION_CONTINUE_EXECUTION
}
// lastcontinuehandler is reached, because runtime cannot handle
@@ -311,12 +304,12 @@ func firstcontinuehandler(info *exceptionrecord, r *context, gp *g) int32 {
// It is nosplit for the same reason as exceptionhandler.
//
//go:nosplit
-func lastcontinuehandler(info *exceptionrecord, r *context, gp *g) int32 {
+func lastcontinuehandler(info *windows.ExceptionRecord, r *windows.Context, gp *g) int32 {
if islibrary || isarchive {
// Go DLL/archive has been loaded in a non-go program.
// If the exception does not originate from go, the go runtime
// should not take responsibility of crashing the process.
- return _EXCEPTION_CONTINUE_SEARCH
+ return windows.EXCEPTION_CONTINUE_SEARCH
}
// VEH is called before SEH, but arm64 MSVC DLLs use SEH to trap
@@ -325,9 +318,9 @@ func lastcontinuehandler(info *exceptionrecord, r *context, gp *g) int32 {
// arm64 and it's an illegal instruction and this is coming from
// non-Go code, then assume it's this runtime probing happen, and
// pass that onward to SEH.
- if GOARCH == "arm64" && info.exceptioncode == _EXCEPTION_ILLEGAL_INSTRUCTION &&
- (r.ip() < firstmoduledata.text || firstmoduledata.etext < r.ip()) {
- return _EXCEPTION_CONTINUE_SEARCH
+ if GOARCH == "arm64" && info.ExceptionCode == windows.EXCEPTION_ILLEGAL_INSTRUCTION &&
+ (r.PC() < firstmoduledata.text || firstmoduledata.etext < r.PC()) {
+ return windows.EXCEPTION_CONTINUE_SEARCH
}
winthrow(info, r, gp)
@@ -337,7 +330,7 @@ func lastcontinuehandler(info *exceptionrecord, r *context, gp *g) int32 {
// Always called on g0. gp is the G where the exception occurred.
//
//go:nosplit
-func winthrow(info *exceptionrecord, r *context, gp *g) {
+func winthrow(info *windows.ExceptionRecord, r *windows.Context, gp *g) {
g0 := getg()
if panicking.Load() != 0 { // traceback already printed
@@ -352,9 +345,9 @@ func winthrow(info *exceptionrecord, r *context, gp *g) {
g0.stackguard0 = g0.stack.lo + stackGuard
g0.stackguard1 = g0.stackguard0
- print("Exception ", hex(info.exceptioncode), " ", hex(info.exceptioninformation[0]), " ", hex(info.exceptioninformation[1]), " ", hex(r.ip()), "\n")
+ print("Exception ", hex(info.ExceptionCode), " ", hex(info.ExceptionInformation[0]), " ", hex(info.ExceptionInformation[1]), " ", hex(r.PC()), "\n")
- print("PC=", hex(r.ip()), "\n")
+ print("PC=", hex(r.PC()), "\n")
if g0.m.incgo && gp == g0.m.g0 && g0.m.curg != nil {
if iscgo {
print("signal arrived during external code execution\n")
@@ -368,7 +361,7 @@ func winthrow(info *exceptionrecord, r *context, gp *g) {
level, _, docrash := gotraceback()
if level > 0 {
- tracebacktrap(r.ip(), r.sp(), r.lr(), gp)
+ tracebacktrap(r.PC(), r.SP(), r.LR(), gp)
tracebackothers(gp)
dumpregs(r)
}
@@ -387,7 +380,7 @@ func sigpanic() {
}
switch gp.sig {
- case _EXCEPTION_ACCESS_VIOLATION, _EXCEPTION_IN_PAGE_ERROR:
+ case windows.EXCEPTION_ACCESS_VIOLATION, windows.EXCEPTION_IN_PAGE_ERROR:
if gp.sigcode1 < 0x1000 {
panicmem()
}
@@ -403,15 +396,15 @@ func sigpanic() {
print("unexpected fault address ", hex(gp.sigcode1), "\n")
}
throw("fault")
- case _EXCEPTION_INT_DIVIDE_BY_ZERO:
+ case windows.EXCEPTION_INT_DIVIDE_BY_ZERO:
panicdivide()
- case _EXCEPTION_INT_OVERFLOW:
+ case windows.EXCEPTION_INT_OVERFLOW:
panicoverflow()
- case _EXCEPTION_FLT_DENORMAL_OPERAND,
- _EXCEPTION_FLT_DIVIDE_BY_ZERO,
- _EXCEPTION_FLT_INEXACT_RESULT,
- _EXCEPTION_FLT_OVERFLOW,
- _EXCEPTION_FLT_UNDERFLOW:
+ case windows.EXCEPTION_FLT_DENORMAL_OPERAND,
+ windows.EXCEPTION_FLT_DIVIDE_BY_ZERO,
+ windows.EXCEPTION_FLT_INEXACT_RESULT,
+ windows.EXCEPTION_FLT_OVERFLOW,
+ windows.EXCEPTION_FLT_UNDERFLOW:
panicfloat()
}
throw("fault")
@@ -444,29 +437,28 @@ func crash() {
// This provides the expected exit status for the shell.
//
//go:nosplit
-func dieFromException(info *exceptionrecord, r *context) {
+func dieFromException(info *windows.ExceptionRecord, r *windows.Context) {
if info == nil {
gp := getg()
if gp.sig != 0 {
// Try to reconstruct an exception record from
// the exception information stored in gp.
- info = &exceptionrecord{
- exceptionaddress: gp.sigpc,
- exceptioncode: gp.sig,
- numberparameters: 2,
+ info = &windows.ExceptionRecord{
+ ExceptionAddress: gp.sigpc,
+ ExceptionCode: gp.sig,
+ NumberParameters: 2,
}
- info.exceptioninformation[0] = gp.sigcode0
- info.exceptioninformation[1] = gp.sigcode1
+ info.ExceptionInformation[0] = gp.sigcode0
+ info.ExceptionInformation[1] = gp.sigcode1
} else {
// By default, a failing Go application exits with exit code 2.
// Use this value when gp does not contain exception info.
- info = &exceptionrecord{
- exceptioncode: 2,
+ info = &windows.ExceptionRecord{
+ ExceptionCode: 2,
}
}
}
- const FAIL_FAST_GENERATE_EXCEPTION_ADDRESS = 0x1
- stdcall(_RaiseFailFastException, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(r)), FAIL_FAST_GENERATE_EXCEPTION_ADDRESS)
+ stdcall(_RaiseFailFastException, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(r)), windows.FAIL_FAST_GENERATE_EXCEPTION_ADDRESS)
}
// gsignalStack is unused on Windows.
diff --git a/src/runtime/signal_windows_386.go b/src/runtime/signal_windows_386.go
new file mode 100644
index 0000000000..1c731290b6
--- /dev/null
+++ b/src/runtime/signal_windows_386.go
@@ -0,0 +1,28 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "internal/runtime/syscall/windows"
+
+func prepareContextForSigResume(c *windows.Context) {
+ c.Edx = c.Esp
+ c.Ecx = c.Eip
+}
+
+func dumpregs(r *windows.Context) {
+ print("eax ", hex(r.Eax), "\n")
+ print("ebx ", hex(r.Ebx), "\n")
+ print("ecx ", hex(r.Ecx), "\n")
+ print("edx ", hex(r.Edx), "\n")
+ print("edi ", hex(r.Edi), "\n")
+ print("esi ", hex(r.Esi), "\n")
+ print("ebp ", hex(r.Ebp), "\n")
+ print("esp ", hex(r.Esp), "\n")
+ print("eip ", hex(r.Eip), "\n")
+ print("eflags ", hex(r.EFlags), "\n")
+ print("cs ", hex(r.SegCs), "\n")
+ print("fs ", hex(r.SegFs), "\n")
+ print("gs ", hex(r.SegGs), "\n")
+}
diff --git a/src/runtime/signal_windows_amd64.go b/src/runtime/signal_windows_amd64.go
new file mode 100644
index 0000000000..ecb7024548
--- /dev/null
+++ b/src/runtime/signal_windows_amd64.go
@@ -0,0 +1,36 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "internal/runtime/syscall/windows"
+
+func prepareContextForSigResume(c *windows.Context) {
+ c.R8 = c.Rsp
+ c.R9 = c.Rip
+}
+
+func dumpregs(r *windows.Context) {
+ print("rax ", hex(r.Rax), "\n")
+ print("rbx ", hex(r.Rbx), "\n")
+ print("rcx ", hex(r.Rcx), "\n")
+ print("rdx ", hex(r.Rdx), "\n")
+ print("rdi ", hex(r.Rdi), "\n")
+ print("rsi ", hex(r.Rsi), "\n")
+ print("rbp ", hex(r.Rbp), "\n")
+ print("rsp ", hex(r.Rsp), "\n")
+ print("r8 ", hex(r.R8), "\n")
+ print("r9 ", hex(r.R9), "\n")
+ print("r10 ", hex(r.R10), "\n")
+ print("r11 ", hex(r.R11), "\n")
+ print("r12 ", hex(r.R12), "\n")
+ print("r13 ", hex(r.R13), "\n")
+ print("r14 ", hex(r.R14), "\n")
+ print("r15 ", hex(r.R15), "\n")
+ print("rip ", hex(r.Rip), "\n")
+ print("rflags ", hex(r.EFlags), "\n")
+ print("cs ", hex(r.SegCs), "\n")
+ print("fs ", hex(r.SegFs), "\n")
+ print("gs ", hex(r.SegGs), "\n")
+}
diff --git a/src/runtime/signal_windows_arm64.go b/src/runtime/signal_windows_arm64.go
new file mode 100644
index 0000000000..78bddb9fb3
--- /dev/null
+++ b/src/runtime/signal_windows_arm64.go
@@ -0,0 +1,49 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "internal/runtime/syscall/windows"
+
+func prepareContextForSigResume(c *windows.Context) {
+ c.X[0] = c.XSp
+ c.X[1] = c.Pc
+}
+
+func dumpregs(r *windows.Context) {
+ print("r0 ", hex(r.X[0]), "\n")
+ print("r1 ", hex(r.X[1]), "\n")
+ print("r2 ", hex(r.X[2]), "\n")
+ print("r3 ", hex(r.X[3]), "\n")
+ print("r4 ", hex(r.X[4]), "\n")
+ print("r5 ", hex(r.X[5]), "\n")
+ print("r6 ", hex(r.X[6]), "\n")
+ print("r7 ", hex(r.X[7]), "\n")
+ print("r8 ", hex(r.X[8]), "\n")
+ print("r9 ", hex(r.X[9]), "\n")
+ print("r10 ", hex(r.X[10]), "\n")
+ print("r11 ", hex(r.X[11]), "\n")
+ print("r12 ", hex(r.X[12]), "\n")
+ print("r13 ", hex(r.X[13]), "\n")
+ print("r14 ", hex(r.X[14]), "\n")
+ print("r15 ", hex(r.X[15]), "\n")
+ print("r16 ", hex(r.X[16]), "\n")
+ print("r17 ", hex(r.X[17]), "\n")
+ print("r18 ", hex(r.X[18]), "\n")
+ print("r19 ", hex(r.X[19]), "\n")
+ print("r20 ", hex(r.X[20]), "\n")
+ print("r21 ", hex(r.X[21]), "\n")
+ print("r22 ", hex(r.X[22]), "\n")
+ print("r23 ", hex(r.X[23]), "\n")
+ print("r24 ", hex(r.X[24]), "\n")
+ print("r25 ", hex(r.X[25]), "\n")
+ print("r26 ", hex(r.X[26]), "\n")
+ print("r27 ", hex(r.X[27]), "\n")
+ print("r28 ", hex(r.X[28]), "\n")
+ print("r29 ", hex(r.X[29]), "\n")
+ print("lr ", hex(r.X[30]), "\n")
+ print("sp ", hex(r.XSp), "\n")
+ print("pc ", hex(r.Pc), "\n")
+ print("cpsr ", hex(r.Cpsr), "\n")
+}
diff --git a/src/runtime/sys_darwin.go b/src/runtime/sys_darwin.go
index ad423afc60..aa628021a0 100644
--- a/src/runtime/sys_darwin.go
+++ b/src/runtime/sys_darwin.go
@@ -10,12 +10,27 @@ import (
"unsafe"
)
+//go:nosplit
+func libcError() uintptr {
+ errPtr, _ := syscall(abi.FuncPCABI0(libc_error_trampoline), 0, 0, 0)
+ return errPtr
+}
+func libc_error_trampoline()
+
// The X versions of syscall expect the libc call to return a 64-bit result.
// Otherwise (the non-X version) expects a 32-bit result.
// This distinction is required because an error is indicated by returning -1,
// and we need to know whether to check 32 or 64 bits of the result.
// (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.)
+//go:nosplit
+func syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ args := struct{ fn, a1, a2, a3, r1, r2 uintptr }{fn, a1, a2, a3, r1, r2}
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall_trampoline)), unsafe.Pointer(&args))
+ return args.r1, args.r2
+}
+func syscall_trampoline()
+
// golang.org/x/sys linknames syscall_syscall
// (in addition to standard package syscall).
// Do not remove or change the type signature.
@@ -23,24 +38,28 @@ import (
//go:linkname syscall_syscall syscall.syscall
//go:nosplit
func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&args))
+ r1, r2, err = syscall_rawSyscall(fn, a1, a2, a3)
exitsyscall()
- return args.r1, args.r2, args.err
+ return r1, r2, err
}
-func syscall()
//go:linkname syscall_syscallX syscall.syscallX
//go:nosplit
func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&args))
+ r1, r2, err = syscall_rawSyscallX(fn, a1, a2, a3)
exitsyscall()
- return args.r1, args.r2, args.err
+ return r1, r2, err
}
-func syscallX()
+
+//go:nosplit
+func syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr) {
+ args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2 uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2}
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6_trampoline)), unsafe.Pointer(&args))
+ return args.r1, args.r2
+}
+func syscall6_trampoline()
// golang.org/x/sys linknames syscall.syscall6
// (in addition to standard package syscall).
@@ -56,13 +75,28 @@ func syscallX()
//go:linkname syscall_syscall6 syscall.syscall6
//go:nosplit
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&args))
+ r1, r2, err = syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6)
+ exitsyscall()
+ return r1, r2, err
+}
+
+//go:linkname syscall_syscall6X syscall.syscall6X
+//go:nosplit
+func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ r1, r2, err = syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6)
exitsyscall()
- return args.r1, args.r2, args.err
+ return r1, r2, err
}
-func syscall6()
+
+//go:nosplit
+func syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr) {
+ args := struct{ fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, r1, r2 uintptr }{fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, r1, r2}
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall9_trampoline)), unsafe.Pointer(&args))
+ return args.r1, args.r2
+}
+func syscall9_trampoline()
// golang.org/x/sys linknames syscall.syscall9
// (in addition to standard package syscall).
@@ -71,24 +105,11 @@ func syscall6()
//go:linkname syscall_syscall9 syscall.syscall9
//go:nosplit
func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, r1, r2, err}
- entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall9)), unsafe.Pointer(&args))
- exitsyscall()
- return args.r1, args.r2, args.err
-}
-func syscall9()
-
-//go:linkname syscall_syscall6X syscall.syscall6X
-//go:nosplit
-func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&args))
+ r1, r2, err = syscall_rawSyscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9)
exitsyscall()
- return args.r1, args.r2, args.err
+ return r1, r2, err
}
-func syscall6X()
// golang.org/x/sys linknames syscall.syscallPtr
// (in addition to standard package syscall).
@@ -97,13 +118,11 @@ func syscall6X()
//go:linkname syscall_syscallPtr syscall.syscallPtr
//go:nosplit
func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallPtr)), unsafe.Pointer(&args))
+ r1, r2, err = syscall_rawSyscallPtr(fn, a1, a2, a3)
exitsyscall()
- return args.r1, args.r2, args.err
+ return r1, r2, err
}
-func syscallPtr()
// golang.org/x/sys linknames syscall_rawSyscall
// (in addition to standard package syscall).
@@ -112,9 +131,30 @@ func syscallPtr()
//go:linkname syscall_rawSyscall syscall.rawSyscall
//go:nosplit
func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&args))
- return args.r1, args.r2, args.err
+ r1, r2 = syscall(fn, a1, a2, a3)
+ // Check if the low 32 bits of r1 are -1, indicating an error.
+ if int32(r1) == -1 {
+ err = libcError()
+ }
+ return r1, r2, err
+}
+
+//go:nosplit
+func syscall_rawSyscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ r1, r2 = syscall(fn, a1, a2, a3)
+ if r1 == ^uintptr(0) {
+ err = libcError()
+ }
+ return r1, r2, err
+}
+
+//go:nosplit
+func syscall_rawSyscallPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ r1, r2 = syscall(fn, a1, a2, a3)
+ if r1 == 0 {
+ err = libcError()
+ }
+ return r1, r2, err
}
// golang.org/x/sys linknames syscall_rawSyscall6
@@ -124,9 +164,31 @@ func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
//go:nosplit
func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&args))
- return args.r1, args.r2, args.err
+ r1, r2 = syscall6(fn, a1, a2, a3, a4, a5, a6)
+ // Check if the low 32 bits of r1 are -1, indicating an error.
+ if int32(r1) == -1 {
+ err = libcError()
+ }
+ return r1, r2, err
+}
+
+//go:nosplit
+func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ r1, r2 = syscall6(fn, a1, a2, a3, a4, a5, a6)
+ if r1 == ^uintptr(0) {
+ err = libcError()
+ }
+ return r1, r2, err
+}
+
+//go:nosplit
+func syscall_rawSyscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
+ r1, r2 = syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9)
+ // Check if the low 32 bits of r1 are -1, indicating an error.
+ if int32(r1) == -1 {
+ err = libcError()
+ }
+ return r1, r2, err
}
// crypto_x509_syscall is used in crypto/x509/internal/macos to call into Security.framework and CF.
diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s
index 0091546f20..32aa657274 100644
--- a/src/runtime/sys_darwin_amd64.s
+++ b/src/runtime/sys_darwin_amd64.s
@@ -509,8 +509,8 @@ TEXT runtime·arc4random_buf_trampoline(SB),NOSPLIT,$0
CALL libc_arc4random_buf(SB)
RET
-// syscall calls a function in libc on behalf of the syscall package.
-// syscall takes a pointer to a struct like:
+// syscall_trampoline calls a function in libc on behalf of the syscall package.
+// syscall_trampoline takes a pointer to a struct like:
// struct {
// fn uintptr
// a1 uintptr
@@ -518,14 +518,10 @@ TEXT runtime·arc4random_buf_trampoline(SB),NOSPLIT,$0
// a3 uintptr
// r1 uintptr
// r2 uintptr
-// err uintptr
// }
-// syscall must be called on the g0 stack with the
+// syscall_trampoline must be called on the g0 stack with the
// C calling convention (use libcCall).
-//
-// syscall expects a 32-bit result and tests for 32-bit -1
-// to decide there was an error.
-TEXT runtime·syscall(SB),NOSPLIT,$16
+TEXT runtime·syscall_trampoline(SB),NOSPLIT,$16
MOVQ (0*8)(DI), CX // fn
MOVQ (2*8)(DI), SI // a2
MOVQ (3*8)(DI), DX // a3
@@ -539,99 +535,11 @@ TEXT runtime·syscall(SB),NOSPLIT,$16
MOVQ AX, (4*8)(DI) // r1
MOVQ DX, (5*8)(DI) // r2
- // Standard libc functions return -1 on error
- // and set errno.
- CMPL AX, $-1 // Note: high 32 bits are junk
- JNE ok
-
- // Get error code from libc.
- CALL libc_error(SB)
- MOVLQSX (AX), AX
- MOVQ (SP), DI
- MOVQ AX, (6*8)(DI) // err
-
-ok:
XORL AX, AX // no error (it's ignored anyway)
RET
-// syscallX calls a function in libc on behalf of the syscall package.
-// syscallX takes a pointer to a struct like:
-// struct {
-// fn uintptr
-// a1 uintptr
-// a2 uintptr
-// a3 uintptr
-// r1 uintptr
-// r2 uintptr
-// err uintptr
-// }
-// syscallX must be called on the g0 stack with the
-// C calling convention (use libcCall).
-//
-// syscallX is like syscall but expects a 64-bit result
-// and tests for 64-bit -1 to decide there was an error.
-TEXT runtime·syscallX(SB),NOSPLIT,$16
- MOVQ (0*8)(DI), CX // fn
- MOVQ (2*8)(DI), SI // a2
- MOVQ (3*8)(DI), DX // a3
- MOVQ DI, (SP)
- MOVQ (1*8)(DI), DI // a1
- XORL AX, AX // vararg: say "no float args"
-
- CALL CX
-
- MOVQ (SP), DI
- MOVQ AX, (4*8)(DI) // r1
- MOVQ DX, (5*8)(DI) // r2
-
- // Standard libc functions return -1 on error
- // and set errno.
- CMPQ AX, $-1
- JNE ok
-
- // Get error code from libc.
- CALL libc_error(SB)
- MOVLQSX (AX), AX
- MOVQ (SP), DI
- MOVQ AX, (6*8)(DI) // err
-
-ok:
- XORL AX, AX // no error (it's ignored anyway)
- RET
-
-// syscallPtr is like syscallX except that the libc function reports an
-// error by returning NULL and setting errno.
-TEXT runtime·syscallPtr(SB),NOSPLIT,$16
- MOVQ (0*8)(DI), CX // fn
- MOVQ (2*8)(DI), SI // a2
- MOVQ (3*8)(DI), DX // a3
- MOVQ DI, (SP)
- MOVQ (1*8)(DI), DI // a1
- XORL AX, AX // vararg: say "no float args"
-
- CALL CX
-
- MOVQ (SP), DI
- MOVQ AX, (4*8)(DI) // r1
- MOVQ DX, (5*8)(DI) // r2
-
- // syscallPtr libc functions return NULL on error
- // and set errno.
- TESTQ AX, AX
- JNE ok
-
- // Get error code from libc.
- CALL libc_error(SB)
- MOVLQSX (AX), AX
- MOVQ (SP), DI
- MOVQ AX, (6*8)(DI) // err
-
-ok:
- XORL AX, AX // no error (it's ignored anyway)
- RET
-
-// syscall6 calls a function in libc on behalf of the syscall package.
-// syscall6 takes a pointer to a struct like:
+// syscall6_trampoline calls a function in libc on behalf of the syscall package.
+// syscall6_trampoline takes a pointer to a struct like:
// struct {
// fn uintptr
// a1 uintptr
@@ -642,14 +550,10 @@ ok:
// a6 uintptr
// r1 uintptr
// r2 uintptr
-// err uintptr
// }
-// syscall6 must be called on the g0 stack with the
+// syscall6_trampoline must be called on the g0 stack with the
// C calling convention (use libcCall).
-//
-// syscall6 expects a 32-bit result and tests for 32-bit -1
-// to decide there was an error.
-TEXT runtime·syscall6(SB),NOSPLIT,$16
+TEXT runtime·syscall6_trampoline(SB),NOSPLIT,$16
MOVQ (0*8)(DI), R11// fn
MOVQ (2*8)(DI), SI // a2
MOVQ (3*8)(DI), DX // a3
@@ -666,68 +570,11 @@ TEXT runtime·syscall6(SB),NOSPLIT,$16
MOVQ AX, (7*8)(DI) // r1
MOVQ DX, (8*8)(DI) // r2
- CMPL AX, $-1
- JNE ok
-
- CALL libc_error(SB)
- MOVLQSX (AX), AX
- MOVQ (SP), DI
- MOVQ AX, (9*8)(DI) // err
-
-ok:
XORL AX, AX // no error (it's ignored anyway)
RET
-// syscall6X calls a function in libc on behalf of the syscall package.
-// syscall6X takes a pointer to a struct like:
-// struct {
-// fn uintptr
-// a1 uintptr
-// a2 uintptr
-// a3 uintptr
-// a4 uintptr
-// a5 uintptr
-// a6 uintptr
-// r1 uintptr
-// r2 uintptr
-// err uintptr
-// }
-// syscall6X must be called on the g0 stack with the
-// C calling convention (use libcCall).
-//
-// syscall6X is like syscall6 but expects a 64-bit result
-// and tests for 64-bit -1 to decide there was an error.
-TEXT runtime·syscall6X(SB),NOSPLIT,$16
- MOVQ (0*8)(DI), R11// fn
- MOVQ (2*8)(DI), SI // a2
- MOVQ (3*8)(DI), DX // a3
- MOVQ (4*8)(DI), CX // a4
- MOVQ (5*8)(DI), R8 // a5
- MOVQ (6*8)(DI), R9 // a6
- MOVQ DI, (SP)
- MOVQ (1*8)(DI), DI // a1
- XORL AX, AX // vararg: say "no float args"
-
- CALL R11
-
- MOVQ (SP), DI
- MOVQ AX, (7*8)(DI) // r1
- MOVQ DX, (8*8)(DI) // r2
-
- CMPQ AX, $-1
- JNE ok
-
- CALL libc_error(SB)
- MOVLQSX (AX), AX
- MOVQ (SP), DI
- MOVQ AX, (9*8)(DI) // err
-
-ok:
- XORL AX, AX // no error (it's ignored anyway)
- RET
-
-// syscall9 calls a function in libc on behalf of the syscall package.
-// syscall9 takes a pointer to a struct like:
+// syscall9_trampoline calls a function in libc on behalf of the syscall package.
+// syscall9_trampoline takes a pointer to a struct like:
// struct {
// fn uintptr
// a1 uintptr
@@ -743,12 +590,9 @@ ok:
// r2 uintptr
// err uintptr
// }
-// syscall9 must be called on the g0 stack with the
+// syscall9_trampoline must be called on the g0 stack with the
// C calling convention (use libcCall).
-//
-// syscall9 expects a 32-bit result and tests for 32-bit -1
-// to decide there was an error.
-TEXT runtime·syscall9(SB),NOSPLIT,$32
+TEXT runtime·syscall9_trampoline(SB),NOSPLIT,$32
MOVQ (0*8)(DI), R13// fn
MOVQ (2*8)(DI), SI // a2
MOVQ (3*8)(DI), DX // a3
@@ -771,16 +615,12 @@ TEXT runtime·syscall9(SB),NOSPLIT,$32
MOVQ AX, (10*8)(DI) // r1
MOVQ DX, (11*8)(DI) // r2
- CMPL AX, $-1
- JNE ok
+ XORL AX, AX // no error (it's ignored anyway)
+ RET
+TEXT runtime·libc_error_trampoline(SB),NOSPLIT,$0
CALL libc_error(SB)
MOVLQSX (AX), AX
- MOVQ 24(SP), DI
- MOVQ AX, (12*8)(DI) // err
-
-ok:
- XORL AX, AX // no error (it's ignored anyway)
RET
// syscall_x509 is for crypto/x509. It is like syscall6 but does not check for errors,
diff --git a/src/runtime/sys_darwin_arm64.s b/src/runtime/sys_darwin_arm64.s
index 788fdf87b7..adbb2adafe 100644
--- a/src/runtime/sys_darwin_arm64.s
+++ b/src/runtime/sys_darwin_arm64.s
@@ -481,8 +481,8 @@ TEXT runtime·arc4random_buf_trampoline(SB),NOSPLIT,$0
BL libc_arc4random_buf(SB)
RET
-// syscall calls a function in libc on behalf of the syscall package.
-// syscall takes a pointer to a struct like:
+// syscall_trampoline calls a function in libc on behalf of the syscall package.
+// syscall_trampoline takes a pointer to a struct like:
// struct {
// fn uintptr
// a1 uintptr
@@ -490,12 +490,11 @@ TEXT runtime·arc4random_buf_trampoline(SB),NOSPLIT,$0
// a3 uintptr
// r1 uintptr
// r2 uintptr
-// err uintptr
// }
-// syscall must be called on the g0 stack with the
+// syscall_trampoline must be called on the g0 stack with the
// C calling convention (use libcCall).
-TEXT runtime·syscall(SB),NOSPLIT,$0
- SUB $16, RSP // push structure pointer
+TEXT runtime·syscall_trampoline(SB),NOSPLIT,$0
+ SUB $16, RSP // push structure pointer
MOVD R0, 8(RSP)
MOVD 0(R0), R12 // fn
@@ -517,87 +516,10 @@ TEXT runtime·syscall(SB),NOSPLIT,$0
ADD $16, RSP
MOVD R0, 32(R2) // save r1
MOVD R1, 40(R2) // save r2
- CMPW $-1, R0
- BNE ok
- SUB $16, RSP // push structure pointer
- MOVD R2, 8(RSP)
- BL libc_error(SB)
- MOVW (R0), R0
- MOVD 8(RSP), R2 // pop structure pointer
- ADD $16, RSP
- MOVD R0, 48(R2) // save err
-ok:
- RET
-
-// syscallX calls a function in libc on behalf of the syscall package.
-// syscallX takes a pointer to a struct like:
-// struct {
-// fn uintptr
-// a1 uintptr
-// a2 uintptr
-// a3 uintptr
-// r1 uintptr
-// r2 uintptr
-// err uintptr
-// }
-// syscallX must be called on the g0 stack with the
-// C calling convention (use libcCall).
-TEXT runtime·syscallX(SB),NOSPLIT,$0
- SUB $16, RSP // push structure pointer
- MOVD R0, (RSP)
-
- MOVD 0(R0), R12 // fn
- MOVD 16(R0), R1 // a2
- MOVD 24(R0), R2 // a3
- MOVD 8(R0), R0 // a1
- BL (R12)
-
- MOVD (RSP), R2 // pop structure pointer
- ADD $16, RSP
- MOVD R0, 32(R2) // save r1
- MOVD R1, 40(R2) // save r2
- CMP $-1, R0
- BNE ok
- SUB $16, RSP // push structure pointer
- MOVD R2, (RSP)
- BL libc_error(SB)
- MOVW (R0), R0
- MOVD (RSP), R2 // pop structure pointer
- ADD $16, RSP
- MOVD R0, 48(R2) // save err
-ok:
- RET
-
-// syscallPtr is like syscallX except that the libc function reports an
-// error by returning NULL and setting errno.
-TEXT runtime·syscallPtr(SB),NOSPLIT,$0
- SUB $16, RSP // push structure pointer
- MOVD R0, (RSP)
-
- MOVD 0(R0), R12 // fn
- MOVD 16(R0), R1 // a2
- MOVD 24(R0), R2 // a3
- MOVD 8(R0), R0 // a1
- BL (R12)
-
- MOVD (RSP), R2 // pop structure pointer
- ADD $16, RSP
- MOVD R0, 32(R2) // save r1
- MOVD R1, 40(R2) // save r2
- CMP $0, R0
- BNE ok
- SUB $16, RSP // push structure pointer
- MOVD R2, (RSP)
- BL libc_error(SB)
- MOVW (R0), R0
- MOVD (RSP), R2 // pop structure pointer
- ADD $16, RSP
- MOVD R0, 48(R2) // save err
-ok:
RET
-// syscall6 calls a function in libc on behalf of the syscall package.
-// syscall6 takes a pointer to a struct like:
+// syscall6_trampoline calls a function in libc on behalf of the syscall package.
+// syscall6_trampoline takes a pointer to a struct like:
// struct {
// fn uintptr
// a1 uintptr
@@ -608,11 +530,10 @@ ok:
// a6 uintptr
// r1 uintptr
// r2 uintptr
-// err uintptr
// }
-// syscall6 must be called on the g0 stack with the
+// syscall6_trampoline must be called on the g0 stack with the
// C calling convention (use libcCall).
-TEXT runtime·syscall6(SB),NOSPLIT,$0
+TEXT runtime·syscall6_trampoline(SB),NOSPLIT,$0
SUB $16, RSP // push structure pointer
MOVD R0, 8(RSP)
@@ -625,7 +546,7 @@ TEXT runtime·syscall6(SB),NOSPLIT,$0
MOVD 8(R0), R0 // a1
// If fn is declared as vararg, we have to pass the vararg arguments on the stack.
- // See syscall above. The only function this applies to is openat, for which the 4th
+ // See syscall_trampoline above. The only function this applies to is openat, for which the 4th
// arg must be on the stack.
MOVD R3, (RSP)
@@ -635,65 +556,10 @@ TEXT runtime·syscall6(SB),NOSPLIT,$0
ADD $16, RSP
MOVD R0, 56(R2) // save r1
MOVD R1, 64(R2) // save r2
- CMPW $-1, R0
- BNE ok
- SUB $16, RSP // push structure pointer
- MOVD R2, 8(RSP)
- BL libc_error(SB)
- MOVW (R0), R0
- MOVD 8(RSP), R2 // pop structure pointer
- ADD $16, RSP
- MOVD R0, 72(R2) // save err
-ok:
- RET
-
-// syscall6X calls a function in libc on behalf of the syscall package.
-// syscall6X takes a pointer to a struct like:
-// struct {
-// fn uintptr
-// a1 uintptr
-// a2 uintptr
-// a3 uintptr
-// a4 uintptr
-// a5 uintptr
-// a6 uintptr
-// r1 uintptr
-// r2 uintptr
-// err uintptr
-// }
-// syscall6X must be called on the g0 stack with the
-// C calling convention (use libcCall).
-TEXT runtime·syscall6X(SB),NOSPLIT,$0
- SUB $16, RSP // push structure pointer
- MOVD R0, (RSP)
-
- MOVD 0(R0), R12 // fn
- MOVD 16(R0), R1 // a2
- MOVD 24(R0), R2 // a3
- MOVD 32(R0), R3 // a4
- MOVD 40(R0), R4 // a5
- MOVD 48(R0), R5 // a6
- MOVD 8(R0), R0 // a1
- BL (R12)
-
- MOVD (RSP), R2 // pop structure pointer
- ADD $16, RSP
- MOVD R0, 56(R2) // save r1
- MOVD R1, 64(R2) // save r2
- CMP $-1, R0
- BNE ok
- SUB $16, RSP // push structure pointer
- MOVD R2, (RSP)
- BL libc_error(SB)
- MOVW (R0), R0
- MOVD (RSP), R2 // pop structure pointer
- ADD $16, RSP
- MOVD R0, 72(R2) // save err
-ok:
RET
-// syscall9 calls a function in libc on behalf of the syscall package.
-// syscall9 takes a pointer to a struct like:
+// syscall9_trampoline calls a function in libc on behalf of the syscall package.
+// syscall9_trampoline takes a pointer to a struct like:
// struct {
// fn uintptr
// a1 uintptr
@@ -707,11 +573,10 @@ ok:
// a9 uintptr
// r1 uintptr
// r2 uintptr
-// err uintptr
// }
-// syscall9 must be called on the g0 stack with the
+// syscall9_trampoline must be called on the g0 stack with the
// C calling convention (use libcCall).
-TEXT runtime·syscall9(SB),NOSPLIT,$0
+TEXT runtime·syscall9_trampoline(SB),NOSPLIT,$0
SUB $16, RSP // push structure pointer
MOVD R0, 8(RSP)
@@ -724,29 +589,20 @@ TEXT runtime·syscall9(SB),NOSPLIT,$0
MOVD 56(R0), R6 // a7
MOVD 64(R0), R7 // a8
MOVD 72(R0), R8 // a9
+ MOVD R8, 0(RSP) // the 9th arg and onwards must be passed on the stack
MOVD 8(R0), R0 // a1
- // If fn is declared as vararg, we have to pass the vararg arguments on the stack.
- // See syscall above. The only function this applies to is openat, for which the 4th
- // arg must be on the stack.
- MOVD R3, (RSP)
-
BL (R12)
MOVD 8(RSP), R2 // pop structure pointer
ADD $16, RSP
MOVD R0, 80(R2) // save r1
MOVD R1, 88(R2) // save r2
- CMPW $-1, R0
- BNE ok
- SUB $16, RSP // push structure pointer
- MOVD R2, 8(RSP)
+ RET
+
+TEXT runtime·libc_error_trampoline(SB),NOSPLIT,$0
BL libc_error(SB)
MOVW (R0), R0
- MOVD 8(RSP), R2 // pop structure pointer
- ADD $16, RSP
- MOVD R0, 96(R2) // save err
-ok:
RET
// syscall_x509 is for crypto/x509. It is like syscall6 but does not check for errors,
diff --git a/src/runtime/syscall_test.go b/src/runtime/syscall_test.go
new file mode 100644
index 0000000000..18f3e8e315
--- /dev/null
+++ b/src/runtime/syscall_test.go
@@ -0,0 +1,28 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "internal/testenv"
+ "runtime"
+ "testing"
+)
+
+func TestSyscallArgs(t *testing.T) {
+ if runtime.GOOS != "darwin" {
+ t.Skipf("skipping test: GOOS=%s", runtime.GOOS)
+ }
+ testenv.MustHaveCGO(t)
+
+ exe, err := buildTestProg(t, "testsyscall")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cmd := testenv.Command(t, exe)
+ if out, err := cmd.CombinedOutput(); err != nil {
+ t.Fatalf("test program failed: %v\n%s", err, out)
+ }
+}
diff --git a/src/runtime/testdata/testsyscall/testsyscall.go b/src/runtime/testdata/testsyscall/testsyscall.go
new file mode 100644
index 0000000000..23cca16494
--- /dev/null
+++ b/src/runtime/testdata/testsyscall/testsyscall.go
@@ -0,0 +1,65 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ _ "runtime/testdata/testsyscall/testsyscallc" // cgo (C) and Go assembly cannot coexist in one package, so the C parts live in a subpackage
+ _ "unsafe" // for go:linkname
+)
+
+//go:linkname syscall_syscall syscall.syscall
+func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+
+//go:linkname syscall_syscall6 syscall.syscall6
+func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
+
+//go:linkname syscall_syscall9 syscall.syscall9
+func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr)
+
+var (
+ syscall_check0_trampoline_addr uintptr
+ syscall_check1_trampoline_addr uintptr
+ syscall_check2_trampoline_addr uintptr
+ syscall_check3_trampoline_addr uintptr
+ syscall_check4_trampoline_addr uintptr
+ syscall_check5_trampoline_addr uintptr
+ syscall_check6_trampoline_addr uintptr
+ syscall_check7_trampoline_addr uintptr
+ syscall_check8_trampoline_addr uintptr
+ syscall_check9_trampoline_addr uintptr
+)
+
+func main() {
+ if ret, _, _ := syscall_syscall(syscall_check0_trampoline_addr, 0, 0, 0); ret != 1 {
+ panic("hello0")
+ }
+ if ret, _, _ := syscall_syscall(syscall_check1_trampoline_addr, 1, 0, 0); ret != 1 {
+ panic("hello1")
+ }
+ if ret, _, _ := syscall_syscall(syscall_check2_trampoline_addr, 1, 2, 0); ret != 1 {
+ panic("hello2")
+ }
+ if ret, _, _ := syscall_syscall(syscall_check3_trampoline_addr, 1, 2, 3); ret != 1 {
+ panic("hello3")
+ }
+ if ret, _, _ := syscall_syscall6(syscall_check4_trampoline_addr, 1, 2, 3, 4, 0, 0); ret != 1 {
+ panic("hello4")
+ }
+ if ret, _, _ := syscall_syscall6(syscall_check5_trampoline_addr, 1, 2, 3, 4, 5, 0); ret != 1 {
+ panic("hello5")
+ }
+ if ret, _, _ := syscall_syscall6(syscall_check6_trampoline_addr, 1, 2, 3, 4, 5, 6); ret != 1 {
+ panic("hello6")
+ }
+ if ret, _, _ := syscall_syscall9(syscall_check7_trampoline_addr, 1, 2, 3, 4, 5, 6, 7, 0, 0); ret != 1 {
+ panic("hello7")
+ }
+ if ret, _, _ := syscall_syscall9(syscall_check8_trampoline_addr, 1, 2, 3, 4, 5, 6, 7, 8, 0); ret != 1 {
+ panic("hello8")
+ }
+ if ret, _, _ := syscall_syscall9(syscall_check9_trampoline_addr, 1, 2, 3, 4, 5, 6, 7, 8, 9); ret != 1 {
+ panic("hello9")
+ }
+}
diff --git a/src/runtime/testdata/testsyscall/testsyscall.s b/src/runtime/testdata/testsyscall/testsyscall.s
new file mode 100644
index 0000000000..c8d556dfd9
--- /dev/null
+++ b/src/runtime/testdata/testsyscall/testsyscall.s
@@ -0,0 +1,55 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT syscall_check0_trampoline<>(SB),NOSPLIT,$0-0
+ JMP syscall_check0(SB)
+GLOBL ·syscall_check0_trampoline_addr(SB), RODATA, $8
+DATA ·syscall_check0_trampoline_addr(SB)/8, $syscall_check0_trampoline<>(SB)
+
+TEXT syscall_check1_trampoline<>(SB),NOSPLIT,$0-0
+ JMP syscall_check1(SB)
+GLOBL ·syscall_check1_trampoline_addr(SB), RODATA, $8
+DATA ·syscall_check1_trampoline_addr(SB)/8, $syscall_check1_trampoline<>(SB)
+
+TEXT syscall_check2_trampoline<>(SB),NOSPLIT,$0-0
+ JMP syscall_check2(SB)
+GLOBL ·syscall_check2_trampoline_addr(SB), RODATA, $8
+DATA ·syscall_check2_trampoline_addr(SB)/8, $syscall_check2_trampoline<>(SB)
+
+TEXT syscall_check3_trampoline<>(SB),NOSPLIT,$0-0
+ JMP syscall_check3(SB)
+GLOBL ·syscall_check3_trampoline_addr(SB), RODATA, $8
+DATA ·syscall_check3_trampoline_addr(SB)/8, $syscall_check3_trampoline<>(SB)
+
+TEXT syscall_check4_trampoline<>(SB),NOSPLIT,$0-0
+ JMP syscall_check4(SB)
+GLOBL ·syscall_check4_trampoline_addr(SB), RODATA, $8
+DATA ·syscall_check4_trampoline_addr(SB)/8, $syscall_check4_trampoline<>(SB)
+
+TEXT syscall_check5_trampoline<>(SB),NOSPLIT,$0-0
+ JMP syscall_check5(SB)
+GLOBL ·syscall_check5_trampoline_addr(SB), RODATA, $8
+DATA ·syscall_check5_trampoline_addr(SB)/8, $syscall_check5_trampoline<>(SB)
+
+TEXT syscall_check6_trampoline<>(SB),NOSPLIT,$0-0
+ JMP syscall_check6(SB)
+GLOBL ·syscall_check6_trampoline_addr(SB), RODATA, $8
+DATA ·syscall_check6_trampoline_addr(SB)/8, $syscall_check6_trampoline<>(SB)
+
+TEXT syscall_check7_trampoline<>(SB),NOSPLIT,$0-0
+ JMP syscall_check7(SB)
+GLOBL ·syscall_check7_trampoline_addr(SB), RODATA, $8
+DATA ·syscall_check7_trampoline_addr(SB)/8, $syscall_check7_trampoline<>(SB)
+
+TEXT syscall_check8_trampoline<>(SB),NOSPLIT,$0-0
+ JMP syscall_check8(SB)
+GLOBL ·syscall_check8_trampoline_addr(SB), RODATA, $8
+DATA ·syscall_check8_trampoline_addr(SB)/8, $syscall_check8_trampoline<>(SB)
+
+TEXT syscall_check9_trampoline<>(SB),NOSPLIT,$0-0
+ JMP syscall_check9(SB)
+GLOBL ·syscall_check9_trampoline_addr(SB), RODATA, $8
+DATA ·syscall_check9_trampoline_addr(SB)/8, $syscall_check9_trampoline<>(SB)
diff --git a/src/runtime/testdata/testsyscall/testsyscallc/testsyscallc.go b/src/runtime/testdata/testsyscall/testsyscallc/testsyscallc.go
new file mode 100644
index 0000000000..0b2a220b59
--- /dev/null
+++ b/src/runtime/testdata/testsyscall/testsyscallc/testsyscallc.go
@@ -0,0 +1,48 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testsyscallc
+
+/*
+int syscall_check0(void) {
+ return 1;
+}
+
+int syscall_check1(int a1) {
+ return a1 == 1;
+}
+
+int syscall_check2(int a1, int a2) {
+ return a1 == 1 && a2 == 2;
+}
+
+int syscall_check3(int a1, int a2, int a3) {
+ return a1 == 1 && a2 == 2 && a3 == 3;
+}
+
+int syscall_check4(int a1, int a2, int a3, int a4) {
+ return a1 == 1 && a2 == 2 && a3 == 3 && a4 == 4;
+}
+
+int syscall_check5(int a1, int a2, int a3, int a4, int a5) {
+ return a1 == 1 && a2 == 2 && a3 == 3 && a4 == 4 && a5 == 5;
+}
+
+int syscall_check6(int a1, int a2, int a3, int a4, int a5, int a6) {
+ return a1 == 1 && a2 == 2 && a3 == 3 && a4 == 4 && a5 == 5 && a6 == 6;
+}
+
+int syscall_check7(int a1, int a2, int a3, int a4, int a5, int a6, int a7) {
+ return a1 == 1 && a2 == 2 && a3 == 3 && a4 == 4 && a5 == 5 && a6 == 6 && a7 == 7;
+}
+
+int syscall_check8(int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8) {
+ return a1 == 1 && a2 == 2 && a3 == 3 && a4 == 4 && a5 == 5 && a6 == 6 && a7 == 7 && a8 == 8;
+}
+
+int syscall_check9(int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8, int a9) {
+ return a1 == 1 && a2 == 2 && a3 == 3 && a4 == 4 && a5 == 5 && a6 == 6 && a7 == 7 && a8 == 8 && a9 == 9;
+}
+*/
+import "C"