aboutsummaryrefslogtreecommitdiff
path: root/src/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime')
-rw-r--r--src/runtime/asm_386.s33
-rw-r--r--src/runtime/crash_cgo_test.go14
-rw-r--r--src/runtime/crash_unix_test.go2
-rw-r--r--src/runtime/debug/mod.go157
-rw-r--r--src/runtime/debug/mod_test.go75
-rw-r--r--src/runtime/defs1_netbsd_386.go1
-rw-r--r--src/runtime/defs1_netbsd_amd64.go1
-rw-r--r--src/runtime/defs1_netbsd_arm.go1
-rw-r--r--src/runtime/defs1_netbsd_arm64.go1
-rw-r--r--src/runtime/defs1_solaris_amd64.go1
-rw-r--r--src/runtime/defs_dragonfly.go1
-rw-r--r--src/runtime/defs_dragonfly_amd64.go1
-rw-r--r--src/runtime/defs_freebsd.go1
-rw-r--r--src/runtime/defs_freebsd_386.go1
-rw-r--r--src/runtime/defs_freebsd_amd64.go1
-rw-r--r--src/runtime/defs_freebsd_arm.go1
-rw-r--r--src/runtime/defs_freebsd_arm64.go1
-rw-r--r--src/runtime/defs_linux.go3
-rw-r--r--src/runtime/defs_linux_386.go3
-rw-r--r--src/runtime/defs_linux_amd64.go3
-rw-r--r--src/runtime/defs_linux_arm.go2
-rw-r--r--src/runtime/defs_linux_arm64.go3
-rw-r--r--src/runtime/defs_linux_mips64x.go3
-rw-r--r--src/runtime/defs_linux_mipsx.go3
-rw-r--r--src/runtime/defs_linux_ppc64.go3
-rw-r--r--src/runtime/defs_linux_ppc64le.go3
-rw-r--r--src/runtime/defs_linux_riscv64.go3
-rw-r--r--src/runtime/defs_linux_s390x.go3
-rw-r--r--src/runtime/defs_netbsd.go1
-rw-r--r--src/runtime/defs_openbsd.go1
-rw-r--r--src/runtime/defs_openbsd_386.go1
-rw-r--r--src/runtime/defs_openbsd_amd64.go1
-rw-r--r--src/runtime/defs_openbsd_arm.go1
-rw-r--r--src/runtime/defs_openbsd_arm64.go1
-rw-r--r--src/runtime/defs_openbsd_mips64.go1
-rw-r--r--src/runtime/defs_solaris.go1
-rw-r--r--src/runtime/export_aix_test.go1
-rw-r--r--src/runtime/export_darwin_test.go2
-rw-r--r--src/runtime/export_pipe2_test.go6
-rw-r--r--src/runtime/export_test.go20
-rw-r--r--src/runtime/export_unix_test.go1
-rw-r--r--src/runtime/funcdata.h1
-rw-r--r--src/runtime/histogram.go52
-rw-r--r--src/runtime/histogram_test.go40
-rw-r--r--src/runtime/internal/atomic/atomic_arm.s42
-rw-r--r--src/runtime/internal/syscall/asm_linux_386.s34
-rw-r--r--src/runtime/internal/syscall/asm_linux_amd64.s33
-rw-r--r--src/runtime/internal/syscall/asm_linux_arm.s32
-rw-r--r--src/runtime/internal/syscall/asm_linux_arm64.s29
-rw-r--r--src/runtime/internal/syscall/asm_linux_mips64x.s29
-rw-r--r--src/runtime/internal/syscall/asm_linux_mipsx.s34
-rw-r--r--src/runtime/internal/syscall/asm_linux_ppc64x.s28
-rw-r--r--src/runtime/internal/syscall/asm_linux_riscv64.s29
-rw-r--r--src/runtime/internal/syscall/asm_linux_s390x.s28
-rw-r--r--src/runtime/internal/syscall/syscall_linux.go12
-rw-r--r--src/runtime/memmove_ppc64x.s46
-rw-r--r--src/runtime/mfinal.go24
-rw-r--r--src/runtime/mfinal_test.go9
-rw-r--r--src/runtime/mgcpacer.go72
-rw-r--r--src/runtime/mgcpacer_test.go45
-rw-r--r--src/runtime/mgcscavenge.go58
-rw-r--r--src/runtime/mkpreempt.go41
-rw-r--r--src/runtime/nbpipe_pipe2.go13
-rw-r--r--src/runtime/nbpipe_pipe_test.go38
-rw-r--r--src/runtime/nbpipe_test.go25
-rw-r--r--src/runtime/os3_solaris.go22
-rw-r--r--src/runtime/os_aix.go9
-rw-r--r--src/runtime/os_darwin.go9
-rw-r--r--src/runtime/os_dragonfly.go11
-rw-r--r--src/runtime/os_freebsd.go11
-rw-r--r--src/runtime/os_linux.go214
-rw-r--r--src/runtime/os_netbsd.go11
-rw-r--r--src/runtime/os_openbsd.go9
-rw-r--r--src/runtime/os_openbsd_syscall2.go2
-rw-r--r--src/runtime/pprof/pprof_test.go67
-rw-r--r--src/runtime/preempt_arm64.s178
-rw-r--r--src/runtime/proc.go304
-rw-r--r--src/runtime/proc_runtime_test.go17
-rw-r--r--src/runtime/race_arm64.s18
-rw-r--r--src/runtime/runtime-gdb_test.go8
-rw-r--r--src/runtime/runtime2.go19
-rw-r--r--src/runtime/signal_unix.go16
-rw-r--r--src/runtime/sigqueue.go48
-rw-r--r--src/runtime/sigqueue_plan9.go7
-rw-r--r--src/runtime/symtab.go1
-rw-r--r--src/runtime/sys_darwin.go56
-rw-r--r--src/runtime/sys_darwin_amd64.s9
-rw-r--r--src/runtime/sys_darwin_arm64.s9
-rw-r--r--src/runtime/sys_dragonfly_amd64.s30
-rw-r--r--src/runtime/sys_freebsd_386.s32
-rw-r--r--src/runtime/sys_freebsd_amd64.s30
-rw-r--r--src/runtime/sys_freebsd_arm.s32
-rw-r--r--src/runtime/sys_freebsd_arm64.s26
-rw-r--r--src/runtime/sys_linux_386.s24
-rw-r--r--src/runtime/sys_linux_amd64.s24
-rw-r--r--src/runtime/sys_linux_arm.s23
-rw-r--r--src/runtime/sys_linux_arm64.s168
-rw-r--r--src/runtime/sys_linux_mips64x.s26
-rw-r--r--src/runtime/sys_linux_mipsx.s33
-rw-r--r--src/runtime/sys_linux_ppc64x.s20
-rw-r--r--src/runtime/sys_linux_riscv64.s24
-rw-r--r--src/runtime/sys_linux_s390x.s24
-rw-r--r--src/runtime/sys_netbsd_386.s32
-rw-r--r--src/runtime/sys_netbsd_amd64.s30
-rw-r--r--src/runtime/sys_netbsd_arm.s28
-rw-r--r--src/runtime/sys_netbsd_arm64.s24
-rw-r--r--src/runtime/sys_openbsd2.go10
-rw-r--r--src/runtime/sys_openbsd_mips64.s26
-rw-r--r--src/runtime/testdata/testprogcgo/aprof.go2
-rw-r--r--src/runtime/trace.go18
-rw-r--r--src/runtime/traceback.go6
111 files changed, 1593 insertions, 1311 deletions
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index 594cd5ed0d..e16880c950 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -937,8 +937,9 @@ aes0to15:
PAND masks<>(SB)(BX*8), X1
final1:
- AESENC X0, X1 // scramble input, xor in seed
- AESENC X1, X1 // scramble combo 2 times
+ PXOR X0, X1 // xor data with seed
+ AESENC X1, X1 // scramble combo 3 times
+ AESENC X1, X1
AESENC X1, X1
MOVL X1, (DX)
RET
@@ -971,9 +972,13 @@ aes17to32:
MOVOU (AX), X2
MOVOU -16(AX)(BX*1), X3
+ // xor with seed
+ PXOR X0, X2
+ PXOR X1, X3
+
// scramble 3 times
- AESENC X0, X2
- AESENC X1, X3
+ AESENC X2, X2
+ AESENC X3, X3
AESENC X2, X2
AESENC X3, X3
AESENC X2, X2
@@ -1000,10 +1005,15 @@ aes33to64:
MOVOU -32(AX)(BX*1), X6
MOVOU -16(AX)(BX*1), X7
- AESENC X0, X4
- AESENC X1, X5
- AESENC X2, X6
- AESENC X3, X7
+ PXOR X0, X4
+ PXOR X1, X5
+ PXOR X2, X6
+ PXOR X3, X7
+
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
AESENC X4, X4
AESENC X5, X5
@@ -1069,7 +1079,12 @@ aesloop:
DECL BX
JNE aesloop
- // 2 more scrambles to finish
+ // 3 more scrambles to finish
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
+
AESENC X4, X4
AESENC X5, X5
AESENC X6, X6
diff --git a/src/runtime/crash_cgo_test.go b/src/runtime/crash_cgo_test.go
index 8c250f72d6..37509b1292 100644
--- a/src/runtime/crash_cgo_test.go
+++ b/src/runtime/crash_cgo_test.go
@@ -234,6 +234,7 @@ func TestCgoCrashTraceback(t *testing.T) {
switch platform := runtime.GOOS + "/" + runtime.GOARCH; platform {
case "darwin/amd64":
case "linux/amd64":
+ case "linux/arm64":
case "linux/ppc64le":
default:
t.Skipf("not yet supported on %s", platform)
@@ -251,6 +252,7 @@ func TestCgoCrashTracebackGo(t *testing.T) {
switch platform := runtime.GOOS + "/" + runtime.GOARCH; platform {
case "darwin/amd64":
case "linux/amd64":
+ case "linux/arm64":
case "linux/ppc64le":
default:
t.Skipf("not yet supported on %s", platform)
@@ -284,7 +286,7 @@ func TestCgoTracebackContextPreemption(t *testing.T) {
func testCgoPprof(t *testing.T, buildArg, runArg, top, bottom string) {
t.Parallel()
- if runtime.GOOS != "linux" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "ppc64le") {
+ if runtime.GOOS != "linux" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "ppc64le" && runtime.GOARCH != "arm64") {
t.Skipf("not yet supported on %s/%s", runtime.GOOS, runtime.GOARCH)
}
testenv.MustHaveGoRun(t)
@@ -626,13 +628,11 @@ func TestSegv(t *testing.T) {
// a VDSO call via asmcgocall.
testenv.SkipFlaky(t, 50504)
}
- if testenv.Builder() == "linux-mips64le-mengzhuo" && strings.Contains(got, "runtime: unknown pc") {
- // Runtime sometimes throw "unknown pc" when generating the traceback.
- // Curiously, that doesn't seem to happen on the linux-mips64le-rtrk
- // builder.
- testenv.SkipFlaky(t, 50605)
- }
}
+ if test == "SegvInCgo" && strings.Contains(got, "runtime: unknown pc") {
+ testenv.SkipFlaky(t, 50979)
+ }
+
nowant := "runtime: "
if strings.Contains(got, nowant) {
t.Errorf("unexpectedly saw %q in output", nowant)
diff --git a/src/runtime/crash_unix_test.go b/src/runtime/crash_unix_test.go
index 1eb10f9b60..a218205af4 100644
--- a/src/runtime/crash_unix_test.go
+++ b/src/runtime/crash_unix_test.go
@@ -132,7 +132,7 @@ func TestCrashDumpsAllThreads(t *testing.T) {
out := outbuf.Bytes()
n := bytes.Count(out, []byte("main.crashDumpsAllThreadsLoop("))
if n != 4 {
- t.Errorf("found %d instances of main.loop; expected 4", n)
+ t.Errorf("found %d instances of main.crashDumpsAllThreadsLoop; expected 4", n)
t.Logf("%s", out)
}
}
diff --git a/src/runtime/debug/mod.go b/src/runtime/debug/mod.go
index 14a496a8eb..688e2581ed 100644
--- a/src/runtime/debug/mod.go
+++ b/src/runtime/debug/mod.go
@@ -5,9 +5,9 @@
package debug
import (
- "bytes"
"fmt"
"runtime"
+ "strconv"
"strings"
)
@@ -23,8 +23,8 @@ func ReadBuildInfo() (info *BuildInfo, ok bool) {
return nil, false
}
data = data[16 : len(data)-16]
- bi := &BuildInfo{}
- if err := bi.UnmarshalText([]byte(data)); err != nil {
+ bi, err := ParseBuildInfo(data)
+ if err != nil {
return nil, false
}
@@ -63,8 +63,18 @@ type BuildSetting struct {
Key, Value string
}
-func (bi *BuildInfo) MarshalText() ([]byte, error) {
- buf := &bytes.Buffer{}
+// quoteKey reports whether key is required to be quoted.
+func quoteKey(key string) bool {
+ return len(key) == 0 || strings.ContainsAny(key, "= \t\r\n\"`")
+}
+
+// quoteValue reports whether value is required to be quoted.
+func quoteValue(value string) bool {
+ return strings.ContainsAny(value, " \t\r\n\"`")
+}
+
+func (bi *BuildInfo) String() string {
+ buf := new(strings.Builder)
if bi.GoVersion != "" {
fmt.Fprintf(buf, "go\t%s\n", bi.GoVersion)
}
@@ -76,12 +86,8 @@ func (bi *BuildInfo) MarshalText() ([]byte, error) {
buf.WriteString(word)
buf.WriteByte('\t')
buf.WriteString(m.Path)
- mv := m.Version
- if mv == "" {
- mv = "(devel)"
- }
buf.WriteByte('\t')
- buf.WriteString(mv)
+ buf.WriteString(m.Version)
if m.Replace == nil {
buf.WriteByte('\t')
buf.WriteString(m.Sum)
@@ -91,27 +97,28 @@ func (bi *BuildInfo) MarshalText() ([]byte, error) {
}
buf.WriteByte('\n')
}
- if bi.Main.Path != "" {
+ if bi.Main != (Module{}) {
formatMod("mod", bi.Main)
}
for _, dep := range bi.Deps {
formatMod("dep", *dep)
}
for _, s := range bi.Settings {
- if strings.ContainsAny(s.Key, "= \t\n") {
- return nil, fmt.Errorf("invalid build setting key %q", s.Key)
+ key := s.Key
+ if quoteKey(key) {
+ key = strconv.Quote(key)
}
- if strings.Contains(s.Value, "\n") {
- return nil, fmt.Errorf("invalid build setting value for key %q: contains newline", s.Value)
+ value := s.Value
+ if quoteValue(value) {
+ value = strconv.Quote(value)
}
- fmt.Fprintf(buf, "build\t%s=%s\n", s.Key, s.Value)
+ fmt.Fprintf(buf, "build\t%s=%s\n", key, value)
}
- return buf.Bytes(), nil
+ return buf.String()
}
-func (bi *BuildInfo) UnmarshalText(data []byte) (err error) {
- *bi = BuildInfo{}
+func ParseBuildInfo(data string) (bi *BuildInfo, err error) {
lineNum := 1
defer func() {
if err != nil {
@@ -120,67 +127,69 @@ func (bi *BuildInfo) UnmarshalText(data []byte) (err error) {
}()
var (
- pathLine = []byte("path\t")
- modLine = []byte("mod\t")
- depLine = []byte("dep\t")
- repLine = []byte("=>\t")
- buildLine = []byte("build\t")
- newline = []byte("\n")
- tab = []byte("\t")
+ pathLine = "path\t"
+ modLine = "mod\t"
+ depLine = "dep\t"
+ repLine = "=>\t"
+ buildLine = "build\t"
+ newline = "\n"
+ tab = "\t"
)
- readModuleLine := func(elem [][]byte) (Module, error) {
+ readModuleLine := func(elem []string) (Module, error) {
if len(elem) != 2 && len(elem) != 3 {
return Module{}, fmt.Errorf("expected 2 or 3 columns; got %d", len(elem))
}
+ version := elem[1]
sum := ""
if len(elem) == 3 {
- sum = string(elem[2])
+ sum = elem[2]
}
return Module{
- Path: string(elem[0]),
- Version: string(elem[1]),
+ Path: elem[0],
+ Version: version,
Sum: sum,
}, nil
}
+ bi = new(BuildInfo)
var (
last *Module
- line []byte
+ line string
ok bool
)
// Reverse of BuildInfo.String(), except for go version.
for len(data) > 0 {
- line, data, ok = bytes.Cut(data, newline)
+ line, data, ok = strings.Cut(data, newline)
if !ok {
break
}
switch {
- case bytes.HasPrefix(line, pathLine):
+ case strings.HasPrefix(line, pathLine):
elem := line[len(pathLine):]
bi.Path = string(elem)
- case bytes.HasPrefix(line, modLine):
- elem := bytes.Split(line[len(modLine):], tab)
+ case strings.HasPrefix(line, modLine):
+ elem := strings.Split(line[len(modLine):], tab)
last = &bi.Main
*last, err = readModuleLine(elem)
if err != nil {
- return err
+ return nil, err
}
- case bytes.HasPrefix(line, depLine):
- elem := bytes.Split(line[len(depLine):], tab)
+ case strings.HasPrefix(line, depLine):
+ elem := strings.Split(line[len(depLine):], tab)
last = new(Module)
bi.Deps = append(bi.Deps, last)
*last, err = readModuleLine(elem)
if err != nil {
- return err
+ return nil, err
}
- case bytes.HasPrefix(line, repLine):
- elem := bytes.Split(line[len(repLine):], tab)
+ case strings.HasPrefix(line, repLine):
+ elem := strings.Split(line[len(repLine):], tab)
if len(elem) != 3 {
- return fmt.Errorf("expected 3 columns for replacement; got %d", len(elem))
+ return nil, fmt.Errorf("expected 3 columns for replacement; got %d", len(elem))
}
if last == nil {
- return fmt.Errorf("replacement with no module on previous line")
+ return nil, fmt.Errorf("replacement with no module on previous line")
}
last.Replace = &Module{
Path: string(elem[0]),
@@ -188,17 +197,63 @@ func (bi *BuildInfo) UnmarshalText(data []byte) (err error) {
Sum: string(elem[2]),
}
last = nil
- case bytes.HasPrefix(line, buildLine):
- key, val, ok := strings.Cut(string(line[len(buildLine):]), "=")
- if !ok {
- return fmt.Errorf("invalid build line")
+ case strings.HasPrefix(line, buildLine):
+ kv := line[len(buildLine):]
+ if len(kv) < 1 {
+ return nil, fmt.Errorf("build line missing '='")
+ }
+
+ var key, rawValue string
+ switch kv[0] {
+ case '=':
+ return nil, fmt.Errorf("build line with missing key")
+
+ case '`', '"':
+ rawKey, err := strconv.QuotedPrefix(kv)
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted key in build line")
+ }
+ if len(kv) == len(rawKey) {
+ return nil, fmt.Errorf("build line missing '=' after quoted key")
+ }
+ if c := kv[len(rawKey)]; c != '=' {
+ return nil, fmt.Errorf("unexpected character after quoted key: %q", c)
+ }
+ key, _ = strconv.Unquote(rawKey)
+ rawValue = kv[len(rawKey)+1:]
+
+ default:
+ var ok bool
+ key, rawValue, ok = strings.Cut(kv, "=")
+ if !ok {
+ return nil, fmt.Errorf("build line missing '=' after key")
+ }
+ if quoteKey(key) {
+ return nil, fmt.Errorf("unquoted key %q must be quoted", key)
+ }
}
- if key == "" {
- return fmt.Errorf("empty key")
+
+ var value string
+ if len(rawValue) > 0 {
+ switch rawValue[0] {
+ case '`', '"':
+ var err error
+ value, err = strconv.Unquote(rawValue)
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted value in build line")
+ }
+
+ default:
+ value = rawValue
+ if quoteValue(value) {
+ return nil, fmt.Errorf("unquoted value %q must be quoted", value)
+ }
+ }
}
- bi.Settings = append(bi.Settings, BuildSetting{Key: key, Value: val})
+
+ bi.Settings = append(bi.Settings, BuildSetting{Key: key, Value: value})
}
lineNum++
}
- return nil
+ return bi, nil
}
diff --git a/src/runtime/debug/mod_test.go b/src/runtime/debug/mod_test.go
new file mode 100644
index 0000000000..b2917692f4
--- /dev/null
+++ b/src/runtime/debug/mod_test.go
@@ -0,0 +1,75 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug_test
+
+import (
+ "reflect"
+ "runtime/debug"
+ "strings"
+ "testing"
+)
+
+// strip removes two leading tabs after each newline of s.
+func strip(s string) string {
+ replaced := strings.ReplaceAll(s, "\n\t\t", "\n")
+ if len(replaced) > 0 && replaced[0] == '\n' {
+ replaced = replaced[1:]
+ }
+ return replaced
+}
+
+func FuzzParseBuildInfoRoundTrip(f *testing.F) {
+ // Package built from outside a module, missing some fields.
+ f.Add(strip(`
+ path rsc.io/fortune
+ mod rsc.io/fortune v1.0.0
+ `))
+
+ // Package built from the standard library, missing some fields.
+ f.Add(`path cmd/test2json`)
+
+ // Package built from inside a module.
+ f.Add(strip(`
+ go 1.18
+ path example.com/m
+ mod example.com/m (devel)
+ build -compiler=gc
+ `))
+
+ // Package built in GOPATH mode.
+ f.Add(strip(`
+ go 1.18
+ path example.com/m
+ build -compiler=gc
+ `))
+
+ // Escaped build info.
+ f.Add(strip(`
+ go 1.18
+ path example.com/m
+ build CRAZY_ENV="requires\nescaping"
+ `))
+
+ f.Fuzz(func(t *testing.T, s string) {
+ bi, err := debug.ParseBuildInfo(s)
+ if err != nil {
+ // Not a round-trippable BuildInfo string.
+ t.Log(err)
+ return
+ }
+
+ // s2 could have different escaping from s.
+ // However, it should parse to exactly the same contents.
+ s2 := bi.String()
+ bi2, err := debug.ParseBuildInfo(s2)
+ if err != nil {
+ t.Fatalf("%v:\n%s", err, s2)
+ }
+
+ if !reflect.DeepEqual(bi2, bi) {
+ t.Fatalf("Parsed representation differs.\ninput:\n%s\noutput:\n%s", s, s2)
+ }
+ })
+}
diff --git a/src/runtime/defs1_netbsd_386.go b/src/runtime/defs1_netbsd_386.go
index a4548e6f06..b6e47a008d 100644
--- a/src/runtime/defs1_netbsd_386.go
+++ b/src/runtime/defs1_netbsd_386.go
@@ -7,7 +7,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x400000
diff --git a/src/runtime/defs1_netbsd_amd64.go b/src/runtime/defs1_netbsd_amd64.go
index 4b0e79ebb6..b8292fa3cc 100644
--- a/src/runtime/defs1_netbsd_amd64.go
+++ b/src/runtime/defs1_netbsd_amd64.go
@@ -7,7 +7,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x400000
diff --git a/src/runtime/defs1_netbsd_arm.go b/src/runtime/defs1_netbsd_arm.go
index 2b5d5990d3..d2cb4865b6 100644
--- a/src/runtime/defs1_netbsd_arm.go
+++ b/src/runtime/defs1_netbsd_arm.go
@@ -7,7 +7,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x400000
diff --git a/src/runtime/defs1_netbsd_arm64.go b/src/runtime/defs1_netbsd_arm64.go
index 740dc77658..7776fe1d99 100644
--- a/src/runtime/defs1_netbsd_arm64.go
+++ b/src/runtime/defs1_netbsd_arm64.go
@@ -7,7 +7,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x400000
diff --git a/src/runtime/defs1_solaris_amd64.go b/src/runtime/defs1_solaris_amd64.go
index 19e8a2512e..3c13f33331 100644
--- a/src/runtime/defs1_solaris_amd64.go
+++ b/src/runtime/defs1_solaris_amd64.go
@@ -13,7 +13,6 @@ const (
_ETIMEDOUT = 0x91
_EWOULDBLOCK = 0xb
_EINPROGRESS = 0x96
- _ENOSYS = 0x59
_PROT_NONE = 0x0
_PROT_READ = 0x1
diff --git a/src/runtime/defs_dragonfly.go b/src/runtime/defs_dragonfly.go
index 47a2e4d123..952163b555 100644
--- a/src/runtime/defs_dragonfly.go
+++ b/src/runtime/defs_dragonfly.go
@@ -31,7 +31,6 @@ const (
EFAULT = C.EFAULT
EBUSY = C.EBUSY
EAGAIN = C.EAGAIN
- ENOSYS = C.ENOSYS
O_NONBLOCK = C.O_NONBLOCK
O_CLOEXEC = C.O_CLOEXEC
diff --git a/src/runtime/defs_dragonfly_amd64.go b/src/runtime/defs_dragonfly_amd64.go
index f3c6ecd04b..4358c1e0c2 100644
--- a/src/runtime/defs_dragonfly_amd64.go
+++ b/src/runtime/defs_dragonfly_amd64.go
@@ -10,7 +10,6 @@ const (
_EFAULT = 0xe
_EBUSY = 0x10
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x20000
diff --git a/src/runtime/defs_freebsd.go b/src/runtime/defs_freebsd.go
index 9ba97c8459..3fbd580ac5 100644
--- a/src/runtime/defs_freebsd.go
+++ b/src/runtime/defs_freebsd.go
@@ -48,7 +48,6 @@ const (
EINTR = C.EINTR
EFAULT = C.EFAULT
EAGAIN = C.EAGAIN
- ENOSYS = C.ENOSYS
O_NONBLOCK = C.O_NONBLOCK
O_CLOEXEC = C.O_CLOEXEC
diff --git a/src/runtime/defs_freebsd_386.go b/src/runtime/defs_freebsd_386.go
index f822934d58..ff4dcfa5fe 100644
--- a/src/runtime/defs_freebsd_386.go
+++ b/src/runtime/defs_freebsd_386.go
@@ -16,7 +16,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_ETIMEDOUT = 0x3c
_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_freebsd_amd64.go b/src/runtime/defs_freebsd_amd64.go
index 0b696cf227..f537c898e4 100644
--- a/src/runtime/defs_freebsd_amd64.go
+++ b/src/runtime/defs_freebsd_amd64.go
@@ -16,7 +16,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_ETIMEDOUT = 0x3c
_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_freebsd_arm.go b/src/runtime/defs_freebsd_arm.go
index b6f3e790cf..2e20ae7d78 100644
--- a/src/runtime/defs_freebsd_arm.go
+++ b/src/runtime/defs_freebsd_arm.go
@@ -16,7 +16,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_ETIMEDOUT = 0x3c
_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_freebsd_arm64.go b/src/runtime/defs_freebsd_arm64.go
index 0759a1238f..1838108fdb 100644
--- a/src/runtime/defs_freebsd_arm64.go
+++ b/src/runtime/defs_freebsd_arm64.go
@@ -16,7 +16,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_ETIMEDOUT = 0x3c
_O_NONBLOCK = 0x4
diff --git a/src/runtime/defs_linux.go b/src/runtime/defs_linux.go
index fa94e388f4..e55bb6bbbc 100644
--- a/src/runtime/defs_linux.go
+++ b/src/runtime/defs_linux.go
@@ -37,7 +37,6 @@ const (
EINTR = C.EINTR
EAGAIN = C.EAGAIN
ENOMEM = C.ENOMEM
- ENOSYS = C.ENOSYS
PROT_NONE = C.PROT_NONE
PROT_READ = C.PROT_READ
@@ -91,6 +90,8 @@ const (
SIGPWR = C.SIGPWR
SIGSYS = C.SIGSYS
+ SIGRTMIN = C.SIGRTMIN
+
FPE_INTDIV = C.FPE_INTDIV
FPE_INTOVF = C.FPE_INTOVF
FPE_FLTDIV = C.FPE_FLTDIV
diff --git a/src/runtime/defs_linux_386.go b/src/runtime/defs_linux_386.go
index 24fb58bbf8..5376bded2b 100644
--- a/src/runtime/defs_linux_386.go
+++ b/src/runtime/defs_linux_386.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -64,6 +63,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_amd64.go b/src/runtime/defs_linux_amd64.go
index 36da22f8ce..da4d357532 100644
--- a/src/runtime/defs_linux_amd64.go
+++ b/src/runtime/defs_linux_amd64.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -64,6 +63,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_arm.go b/src/runtime/defs_linux_arm.go
index 13d06969e3..18aa0931e5 100644
--- a/src/runtime/defs_linux_arm.go
+++ b/src/runtime/defs_linux_arm.go
@@ -11,7 +11,6 @@ const (
_EINTR = 0x4
_ENOMEM = 0xc
_EAGAIN = 0xb
- _ENOSYS = 0x26
_PROT_NONE = 0
_PROT_READ = 0x1
@@ -63,6 +62,7 @@ const (
_SIGIO = 0x1d
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_arm64.go b/src/runtime/defs_linux_arm64.go
index f9ee9cbc35..c5d7d7e3fd 100644
--- a/src/runtime/defs_linux_arm64.go
+++ b/src/runtime/defs_linux_arm64.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -64,6 +63,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_mips64x.go b/src/runtime/defs_linux_mips64x.go
index 2601082ee1..e645248131 100644
--- a/src/runtime/defs_linux_mips64x.go
+++ b/src/runtime/defs_linux_mips64x.go
@@ -12,7 +12,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x59
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -66,6 +65,8 @@ const (
_SIGXCPU = 0x1e
_SIGXFSZ = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_mipsx.go b/src/runtime/defs_linux_mipsx.go
index 37651ef7e4..5afb6f423f 100644
--- a/src/runtime/defs_linux_mipsx.go
+++ b/src/runtime/defs_linux_mipsx.go
@@ -12,7 +12,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x59
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -66,6 +65,8 @@ const (
_SIGXCPU = 0x1e
_SIGXFSZ = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_ppc64.go b/src/runtime/defs_linux_ppc64.go
index c7aa7234c1..f3e305e34e 100644
--- a/src/runtime/defs_linux_ppc64.go
+++ b/src/runtime/defs_linux_ppc64.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -63,6 +62,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_ppc64le.go b/src/runtime/defs_linux_ppc64le.go
index c7aa7234c1..f3e305e34e 100644
--- a/src/runtime/defs_linux_ppc64le.go
+++ b/src/runtime/defs_linux_ppc64le.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -63,6 +62,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_riscv64.go b/src/runtime/defs_linux_riscv64.go
index 747e26bc4b..29496acdcb 100644
--- a/src/runtime/defs_linux_riscv64.go
+++ b/src/runtime/defs_linux_riscv64.go
@@ -10,7 +10,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -65,6 +64,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_linux_s390x.go b/src/runtime/defs_linux_s390x.go
index 740d8100c5..817a29ed30 100644
--- a/src/runtime/defs_linux_s390x.go
+++ b/src/runtime/defs_linux_s390x.go
@@ -10,7 +10,6 @@ const (
_EINTR = 0x4
_EAGAIN = 0xb
_ENOMEM = 0xc
- _ENOSYS = 0x26
_PROT_NONE = 0x0
_PROT_READ = 0x1
@@ -64,6 +63,8 @@ const (
_SIGPWR = 0x1e
_SIGSYS = 0x1f
+ _SIGRTMIN = 0x20
+
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
diff --git a/src/runtime/defs_netbsd.go b/src/runtime/defs_netbsd.go
index df8bc579f2..6b084c06b5 100644
--- a/src/runtime/defs_netbsd.go
+++ b/src/runtime/defs_netbsd.go
@@ -33,7 +33,6 @@ const (
EINTR = C.EINTR
EFAULT = C.EFAULT
EAGAIN = C.EAGAIN
- ENOSYS = C.ENOSYS
O_NONBLOCK = C.O_NONBLOCK
O_CLOEXEC = C.O_CLOEXEC
diff --git a/src/runtime/defs_openbsd.go b/src/runtime/defs_openbsd.go
index ec7d82a33c..cbf53eb9ef 100644
--- a/src/runtime/defs_openbsd.go
+++ b/src/runtime/defs_openbsd.go
@@ -34,7 +34,6 @@ const (
EINTR = C.EINTR
EFAULT = C.EFAULT
EAGAIN = C.EAGAIN
- ENOSYS = C.ENOSYS
O_NONBLOCK = C.O_NONBLOCK
O_CLOEXEC = C.O_CLOEXEC
diff --git a/src/runtime/defs_openbsd_386.go b/src/runtime/defs_openbsd_386.go
index a866ec880a..35c559bb45 100644
--- a/src/runtime/defs_openbsd_386.go
+++ b/src/runtime/defs_openbsd_386.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x10000
diff --git a/src/runtime/defs_openbsd_amd64.go b/src/runtime/defs_openbsd_amd64.go
index 46f1245201..d7432daedd 100644
--- a/src/runtime/defs_openbsd_amd64.go
+++ b/src/runtime/defs_openbsd_amd64.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x10000
diff --git a/src/runtime/defs_openbsd_arm.go b/src/runtime/defs_openbsd_arm.go
index 6f128c4284..471b3063fb 100644
--- a/src/runtime/defs_openbsd_arm.go
+++ b/src/runtime/defs_openbsd_arm.go
@@ -9,7 +9,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x10000
diff --git a/src/runtime/defs_openbsd_arm64.go b/src/runtime/defs_openbsd_arm64.go
index d2b947feb2..5300ab087c 100644
--- a/src/runtime/defs_openbsd_arm64.go
+++ b/src/runtime/defs_openbsd_arm64.go
@@ -10,7 +10,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x10000
diff --git a/src/runtime/defs_openbsd_mips64.go b/src/runtime/defs_openbsd_mips64.go
index 28d70b7a01..a8789ef451 100644
--- a/src/runtime/defs_openbsd_mips64.go
+++ b/src/runtime/defs_openbsd_mips64.go
@@ -16,7 +16,6 @@ const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
- _ENOSYS = 0x4e
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x10000
diff --git a/src/runtime/defs_solaris.go b/src/runtime/defs_solaris.go
index ec16c9dcce..f626498525 100644
--- a/src/runtime/defs_solaris.go
+++ b/src/runtime/defs_solaris.go
@@ -43,7 +43,6 @@ const (
ETIMEDOUT = C.ETIMEDOUT
EWOULDBLOCK = C.EWOULDBLOCK
EINPROGRESS = C.EINPROGRESS
- ENOSYS = C.ENOSYS
PROT_NONE = C.PROT_NONE
PROT_READ = C.PROT_READ
diff --git a/src/runtime/export_aix_test.go b/src/runtime/export_aix_test.go
index 162552d04c..51df951738 100644
--- a/src/runtime/export_aix_test.go
+++ b/src/runtime/export_aix_test.go
@@ -5,3 +5,4 @@
package runtime
var Fcntl = syscall_fcntl1
+var SetNonblock = setNonblock
diff --git a/src/runtime/export_darwin_test.go b/src/runtime/export_darwin_test.go
index e9b6eb36da..66e2c02c4f 100644
--- a/src/runtime/export_darwin_test.go
+++ b/src/runtime/export_darwin_test.go
@@ -11,3 +11,5 @@ func Fcntl(fd, cmd, arg uintptr) (uintptr, uintptr) {
}
return uintptr(r), 0
}
+
+var SetNonblock = setNonblock
diff --git a/src/runtime/export_pipe2_test.go b/src/runtime/export_pipe2_test.go
index bdf39c60df..8d49009b43 100644
--- a/src/runtime/export_pipe2_test.go
+++ b/src/runtime/export_pipe2_test.go
@@ -7,9 +7,5 @@
package runtime
func Pipe() (r, w int32, errno int32) {
- r, w, errno = pipe2(0)
- if errno == _ENOSYS {
- return pipe()
- }
- return r, w, errno
+ return pipe2(0)
}
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 0f21838721..0ac15ce82c 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -1199,6 +1199,8 @@ func (th *TimeHistogram) Record(duration int64) {
(*timeHistogram)(th).record(duration)
}
+var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
+
func SetIntArgRegs(a int) int {
lock(&finlock)
old := intArgRegs
@@ -1330,3 +1332,21 @@ func Releasem() {
}
var Timediv = timediv
+
+type PIController struct {
+ piController
+}
+
+func NewPIController(kp, ti, tt, min, max float64) *PIController {
+ return &PIController{piController{
+ kp: kp,
+ ti: ti,
+ tt: tt,
+ min: min,
+ max: max,
+ }}
+}
+
+func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
+ return c.piController.next(input, setpoint, period)
+}
diff --git a/src/runtime/export_unix_test.go b/src/runtime/export_unix_test.go
index 9f046b95e0..4a587cb780 100644
--- a/src/runtime/export_unix_test.go
+++ b/src/runtime/export_unix_test.go
@@ -9,7 +9,6 @@ package runtime
import "unsafe"
var NonblockingPipe = nonblockingPipe
-var SetNonblock = setNonblock
var Closeonexec = closeonexec
func sigismember(mask *sigset, i int) bool {
diff --git a/src/runtime/funcdata.h b/src/runtime/funcdata.h
index a454dcaa69..2e2bb30446 100644
--- a/src/runtime/funcdata.h
+++ b/src/runtime/funcdata.h
@@ -20,6 +20,7 @@
#define FUNCDATA_OpenCodedDeferInfo 4 /* info for func with open-coded defers */
#define FUNCDATA_ArgInfo 5
#define FUNCDATA_ArgLiveInfo 6
+#define FUNCDATA_WrapInfo 7
// Pseudo-assembly statements.
diff --git a/src/runtime/histogram.go b/src/runtime/histogram.go
index 0cccbcca16..cd7e29a8c8 100644
--- a/src/runtime/histogram.go
+++ b/src/runtime/histogram.go
@@ -47,7 +47,7 @@ const (
// │ └---- Next 4 bits -> sub-bucket 1
// └------- Bit 5 set -> super-bucket 2
//
- // Following this pattern, bucket 45 will have the bit 48 set. We don't
+ // Following this pattern, super-bucket 44 will have the bit 47 set. We don't
// have any buckets for higher values, so the highest sub-bucket will
// contain values of 2^48-1 nanoseconds or approx. 3 days. This range is
// more than enough to handle durations produced by the runtime.
@@ -139,36 +139,30 @@ func float64NegInf() float64 {
func timeHistogramMetricsBuckets() []float64 {
b := make([]float64, timeHistTotalBuckets+1)
b[0] = float64NegInf()
- for i := 0; i < timeHistNumSuperBuckets; i++ {
- superBucketMin := uint64(0)
- // The (inclusive) minimum for the first non-negative bucket is 0.
- if i > 0 {
- // The minimum for the second bucket will be
- // 1 << timeHistSubBucketBits, indicating that all
- // sub-buckets are represented by the next timeHistSubBucketBits
- // bits.
- // Thereafter, we shift up by 1 each time, so we can represent
- // this pattern as (i-1)+timeHistSubBucketBits.
- superBucketMin = uint64(1) << uint(i-1+timeHistSubBucketBits)
- }
- // subBucketShift is the amount that we need to shift the sub-bucket
- // index to combine it with the bucketMin.
- subBucketShift := uint(0)
- if i > 1 {
- // The first two super buckets are exact with respect to integers,
- // so we'll never have to shift the sub-bucket index. Thereafter,
- // we shift up by 1 with each subsequent bucket.
- subBucketShift = uint(i - 2)
- }
+ // Super-bucket 0 has no bits above timeHistSubBucketBits
+ // set, so just iterate over each bucket and assign the
+ // incrementing bucket.
+ for i := 0; i < timeHistNumSubBuckets; i++ {
+ bucketNanos := uint64(i)
+ b[i+1] = float64(bucketNanos) / 1e9
+ }
+ // Generate the rest of the super-buckets. It's easier to reason
+ // about if we cut out the 0'th bucket, so subtract one since
+ // we just handled that bucket.
+ for i := 0; i < timeHistNumSuperBuckets-1; i++ {
for j := 0; j < timeHistNumSubBuckets; j++ {
- // j is the sub-bucket index. By shifting the index into position to
- // combine with the bucket minimum, we obtain the minimum value for that
- // sub-bucket.
- subBucketMin := superBucketMin + (uint64(j) << subBucketShift)
-
- // Convert the subBucketMin which is in nanoseconds to a float64 seconds value.
+ // Set the super-bucket bit.
+ bucketNanos := uint64(1) << (i + timeHistSubBucketBits)
+ // Set the sub-bucket bits.
+ bucketNanos |= uint64(j) << i
+ // The index for this bucket is going to be the (i+1)'th super bucket
+ // (note that we're starting from zero, but handled the first super-bucket
+ // earlier, so we need to compensate), and the j'th sub bucket.
+ // Add 1 because we left space for -Inf.
+ bucketIndex := (i+1)*timeHistNumSubBuckets + j + 1
+ // Convert nanoseconds to seconds via a division.
// These values will all be exactly representable by a float64.
- b[i*timeHistNumSubBuckets+j+1] = float64(subBucketMin) / 1e9
+ b[bucketIndex] = float64(bucketNanos) / 1e9
}
}
b[len(b)-1] = float64Inf()
diff --git a/src/runtime/histogram_test.go b/src/runtime/histogram_test.go
index dbc64fa559..b12b65a41e 100644
--- a/src/runtime/histogram_test.go
+++ b/src/runtime/histogram_test.go
@@ -68,3 +68,43 @@ func TestTimeHistogram(t *testing.T) {
dummyTimeHistogram = TimeHistogram{}
}
+
+func TestTimeHistogramMetricsBuckets(t *testing.T) {
+ buckets := TimeHistogramMetricsBuckets()
+
+ nonInfBucketsLen := TimeHistNumSubBuckets * TimeHistNumSuperBuckets
+ expBucketsLen := nonInfBucketsLen + 2 // Count -Inf and +Inf.
+ if len(buckets) != expBucketsLen {
+ t.Fatalf("unexpected length of buckets: got %d, want %d", len(buckets), expBucketsLen)
+ }
+ // Check the first non-Inf 2*TimeHistNumSubBuckets buckets in order, skipping the
+ // first bucket which should be -Inf (checked later).
+ //
+ // Because of the way this scheme works, the bottom TimeHistNumSubBuckets
+ // buckets are fully populated, and then the next TimeHistNumSubBuckets
+ // have the TimeHistSubBucketBits'th bit set, while the bottom are once
+ // again fully populated.
+ for i := 1; i <= 2*TimeHistNumSubBuckets+1; i++ {
+ if got, want := buckets[i], float64(i-1)/1e9; got != want {
+ t.Errorf("expected bucket %d to have value %e, got %e", i, want, got)
+ }
+ }
+ // Check some values.
+ idxToBucket := map[int]float64{
+ 0: math.Inf(-1),
+ 33: float64(0x10<<1) / 1e9,
+ 34: float64(0x11<<1) / 1e9,
+ 49: float64(0x10<<2) / 1e9,
+ 58: float64(0x19<<2) / 1e9,
+ 65: float64(0x10<<3) / 1e9,
+ 513: float64(0x10<<31) / 1e9,
+ 519: float64(0x16<<31) / 1e9,
+ expBucketsLen - 2: float64(0x1f<<43) / 1e9,
+ expBucketsLen - 1: math.Inf(1),
+ }
+ for idx, bucket := range idxToBucket {
+ if got, want := buckets[idx], bucket; got != want {
+ t.Errorf("expected bucket %d to have value %e, got %e", idx, want, got)
+ }
+ }
+}
diff --git a/src/runtime/internal/atomic/atomic_arm.s b/src/runtime/internal/atomic/atomic_arm.s
index be3fd3a395..92cbe8a34f 100644
--- a/src/runtime/internal/atomic/atomic_arm.s
+++ b/src/runtime/internal/atomic/atomic_arm.s
@@ -229,16 +229,22 @@ store64loop:
// functions tail-call into the appropriate implementation, which
// means they must not open a frame. Hence, when they go down the
// panic path, at that point they push the LR to create a real frame
-// (they don't need to pop it because panic won't return).
+// (they don't need to pop it because panic won't return; however, we
+// do need to set the SP delta back).
+
+// Check if R1 is 8-byte aligned, panic if not.
+// Clobbers R2.
+#define CHECK_ALIGN \
+ AND.S $7, R1, R2 \
+ BEQ 4(PC) \
+ MOVW.W R14, -4(R13) /* prepare a real frame */ \
+ BL ·panicUnaligned(SB) \
+ ADD $4, R13 /* compensate SP delta */
TEXT ·Cas64(SB),NOSPLIT,$-4-21
NO_LOCAL_POINTERS
MOVW addr+0(FP), R1
- // make unaligned atomic access panic
- AND.S $7, R1, R2
- BEQ 3(PC)
- MOVW.W R14, -4(R13) // prepare a real frame
- BL ·panicUnaligned(SB)
+ CHECK_ALIGN
MOVB runtime·goarm(SB), R11
CMP $7, R11
@@ -249,11 +255,7 @@ TEXT ·Cas64(SB),NOSPLIT,$-4-21
TEXT ·Xadd64(SB),NOSPLIT,$-4-20
NO_LOCAL_POINTERS
MOVW addr+0(FP), R1
- // make unaligned atomic access panic
- AND.S $7, R1, R2
- BEQ 3(PC)
- MOVW.W R14, -4(R13) // prepare a real frame
- BL ·panicUnaligned(SB)
+ CHECK_ALIGN
MOVB runtime·goarm(SB), R11
CMP $7, R11
@@ -264,11 +266,7 @@ TEXT ·Xadd64(SB),NOSPLIT,$-4-20
TEXT ·Xchg64(SB),NOSPLIT,$-4-20
NO_LOCAL_POINTERS
MOVW addr+0(FP), R1
- // make unaligned atomic access panic
- AND.S $7, R1, R2
- BEQ 3(PC)
- MOVW.W R14, -4(R13) // prepare a real frame
- BL ·panicUnaligned(SB)
+ CHECK_ALIGN
MOVB runtime·goarm(SB), R11
CMP $7, R11
@@ -279,11 +277,7 @@ TEXT ·Xchg64(SB),NOSPLIT,$-4-20
TEXT ·Load64(SB),NOSPLIT,$-4-12
NO_LOCAL_POINTERS
MOVW addr+0(FP), R1
- // make unaligned atomic access panic
- AND.S $7, R1, R2
- BEQ 3(PC)
- MOVW.W R14, -4(R13) // prepare a real frame
- BL ·panicUnaligned(SB)
+ CHECK_ALIGN
MOVB runtime·goarm(SB), R11
CMP $7, R11
@@ -294,11 +288,7 @@ TEXT ·Load64(SB),NOSPLIT,$-4-12
TEXT ·Store64(SB),NOSPLIT,$-4-12
NO_LOCAL_POINTERS
MOVW addr+0(FP), R1
- // make unaligned atomic access panic
- AND.S $7, R1, R2
- BEQ 3(PC)
- MOVW.W R14, -4(R13) // prepare a real frame
- BL ·panicUnaligned(SB)
+ CHECK_ALIGN
MOVB runtime·goarm(SB), R11
CMP $7, R11
diff --git a/src/runtime/internal/syscall/asm_linux_386.s b/src/runtime/internal/syscall/asm_linux_386.s
new file mode 100644
index 0000000000..15aae4d8bd
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_386.s
@@ -0,0 +1,34 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// See ../sys_linux_386.s for the reason why we always use int 0x80
+// instead of the glibc-specific "CALL 0x10(GS)".
+#define INVOKE_SYSCALL INT $0x80
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+//
+// Syscall # in AX, args in BX CX DX SI DI BP, return in AX
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ MOVL num+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL a4+16(FP), SI
+ MOVL a5+20(FP), DI
+ MOVL a6+24(FP), BP
+ INVOKE_SYSCALL
+ CMPL AX, $0xfffff001
+ JLS ok
+ MOVL $-1, r1+28(FP)
+ MOVL $0, r2+32(FP)
+ NEGL AX
+ MOVL AX, errno+36(FP)
+ RET
+ok:
+ MOVL AX, r1+28(FP)
+ MOVL DX, r2+32(FP)
+ MOVL $0, errno+36(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_amd64.s b/src/runtime/internal/syscall/asm_linux_amd64.s
new file mode 100644
index 0000000000..961d9bd640
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_amd64.s
@@ -0,0 +1,33 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+//
+// Syscall # in AX, args in DI SI DX R10 R8 R9, return in AX DX.
+//
+// Note that this differs from "standard" ABI convention, which would pass 4th
+// arg in CX, not R10.
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ MOVQ num+0(FP), AX // syscall entry
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ a4+32(FP), R10
+ MOVQ a5+40(FP), R8
+ MOVQ a6+48(FP), R9
+ SYSCALL
+ CMPQ AX, $0xfffffffffffff001
+ JLS ok
+ MOVQ $-1, r1+56(FP)
+ MOVQ $0, r2+64(FP)
+ NEGQ AX
+ MOVQ AX, errno+72(FP)
+ RET
+ok:
+ MOVQ AX, r1+56(FP)
+ MOVQ DX, r2+64(FP)
+ MOVQ $0, errno+72(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_arm.s b/src/runtime/internal/syscall/asm_linux_arm.s
new file mode 100644
index 0000000000..dbf1826d94
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_arm.s
@@ -0,0 +1,32 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ MOVW num+0(FP), R7 // syscall entry
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ MOVW a4+16(FP), R3
+ MOVW a5+20(FP), R4
+ MOVW a6+24(FP), R5
+ SWI $0
+ MOVW $0xfffff001, R6
+ CMP R6, R0
+ BLS ok
+ MOVW $-1, R1
+ MOVW R1, r1+28(FP)
+ MOVW $0, R2
+ MOVW R2, r2+32(FP)
+ RSB $0, R0, R0
+ MOVW R0, errno+36(FP)
+ RET
+ok:
+ MOVW R0, r1+28(FP)
+ MOVW R1, r2+32(FP)
+ MOVW $0, R0
+ MOVW R0, errno+36(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_arm64.s b/src/runtime/internal/syscall/asm_linux_arm64.s
new file mode 100644
index 0000000000..83e862ff72
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_arm64.s
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ MOVD num+0(FP), R8 // syscall entry
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD a4+32(FP), R3
+ MOVD a5+40(FP), R4
+ MOVD a6+48(FP), R5
+ SVC
+ CMN $4095, R0
+ BCC ok
+ MOVD $-1, R4
+ MOVD R4, r1+56(FP)
+ MOVD ZR, r2+64(FP)
+ NEG R0, R0
+ MOVD R0, errno+72(FP)
+ RET
+ok:
+ MOVD R0, r1+56(FP)
+ MOVD R1, r2+64(FP)
+ MOVD ZR, errno+72(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_mips64x.s b/src/runtime/internal/syscall/asm_linux_mips64x.s
new file mode 100644
index 0000000000..0e88a2d8ac
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_mips64x.s
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (mips64 || mips64le)
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ MOVV num+0(FP), R2 // syscall entry
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV a4+32(FP), R7
+ MOVV a5+40(FP), R8
+ MOVV a6+48(FP), R9
+ SYSCALL
+ BEQ R7, ok
+ MOVV $-1, R1
+ MOVV R1, r1+56(FP)
+ MOVV R0, r2+64(FP)
+ MOVV R2, errno+72(FP)
+ RET
+ok:
+ MOVV R2, r1+56(FP)
+ MOVV R3, r2+64(FP)
+ MOVV R0, errno+72(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_mipsx.s b/src/runtime/internal/syscall/asm_linux_mipsx.s
new file mode 100644
index 0000000000..050029eaa1
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_mipsx.s
@@ -0,0 +1,34 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (mips || mipsle)
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+//
+// The 5th and 6th arg go at sp+16, sp+20.
+// Note that frame size of 20 means that 24 bytes gets reserved on stack.
+TEXT ·Syscall6(SB),NOSPLIT,$20-40
+ MOVW num+0(FP), R2 // syscall entry
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW a4+16(FP), R7
+ MOVW a5+20(FP), R8
+ MOVW a6+24(FP), R9
+ MOVW R8, 16(R29)
+ MOVW R9, 20(R29)
+ SYSCALL
+ BEQ R7, ok
+ MOVW $-1, R1
+ MOVW R1, r1+28(FP)
+ MOVW R0, r2+32(FP)
+ MOVW R2, errno+36(FP)
+ RET
+ok:
+ MOVW R2, r1+28(FP)
+ MOVW R3, r2+32(FP)
+ MOVW R0, errno+36(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_ppc64x.s b/src/runtime/internal/syscall/asm_linux_ppc64x.s
new file mode 100644
index 0000000000..8cf8737df8
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_ppc64x.s
@@ -0,0 +1,28 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && (ppc64 || ppc64le)
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ MOVD num+0(FP), R9 // syscall entry
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD a4+32(FP), R6
+ MOVD a5+40(FP), R7
+ MOVD a6+48(FP), R8
+ SYSCALL R9
+ MOVD R0, r2+64(FP) // r2 is not used. Always set to 0.
+ BVC ok
+ MOVD $-1, R4
+ MOVD R4, r1+56(FP)
+ MOVD R3, errno+72(FP)
+ RET
+ok:
+ MOVD R3, r1+56(FP)
+ MOVD R0, errno+72(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_riscv64.s b/src/runtime/internal/syscall/asm_linux_riscv64.s
new file mode 100644
index 0000000000..a8652fdd6b
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_riscv64.s
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ MOV num+0(FP), A7 // syscall entry
+ MOV a1+8(FP), A0
+ MOV a2+16(FP), A1
+ MOV a3+24(FP), A2
+ MOV a4+32(FP), A3
+ MOV a5+40(FP), A4
+ MOV a6+48(FP), A5
+ ECALL
+ MOV $-4096, T0
+ BLTU T0, A0, err
+ MOV A0, r1+56(FP)
+ MOV A1, r2+64(FP)
+ MOV ZERO, errno+72(FP)
+ RET
+err:
+ MOV $-1, T0
+ MOV T0, r1+56(FP)
+ MOV ZERO, r2+64(FP)
+ SUB A0, ZERO, A0
+ MOV A0, errno+72(FP)
+ RET
diff --git a/src/runtime/internal/syscall/asm_linux_s390x.s b/src/runtime/internal/syscall/asm_linux_s390x.s
new file mode 100644
index 0000000000..1b27f29390
--- /dev/null
+++ b/src/runtime/internal/syscall/asm_linux_s390x.s
@@ -0,0 +1,28 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ MOVD num+0(FP), R1 // syscall entry
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD a4+32(FP), R5
+ MOVD a5+40(FP), R6
+ MOVD a6+48(FP), R7
+ SYSCALL
+ MOVD $0xfffffffffffff001, R8
+ CMPUBLT R2, R8, ok
+ MOVD $-1, r1+56(FP)
+ MOVD $0, r2+64(FP)
+ NEG R2, R2
+ MOVD R2, errno+72(FP)
+ RET
+ok:
+ MOVD R2, r1+56(FP)
+ MOVD R3, r2+64(FP)
+ MOVD $0, errno+72(FP)
+ RET
diff --git a/src/runtime/internal/syscall/syscall_linux.go b/src/runtime/internal/syscall/syscall_linux.go
new file mode 100644
index 0000000000..06d5f21e7c
--- /dev/null
+++ b/src/runtime/internal/syscall/syscall_linux.go
@@ -0,0 +1,12 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package syscall provides the syscall primitives required for the runtime.
+package syscall
+
+// TODO(https://go.dev/issue/51087): This package is incomplete and currently
+// only contains very minimal support for Linux.
+
+// Syscall6 calls system call number 'num' with arguments a1-6.
+func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr)
diff --git a/src/runtime/memmove_ppc64x.s b/src/runtime/memmove_ppc64x.s
index e69e71a4a1..2152fb4f69 100644
--- a/src/runtime/memmove_ppc64x.s
+++ b/src/runtime/memmove_ppc64x.s
@@ -139,36 +139,38 @@ backwardtailloop:
BC 16, 0, backwardtailloop // bndz
nobackwardtail:
- BC 4, 5, LR // ble CR1 lr
+ BC 4, 5, LR // blelr cr1, return if DWORDS == 0
+ SRDCC $2,DWORDS,QWORDS // Compute number of 32B blocks and compare to 0
+ BNE backward32setup // If QWORDS != 0, start the 32B copy loop.
-backwardlarge:
- MOVD DWORDS, CTR
- SUB TGT, SRC, TMP // Use vsx if moving
- CMP TMP, $32 // at least 32 byte chunks
- BLT backwardlargeloop // and distance >= 32
- SRDCC $2,DWORDS,QWORDS // 32 byte chunks
- BNE backward32setup
+backward24:
+ // DWORDS is a value between 1-3.
+ CMP DWORDS, $2
-backwardlargeloop:
MOVD -8(SRC), TMP
- SUB $8,SRC
MOVD TMP, -8(TGT)
- SUB $8,TGT
- BC 16, 0, backwardlargeloop // bndz
+ BC 12, 0, LR // bltlr, return if DWORDS == 1
+
+ MOVD -16(SRC), TMP
+ MOVD TMP, -16(TGT)
+ BC 12, 2, LR // beqlr, return if DWORDS == 2
+
+ MOVD -24(SRC), TMP
+ MOVD TMP, -24(TGT)
RET
backward32setup:
- MOVD QWORDS, CTR // set up loop ctr
- MOVD $16, IDX16 // 32 bytes at a time
+ ANDCC $3,DWORDS // Compute remaining DWORDS and compare to 0
+ MOVD QWORDS, CTR // set up loop ctr
+ MOVD $16, IDX16 // 32 bytes at a time
backward32loop:
SUB $32, TGT
SUB $32, SRC
- LXVD2X (R0)(TGT), VS32 // load 16 bytes
- LXVD2X (IDX16)(TGT), VS33
- STXVD2X VS32, (R0)(SRC) // store 16 bytes
- STXVD2X VS33, (IDX16)(SRC)
- BC 16, 0, backward32loop // bndz
- BC 4, 5, LR // ble CR1 lr
- MOVD DWORDS, CTR
- BR backwardlargeloop
+ LXVD2X (R0)(SRC), VS32 // load 16x2 bytes
+ LXVD2X (IDX16)(SRC), VS33
+ STXVD2X VS32, (R0)(TGT) // store 16x2 bytes
+ STXVD2X VS33, (IDX16)(TGT)
+ BC 16, 0, backward32loop // bndz
+ BC 12, 2, LR // beqlr, return if DWORDS == 0
+ BR backward24
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index e2ac5d4993..10623e4d67 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -187,21 +187,15 @@ func runfinq() {
f := &fb.fin[i-1]
var regs abi.RegArgs
- var framesz uintptr
- if argRegs > 0 {
- // The args can always be passed in registers if they're
- // available, because platforms we support always have no
- // argument registers available, or more than 2.
- //
- // But unfortunately because we can have an arbitrary
- // amount of returns and it would be complex to try and
- // figure out how many of those can get passed in registers,
- // just conservatively assume none of them do.
- framesz = f.nret
- } else {
- // Need to pass arguments on the stack too.
- framesz = unsafe.Sizeof((any)(nil)) + f.nret
- }
+ // The args may be passed in registers or on stack. Even for
+ // the register case, we still need the spill slots.
+ // TODO: revisit if we remove spill slots.
+ //
+ // Unfortunately because we can have an arbitrary
+ // amount of returns and it would be complex to try and
+ // figure out how many of those can get passed in registers,
+ // just conservatively assume none of them do.
+ framesz := unsafe.Sizeof((any)(nil)) + f.nret
if framecap < framesz {
// The frame does not contain pointers interesting for GC,
// all not yet finalized objects are stored in finq.
diff --git a/src/runtime/mfinal_test.go b/src/runtime/mfinal_test.go
index 04ba7a6830..902ccc57f8 100644
--- a/src/runtime/mfinal_test.go
+++ b/src/runtime/mfinal_test.go
@@ -42,6 +42,15 @@ func TestFinalizerType(t *testing.T) {
{func(x *int) any { return Tintptr(x) }, func(v *int) { finalize(v) }},
{func(x *int) any { return (*Tint)(x) }, func(v *Tint) { finalize((*int)(v)) }},
{func(x *int) any { return (*Tint)(x) }, func(v Tinter) { finalize((*int)(v.(*Tint))) }},
+ // Test case for argument spill slot.
+ // If the spill slot was not counted for the frame size, it will (incorrectly) choose
+ // call32 as the result has (exactly) 32 bytes. When the argument actually spills,
+ // it clobbers the caller's frame (likely the return PC).
+ {func(x *int) any { return x }, func(v any) [4]int64 {
+ print() // force spill
+ finalize(v.(*int))
+ return [4]int64{}
+ }},
}
for i, tt := range finalizerTests {
diff --git a/src/runtime/mgcpacer.go b/src/runtime/mgcpacer.go
index f06560201a..d54dbc26c2 100644
--- a/src/runtime/mgcpacer.go
+++ b/src/runtime/mgcpacer.go
@@ -154,6 +154,8 @@ type gcControllerState struct {
// For goexperiment.PacerRedesign.
consMarkController piController
+ _ uint32 // Padding for atomics on 32-bit platforms.
+
// heapGoal is the goal heapLive for when next GC ends.
// Set to ^uint64(0) if disabled.
//
@@ -670,10 +672,31 @@ func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) floa
currentConsMark := (float64(c.heapLive-c.trigger) * (utilization + idleUtilization)) /
(float64(scanWork) * (1 - utilization))
- // Update cons/mark controller.
- // Period for this is 1 GC cycle.
+ // Update cons/mark controller. The time period for this is 1 GC cycle.
+ //
+ // This use of a PI controller might seem strange. So, here's an explanation:
+ //
+ // currentConsMark represents the consMark we *should've* had to be perfectly
+ // on-target for this cycle. Given that we assume the next GC will be like this
+ // one in the steady-state, it stands to reason that we should just pick that
+ // as our next consMark. In practice, however, currentConsMark is too noisy:
+ // we're going to be wildly off-target in each GC cycle if we do that.
+ //
+ // What we do instead is make a long-term assumption: there is some steady-state
+ // consMark value, but it's obscured by noise. By constantly shooting for this
+ // noisy-but-perfect consMark value, the controller will bounce around a bit,
+ // but its average behavior, in aggregate, should be less noisy and closer to
+ // the true long-term consMark value, provided it's tuned to be slightly overdamped.
+ var ok bool
oldConsMark := c.consMark
- c.consMark = c.consMarkController.next(c.consMark, currentConsMark, 1.0)
+ c.consMark, ok = c.consMarkController.next(c.consMark, currentConsMark, 1.0)
+ if !ok {
+ // The error spiraled out of control. This is incredibly unlikely seeing
+ // as this controller is essentially just a smoothing function, but it might
+ // mean that something went very wrong with how currentConsMark was calculated.
+ // Just reset consMark and keep going.
+ c.consMark = 0
+ }
if debug.gcpacertrace > 0 {
printlock()
@@ -681,6 +704,9 @@ func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) floa
print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ")
print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.stackScan+c.globalsScan, " B exp.) ")
print("in ", c.trigger, " B -> ", c.heapLive, " B (∆goal ", int64(c.heapLive)-int64(c.heapGoal), ", cons/mark ", oldConsMark, ")")
+ if !ok {
+ print("[controller reset]")
+ }
println()
printunlock()
}
@@ -1263,15 +1289,38 @@ type piController struct {
// PI controller state.
errIntegral float64 // Integral of the error from t=0 to now.
+
+ // Error flags.
+ errOverflow bool // Set if errIntegral ever overflowed.
+ inputOverflow bool // Set if an operation with the input overflowed.
}
-func (c *piController) next(input, setpoint, period float64) float64 {
+// next provides a new sample to the controller.
+//
+// input is the sample, setpoint is the desired point, and period is how much
+// time (in whatever unit makes the most sense) has passed since the last sample.
+//
+// Returns a new value for the variable it's controlling, and whether the operation
+// completed successfully. One reason this might fail is if error has been growing
+// in an unbounded manner, to the point of overflow.
+//
+// In the specific case where an error overflow occurs, the errOverflow field will be
+// set and the rest of the controller's internal state will be fully reset.
+func (c *piController) next(input, setpoint, period float64) (float64, bool) {
// Compute the raw output value.
prop := c.kp * (setpoint - input)
rawOutput := prop + c.errIntegral
// Clamp rawOutput into output.
output := rawOutput
+ if isInf(output) || isNaN(output) {
+ // The input had a large enough magnitude that either it was already
+ // overflowed, or some operation with it overflowed.
+ // Set a flag and reset. That's the safest thing to do.
+ c.reset()
+ c.inputOverflow = true
+ return c.min, false
+ }
if output < c.min {
output = c.min
} else if output > c.max {
@@ -1281,6 +1330,19 @@ func (c *piController) next(input, setpoint, period float64) float64 {
// Update the controller's state.
if c.ti != 0 && c.tt != 0 {
c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput)
+ if isInf(c.errIntegral) || isNaN(c.errIntegral) {
+ // So much error has accumulated that we managed to overflow.
+ // The assumptions around the controller have likely broken down.
+ // Set a flag and reset. That's the safest thing to do.
+ c.reset()
+ c.errOverflow = true
+ return c.min, false
+ }
}
- return output
+ return output, true
+}
+
+// reset resets the controller state, except for controller error flags.
+func (c *piController) reset() {
+ c.errIntegral = 0
}
diff --git a/src/runtime/mgcpacer_test.go b/src/runtime/mgcpacer_test.go
index 9ec0e5172b..10a8ca2520 100644
--- a/src/runtime/mgcpacer_test.go
+++ b/src/runtime/mgcpacer_test.go
@@ -715,3 +715,48 @@ func (f float64Stream) limit(min, max float64) float64Stream {
return v
}
}
+
+func FuzzPIController(f *testing.F) {
+ isNormal := func(x float64) bool {
+ return !math.IsInf(x, 0) && !math.IsNaN(x)
+ }
+ isPositive := func(x float64) bool {
+ return isNormal(x) && x > 0
+ }
+ // Seed with constants from controllers in the runtime.
+ // It's not critical that we keep these in sync, they're just
+ // reasonable seed inputs.
+ f.Add(0.3375, 3.2e6, 1e9, 0.001, 1000.0, 0.01)
+ f.Add(0.9, 4.0, 1000.0, -1000.0, 1000.0, 0.84)
+ f.Fuzz(func(t *testing.T, kp, ti, tt, min, max, setPoint float64) {
+ // Ignore uninteresting invalid parameters. These parameters
+ // are constant, so in practice surprising values will be documented
+ // or will be otherwise immediately visible.
+ //
+ // We just want to make sure that given a non-Inf, non-NaN input,
+ // we always get a non-Inf, non-NaN output.
+ if !isPositive(kp) || !isPositive(ti) || !isPositive(tt) {
+ return
+ }
+ if !isNormal(min) || !isNormal(max) || min > max {
+ return
+ }
+ // Use a random source, but make it deterministic.
+ rs := rand.New(rand.NewSource(800))
+ randFloat64 := func() float64 {
+ return math.Float64frombits(rs.Uint64())
+ }
+ p := NewPIController(kp, ti, tt, min, max)
+ state := float64(0)
+ for i := 0; i < 100; i++ {
+ input := randFloat64()
+ // Ignore the "ok" parameter. We're just trying to break it.
+ // state is intentionally completely uncorrelated with the input.
+ var ok bool
+ state, ok = p.Next(input, setPoint, 1.0)
+ if !isNormal(state) {
+ t.Fatalf("got NaN or Inf result from controller: %f %v", state, ok)
+ }
+ }
+ })
+}
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index c27e189af9..5f50378adf 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -165,11 +165,12 @@ func gcPaceScavenger(heapGoal, lastHeapGoal uint64) {
// Sleep/wait state of the background scavenger.
var scavenge struct {
- lock mutex
- g *g
- parked bool
- timer *timer
- sysmonWake uint32 // Set atomically.
+ lock mutex
+ g *g
+ parked bool
+ timer *timer
+ sysmonWake uint32 // Set atomically.
+ printControllerReset bool // Whether the scavenger is in cooldown.
}
// readyForScavenger signals sysmon to wake the scavenger because
@@ -295,8 +296,14 @@ func bgscavenge(c chan int) {
max: 1000.0, // 1000:1
}
// It doesn't really matter what value we start at, but we can't be zero, because
- // that'll cause divide-by-zero issues.
- critSleepRatio := 0.001
+ // that'll cause divide-by-zero issues. Pick something conservative which we'll
+ // also use as a fallback.
+ const startingCritSleepRatio = 0.001
+ critSleepRatio := startingCritSleepRatio
+ // Duration left in nanoseconds during which we avoid using the controller and
+ // we hold critSleepRatio at a conservative value. Used if the controller's
+ // assumptions fail to hold.
+ controllerCooldown := int64(0)
for {
released := uintptr(0)
crit := float64(0)
@@ -383,9 +390,22 @@ func bgscavenge(c chan int) {
// because of the additional overheads of using scavenged memory.
crit *= 1 + scavengeCostRatio
- // Go to sleep for our current sleepNS.
+ // Go to sleep based on how much time we spent doing work.
slept := scavengeSleep(int64(crit / critSleepRatio))
+ // Stop here if we're cooling down from the controller.
+ if controllerCooldown > 0 {
+ // crit and slept aren't exact measures of time, but it's OK to be a bit
+ // sloppy here. We're just hoping we're avoiding some transient bad behavior.
+ t := slept + int64(crit)
+ if t > controllerCooldown {
+ controllerCooldown = 0
+ } else {
+ controllerCooldown -= t
+ }
+ continue
+ }
+
// Calculate the CPU time spent.
//
// This may be slightly inaccurate with respect to GOMAXPROCS, but we're
@@ -395,7 +415,20 @@ func bgscavenge(c chan int) {
cpuFraction := float64(crit) / ((float64(slept) + crit) * float64(gomaxprocs))
// Update the critSleepRatio, adjusting until we reach our ideal fraction.
- critSleepRatio = critSleepController.next(cpuFraction, idealFraction, float64(slept)+crit)
+ var ok bool
+ critSleepRatio, ok = critSleepController.next(cpuFraction, idealFraction, float64(slept)+crit)
+ if !ok {
+ // The core assumption of the controller, that we can get a proportional
+ // response, broke down. This may be transient, so temporarily switch to
+ // sleeping a fixed, conservative amount.
+ critSleepRatio = startingCritSleepRatio
+ controllerCooldown = 5e9 // 5 seconds.
+
+ // Signal the scav trace printer to output this.
+ lock(&scavenge.lock)
+ scavenge.printControllerReset = true
+ unlock(&scavenge.lock)
+ }
}
}
@@ -434,7 +467,11 @@ func (p *pageAlloc) scavenge(nbytes uintptr) uintptr {
// released should be the amount of memory released since the last time this
// was called, and forced indicates whether the scavenge was forced by the
// application.
+//
+// scavenge.lock must be held.
func printScavTrace(gen uint32, released uintptr, forced bool) {
+ assertLockHeld(&scavenge.lock)
+
printlock()
print("scav ", gen, " ",
released>>10, " KiB work, ",
@@ -443,6 +480,9 @@ func printScavTrace(gen uint32, released uintptr, forced bool) {
)
if forced {
print(" (forced)")
+ } else if scavenge.printControllerReset {
+ print(" [controller reset]")
+ scavenge.printControllerReset = false
}
println()
printunlock()
diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go
index 17c9b75d69..37a8cf8a5d 100644
--- a/src/runtime/mkpreempt.go
+++ b/src/runtime/mkpreempt.go
@@ -122,7 +122,7 @@ func header(arch string) {
fmt.Fprintf(out, "// Code generated by mkpreempt.go; DO NOT EDIT.\n\n")
if beLe[arch] {
base := arch[:len(arch)-1]
- fmt.Fprintf(out, "//go:build %s || %sle\n", base, base)
+ fmt.Fprintf(out, "//go:build %s || %sle\n\n", base, base)
}
fmt.Fprintf(out, "#include \"go_asm.h\"\n")
fmt.Fprintf(out, "#include \"textflag.h\"\n\n")
@@ -147,8 +147,9 @@ type layout struct {
type regPos struct {
pos int
- op string
- reg string
+ saveOp string
+ restoreOp string
+ reg string
// If this register requires special save and restore, these
// give those operations with a %d placeholder for the stack
@@ -157,7 +158,12 @@ type regPos struct {
}
func (l *layout) add(op, reg string, size int) {
- l.regs = append(l.regs, regPos{op: op, reg: reg, pos: l.stack})
+ l.regs = append(l.regs, regPos{saveOp: op, restoreOp: op, reg: reg, pos: l.stack})
+ l.stack += size
+}
+
+func (l *layout) add2(sop, rop, reg string, size int) {
+ l.regs = append(l.regs, regPos{saveOp: sop, restoreOp: rop, reg: reg, pos: l.stack})
l.stack += size
}
@@ -171,7 +177,7 @@ func (l *layout) save() {
if reg.save != "" {
p(reg.save, reg.pos)
} else {
- p("%s %s, %d(%s)", reg.op, reg.reg, reg.pos, l.sp)
+ p("%s %s, %d(%s)", reg.saveOp, reg.reg, reg.pos, l.sp)
}
}
}
@@ -182,7 +188,7 @@ func (l *layout) restore() {
if reg.restore != "" {
p(reg.restore, reg.pos)
} else {
- p("%s %d(%s), %s", reg.op, reg.pos, l.sp, reg.reg)
+ p("%s %d(%s), %s", reg.restoreOp, reg.pos, l.sp, reg.reg)
}
}
}
@@ -324,12 +330,13 @@ func genARM64() {
// R27 (REGTMP), R28 (g), R29 (FP), R30 (LR), R31 (SP) are special
// and not saved here.
var l = layout{sp: "RSP", stack: 8} // add slot to save PC of interrupted instruction
- for i := 0; i <= 26; i++ {
+ for i := 0; i < 26; i += 2 {
if i == 18 {
+ i--
continue // R18 is not used, skip
}
- reg := fmt.Sprintf("R%d", i)
- l.add("MOVD", reg, 8)
+ reg := fmt.Sprintf("(R%d, R%d)", i, i+1)
+ l.add2("STP", "LDP", reg, 16)
}
// Add flag registers.
l.addSpecial(
@@ -342,9 +349,9 @@ func genARM64() {
8)
// TODO: FPCR? I don't think we'll change it, so no need to save.
// Add floating point registers F0-F31.
- for i := 0; i <= 31; i++ {
- reg := fmt.Sprintf("F%d", i)
- l.add("FMOVD", reg, 8)
+ for i := 0; i < 31; i += 2 {
+ reg := fmt.Sprintf("(F%d, F%d)", i, i+1)
+ l.add2("FSTPD", "FLDPD", reg, 16)
}
if l.stack%16 != 0 {
l.stack += 8 // SP needs 16-byte alignment
@@ -353,10 +360,8 @@ func genARM64() {
// allocate frame, save PC of interrupted instruction (in LR)
p("MOVD R30, %d(RSP)", -l.stack)
p("SUB $%d, RSP", l.stack)
- p("#ifdef GOOS_linux")
p("MOVD R29, -8(RSP)") // save frame pointer (only used on Linux)
p("SUB $8, RSP, R29") // set up new frame pointer
- p("#endif")
// On iOS, save the LR again after decrementing SP. We run the
// signal handler on the G stack (as it doesn't support sigaltstack),
// so any writes below SP may be clobbered.
@@ -369,11 +374,9 @@ func genARM64() {
l.restore()
p("MOVD %d(RSP), R30", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it
- p("#ifdef GOOS_linux")
- p("MOVD -8(RSP), R29") // restore frame pointer
- p("#endif")
- p("MOVD (RSP), R27") // load PC to REGTMP
- p("ADD $%d, RSP", l.stack+16) // pop frame (including the space pushed by sigctxt.pushCall)
+ p("MOVD -8(RSP), R29") // restore frame pointer
+ p("MOVD (RSP), R27") // load PC to REGTMP
+ p("ADD $%d, RSP", l.stack+16) // pop frame (including the space pushed by sigctxt.pushCall)
p("JMP (R27)")
}
diff --git a/src/runtime/nbpipe_pipe2.go b/src/runtime/nbpipe_pipe2.go
index 6a555bcd99..22d60b4a63 100644
--- a/src/runtime/nbpipe_pipe2.go
+++ b/src/runtime/nbpipe_pipe2.go
@@ -7,16 +7,5 @@
package runtime
func nonblockingPipe() (r, w int32, errno int32) {
- r, w, errno = pipe2(_O_NONBLOCK | _O_CLOEXEC)
- if errno == -_ENOSYS {
- r, w, errno = pipe()
- if errno != 0 {
- return -1, -1, errno
- }
- closeonexec(r)
- setNonblock(r)
- closeonexec(w)
- setNonblock(w)
- }
- return r, w, errno
+ return pipe2(_O_NONBLOCK | _O_CLOEXEC)
}
diff --git a/src/runtime/nbpipe_pipe_test.go b/src/runtime/nbpipe_pipe_test.go
new file mode 100644
index 0000000000..c8cb3cf691
--- /dev/null
+++ b/src/runtime/nbpipe_pipe_test.go
@@ -0,0 +1,38 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin
+
+package runtime_test
+
+import (
+ "runtime"
+ "syscall"
+ "testing"
+)
+
+func TestSetNonblock(t *testing.T) {
+ t.Parallel()
+
+ r, w, errno := runtime.Pipe()
+ if errno != 0 {
+ t.Fatal(syscall.Errno(errno))
+ }
+ defer func() {
+ runtime.Close(r)
+ runtime.Close(w)
+ }()
+
+ checkIsPipe(t, r, w)
+
+ runtime.SetNonblock(r)
+ runtime.SetNonblock(w)
+ checkNonblocking(t, r, "reader")
+ checkNonblocking(t, w, "writer")
+
+ runtime.Closeonexec(r)
+ runtime.Closeonexec(w)
+ checkCloseonexec(t, r, "reader")
+ checkCloseonexec(t, w, "writer")
+}
diff --git a/src/runtime/nbpipe_test.go b/src/runtime/nbpipe_test.go
index 36342cfde8..b6869e7974 100644
--- a/src/runtime/nbpipe_test.go
+++ b/src/runtime/nbpipe_test.go
@@ -66,28 +66,3 @@ func checkCloseonexec(t *testing.T, fd int32, name string) {
t.Errorf("FD_CLOEXEC not set in %s flags %#x", name, flags)
}
}
-
-func TestSetNonblock(t *testing.T) {
- t.Parallel()
-
- r, w, errno := runtime.Pipe()
- if errno != 0 {
- t.Fatal(syscall.Errno(errno))
- }
- defer func() {
- runtime.Close(r)
- runtime.Close(w)
- }()
-
- checkIsPipe(t, r, w)
-
- runtime.SetNonblock(r)
- runtime.SetNonblock(w)
- checkNonblocking(t, r, "reader")
- checkNonblocking(t, w, "writer")
-
- runtime.Closeonexec(r)
- runtime.Closeonexec(w)
- checkCloseonexec(t, r, "reader")
- checkCloseonexec(t, w, "writer")
-}
diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go
index 2e946656d0..5aee04d5a8 100644
--- a/src/runtime/os3_solaris.go
+++ b/src/runtime/os3_solaris.go
@@ -562,13 +562,6 @@ func write1(fd uintptr, buf unsafe.Pointer, nbyte int32) int32 {
}
//go:nosplit
-func pipe() (r, w int32, errno int32) {
- var p [2]int32
- _, e := sysvicall1Err(&libc_pipe, uintptr(noescape(unsafe.Pointer(&p))))
- return p[0], p[1], int32(e)
-}
-
-//go:nosplit
func pipe2(flags int32) (r, w int32, errno int32) {
var p [2]int32
_, e := sysvicall2Err(&libc_pipe2, uintptr(noescape(unsafe.Pointer(&p))), uintptr(flags))
@@ -580,12 +573,6 @@ func closeonexec(fd int32) {
fcntl(fd, _F_SETFD, _FD_CLOEXEC)
}
-//go:nosplit
-func setNonblock(fd int32) {
- flags := fcntl(fd, _F_GETFL, 0)
- fcntl(fd, _F_SETFL, flags|_O_NONBLOCK)
-}
-
func osyield1()
//go:nosplit
@@ -634,3 +621,12 @@ func sysauxv(auxv []uintptr) {
}
}
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go
index aeff593d50..292ff94795 100644
--- a/src/runtime/os_aix.go
+++ b/src/runtime/os_aix.go
@@ -373,3 +373,12 @@ func setNonblock(fd int32) {
flags := fcntl(fd, _F_GETFL, 0)
fcntl(fd, _F_SETFL, flags|_O_NONBLOCK)
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go
index 0f0eb6c6fd..9065b76375 100644
--- a/src/runtime/os_darwin.go
+++ b/src/runtime/os_darwin.go
@@ -459,3 +459,12 @@ func sysargs(argc int32, argv **byte) {
func signalM(mp *m, sig int) {
pthread_kill(pthread(mp.procid), uint32(sig))
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go
index cba2e42ab0..a56706b415 100644
--- a/src/runtime/os_dragonfly.go
+++ b/src/runtime/os_dragonfly.go
@@ -62,10 +62,8 @@ func kqueue() int32
//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
-func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
func closeonexec(fd int32)
-func setNonblock(fd int32)
// From DragonFly's <sys/sysctl.h>
const (
@@ -324,3 +322,12 @@ func raise(sig uint32) {
func signalM(mp *m, sig int) {
lwp_kill(-1, int32(mp.procid), sig)
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_freebsd.go b/src/runtime/os_freebsd.go
index c63b0e3d69..e4d15474d8 100644
--- a/src/runtime/os_freebsd.go
+++ b/src/runtime/os_freebsd.go
@@ -47,10 +47,8 @@ func kqueue() int32
//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
-func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
func closeonexec(fd int32)
-func setNonblock(fd int32)
// From FreeBSD's <sys/sysctl.h>
const (
@@ -460,3 +458,12 @@ func raise(sig uint32) {
func signalM(mp *m, sig int) {
thr_kill(thread(mp.procid), sig)
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index 32a1e1b4f7..efb54ff20e 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -8,9 +8,15 @@ import (
"internal/abi"
"internal/goarch"
"runtime/internal/atomic"
+ "runtime/internal/syscall"
"unsafe"
)
+// sigPerThreadSyscall is the same signal (SIGSETXID) used by glibc for
+// per-thread syscalls on Linux. We use it for the same purpose in non-cgo
+// binaries.
+const sigPerThreadSyscall = _SIGRTMIN + 1
+
type mOS struct {
// profileTimer holds the ID of the POSIX interval timer for profiling CPU
// usage on this thread.
@@ -21,6 +27,10 @@ type mOS struct {
// are in signal handling code, access to that field uses atomic operations.
profileTimer int32
profileTimerValid uint32
+
+ // needPerThreadSyscall indicates that a per-thread syscall is required
+ // for doAllThreadsSyscall.
+ needPerThreadSyscall atomic.Uint8
}
//go:noescape
@@ -436,9 +446,7 @@ func osyield_no_g() {
osyield()
}
-func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
-func setNonblock(fd int32)
const (
_si_max_size = 128
@@ -664,3 +672,205 @@ func setThreadCPUProfiler(hz int32) {
mp.profileTimer = timerid
atomic.Store(&mp.profileTimerValid, 1)
}
+
+// perThreadSyscallArgs contains the system call number, arguments, and
+// expected return values for a system call to be executed on all threads.
+type perThreadSyscallArgs struct {
+ trap uintptr
+ a1 uintptr
+ a2 uintptr
+ a3 uintptr
+ a4 uintptr
+ a5 uintptr
+ a6 uintptr
+ r1 uintptr
+ r2 uintptr
+}
+
+// perThreadSyscall is the system call to execute for the ongoing
+// doAllThreadsSyscall.
+//
+// perThreadSyscall may only be written while mp.needPerThreadSyscall == 0 on
+// all Ms.
+var perThreadSyscall perThreadSyscallArgs
+
+// syscall_runtime_doAllThreadsSyscall executes a specified system call on
+// all Ms.
+//
+// The system call is expected to succeed and return the same value on every
+// thread. If any threads do not match, the runtime throws.
+//
+//go:linkname syscall_runtime_doAllThreadsSyscall syscall.runtime_doAllThreadsSyscall
+//go:uintptrescapes
+func syscall_runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ if iscgo {
+ // In cgo, we are not aware of threads created in C, so this approach will not work.
+ panic("doAllThreadsSyscall not supported with cgo enabled")
+ }
+
+ // STW to guarantee that user goroutines see an atomic change to thread
+ // state. Without STW, goroutines could migrate Ms while change is in
+ // progress and e.g., see state old -> new -> old -> new.
+ //
+ // N.B. Internally, this function does not depend on STW to
+ // successfully change every thread. It is only needed for user
+ // expectations, per above.
+ stopTheWorld("doAllThreadsSyscall")
+
+ // This function depends on several properties:
+ //
+ // 1. All OS threads that already exist are associated with an M in
+ // allm. i.e., we won't miss any pre-existing threads.
+ // 2. All Ms listed in allm will eventually have an OS thread exist.
+ // i.e., they will set procid and be able to receive signals.
+ // 3. OS threads created after we read allm will clone from a thread
+ // that has executed the system call. i.e., they inherit the
+ // modified state.
+ //
+ // We achieve these through different mechanisms:
+ //
+ // 1. Addition of new Ms to allm in allocm happens before clone of its
+ // OS thread later in newm.
+ // 2. newm does acquirem to avoid being preempted, ensuring that new Ms
+ // created in allocm will eventually reach OS thread clone later in
+ // newm.
+ // 3. We take allocmLock for write here to prevent allocation of new Ms
+ // while this function runs. Per (1), this prevents clone of OS
+ // threads that are not yet in allm.
+ allocmLock.lock()
+
+ // Disable preemption, preventing us from changing Ms, as we handle
+ // this M specially.
+ //
+ // N.B. STW and lock() above do this as well, this is added for extra
+ // clarity.
+ acquirem()
+
+ // N.B. allocmLock also prevents concurrent execution of this function,
+ // serializing use of perThreadSyscall, mp.needPerThreadSyscall, and
+ // ensuring all threads execute system calls from multiple calls in the
+ // same order.
+
+ r1, r2, errno := syscall.Syscall6(trap, a1, a2, a3, a4, a5, a6)
+ if GOARCH == "ppc64" || GOARCH == "ppc64le" {
+ // TODO(https://go.dev/issue/51192): ppc64 doesn't use r2.
+ r2 = 0
+ }
+ if errno != 0 {
+ releasem(getg().m)
+ allocmLock.unlock()
+ startTheWorld()
+ return r1, r2, errno
+ }
+
+ perThreadSyscall = perThreadSyscallArgs{
+ trap: trap,
+ a1: a1,
+ a2: a2,
+ a3: a3,
+ a4: a4,
+ a5: a5,
+ a6: a6,
+ r1: r1,
+ r2: r2,
+ }
+
+ // Wait for all threads to start.
+ //
+ // As described above, some Ms have been added to allm prior to
+ // allocmLock, but not yet completed OS clone and set procid.
+ //
+ // At minimum we must wait for a thread to set procid before we can
+ // send it a signal.
+ //
+ // We take this one step further and wait for all threads to start
+ // before sending any signals. This prevents system calls from getting
+ // applied twice: once in the parent and once in the child, like so:
+ //
+ // A B C
+ // add C to allm
+ // doAllThreadsSyscall
+ // allocmLock.lock()
+ // signal B
+ // <receive signal>
+ // execute syscall
+ // <signal return>
+ // clone C
+ // <thread start>
+ // set procid
+ // signal C
+ // <receive signal>
+ // execute syscall
+ // <signal return>
+ //
+ // In this case, thread C inherited the syscall-modified state from
+ // thread B and did not need to execute the syscall, but did anyway
+ // because doAllThreadsSyscall could not be sure whether it was
+ // required.
+ //
+ // Some system calls may not be idempotent, so we ensure each thread
+ // executes the system call exactly once.
+ for mp := allm; mp != nil; mp = mp.alllink {
+ for atomic.Load64(&mp.procid) == 0 {
+ // Thread is starting.
+ osyield()
+ }
+ }
+
+ // Signal every other thread, where they will execute perThreadSyscall
+ // from the signal handler.
+ gp := getg()
+ tid := gp.m.procid
+ for mp := allm; mp != nil; mp = mp.alllink {
+ if atomic.Load64(&mp.procid) == tid {
+ // Our thread already performed the syscall.
+ continue
+ }
+ mp.needPerThreadSyscall.Store(1)
+ signalM(mp, sigPerThreadSyscall)
+ }
+
+ // Wait for all threads to complete.
+ for mp := allm; mp != nil; mp = mp.alllink {
+ if mp.procid == tid {
+ continue
+ }
+ for mp.needPerThreadSyscall.Load() != 0 {
+ osyield()
+ }
+ }
+
+ perThreadSyscall = perThreadSyscallArgs{}
+
+ releasem(getg().m)
+ allocmLock.unlock()
+ startTheWorld()
+
+ return r1, r2, errno
+}
+
+// runPerThreadSyscall runs perThreadSyscall for this M if required.
+//
+// This function throws if the system call returns with anything other than the
+// expected values.
+//go:nosplit
+func runPerThreadSyscall() {
+ gp := getg()
+ if gp.m.needPerThreadSyscall.Load() == 0 {
+ return
+ }
+
+ args := perThreadSyscall
+ r1, r2, errno := syscall.Syscall6(args.trap, args.a1, args.a2, args.a3, args.a4, args.a5, args.a6)
+ if GOARCH == "ppc64" || GOARCH == "ppc64le" {
+ // TODO(https://go.dev/issue/51192): ppc64 doesn't use r2.
+ r2 = 0
+ }
+ if errno != 0 || r1 != args.r1 || r2 != args.r2 {
+ print("trap:", args.trap, ", a123456=[", args.a1, ",", args.a2, ",", args.a3, ",", args.a4, ",", args.a5, ",", args.a6, "]\n")
+ print("results: got {r1=", r1, ",r2=", r2, ",errno=", errno, "}, want {r1=", args.r1, ",r2=", args.r2, ",errno=0\n")
+ throw("AllThreadsSyscall6 results differ between threads; runtime corrupted")
+ }
+
+ gp.m.needPerThreadSyscall.Store(0)
+}
diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go
index cd9508c706..88a4a8b90e 100644
--- a/src/runtime/os_netbsd.go
+++ b/src/runtime/os_netbsd.go
@@ -78,10 +78,8 @@ func kqueue() int32
//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
-func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
func closeonexec(fd int32)
-func setNonblock(fd int32)
const (
_ESRCH = 3
@@ -428,3 +426,12 @@ func raise(sig uint32) {
func signalM(mp *m, sig int) {
lwp_kill(int32(mp.procid), sig)
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go
index 2d0e71de53..1a00b890db 100644
--- a/src/runtime/os_openbsd.go
+++ b/src/runtime/os_openbsd.go
@@ -286,3 +286,12 @@ func raise(sig uint32) {
func signalM(mp *m, sig int) {
thrkill(int32(mp.procid), sig)
}
+
+// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
+// number.
+const sigPerThreadSyscall = 1 << 31
+
+//go:nosplit
+func runPerThreadSyscall() {
+ throw("runPerThreadSyscall only valid on linux")
+}
diff --git a/src/runtime/os_openbsd_syscall2.go b/src/runtime/os_openbsd_syscall2.go
index 99542fb2de..a48f5fa88a 100644
--- a/src/runtime/os_openbsd_syscall2.go
+++ b/src/runtime/os_openbsd_syscall2.go
@@ -70,7 +70,6 @@ func sigprocmask(how int32, new, old *sigset) {
}
}
-func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
//go:noescape
@@ -95,6 +94,5 @@ func nanotime1() int64
func sigaltstack(new, old *stackt)
func closeonexec(fd int32)
-func setNonblock(fd int32)
func walltime() (sec int64, nsec int32)
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index 1a44ab7ad7..322579cdc4 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -794,7 +794,7 @@ func use(x [8 << 18]byte) {}
func TestBlockProfile(t *testing.T) {
type TestCase struct {
name string
- f func()
+ f func(*testing.T)
stk []string
re string
}
@@ -903,7 +903,7 @@ func TestBlockProfile(t *testing.T) {
runtime.SetBlockProfileRate(1)
defer runtime.SetBlockProfileRate(0)
for _, test := range tests {
- test.f()
+ test.f(t)
}
t.Run("debug=1", func(t *testing.T) {
@@ -979,42 +979,73 @@ func containsStack(got [][]string, want []string) bool {
return false
}
-const blockDelay = 10 * time.Millisecond
+// awaitBlockedGoroutine spins on runtime.Gosched until a runtime stack dump
+// shows a goroutine in the given state with a stack frame in
+// runtime/pprof.<fName>.
+func awaitBlockedGoroutine(t *testing.T, state, fName string) {
+ re := fmt.Sprintf(`(?m)^goroutine \d+ \[%s\]:\n(?:.+\n\t.+\n)*runtime/pprof\.%s`, regexp.QuoteMeta(state), fName)
+ r := regexp.MustCompile(re)
-func blockChanRecv() {
+ if deadline, ok := t.Deadline(); ok {
+ if d := time.Until(deadline); d > 1*time.Second {
+ timer := time.AfterFunc(d-1*time.Second, func() {
+ debug.SetTraceback("all")
+ panic(fmt.Sprintf("timed out waiting for %#q", re))
+ })
+ defer timer.Stop()
+ }
+ }
+
+ buf := make([]byte, 64<<10)
+ for {
+ runtime.Gosched()
+ n := runtime.Stack(buf, true)
+ if n == len(buf) {
+ // Buffer wasn't large enough for a full goroutine dump.
+ // Resize it and try again.
+ buf = make([]byte, 2*len(buf))
+ continue
+ }
+ if r.Match(buf[:n]) {
+ return
+ }
+ }
+}
+
+func blockChanRecv(t *testing.T) {
c := make(chan bool)
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "chan receive", "blockChanRecv")
c <- true
}()
<-c
}
-func blockChanSend() {
+func blockChanSend(t *testing.T) {
c := make(chan bool)
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "chan send", "blockChanSend")
<-c
}()
c <- true
}
-func blockChanClose() {
+func blockChanClose(t *testing.T) {
c := make(chan bool)
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "chan receive", "blockChanClose")
close(c)
}()
<-c
}
-func blockSelectRecvAsync() {
+func blockSelectRecvAsync(t *testing.T) {
const numTries = 3
c := make(chan bool, 1)
c2 := make(chan bool, 1)
go func() {
for i := 0; i < numTries; i++ {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "select", "blockSelectRecvAsync")
c <- true
}
}()
@@ -1026,11 +1057,11 @@ func blockSelectRecvAsync() {
}
}
-func blockSelectSendSync() {
+func blockSelectSendSync(t *testing.T) {
c := make(chan bool)
c2 := make(chan bool)
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "select", "blockSelectSendSync")
<-c
}()
select {
@@ -1039,11 +1070,11 @@ func blockSelectSendSync() {
}
}
-func blockMutex() {
+func blockMutex(t *testing.T) {
var mu sync.Mutex
mu.Lock()
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "semacquire", "blockMutex")
mu.Unlock()
}()
// Note: Unlock releases mu before recording the mutex event,
@@ -1053,12 +1084,12 @@ func blockMutex() {
mu.Lock()
}
-func blockCond() {
+func blockCond(t *testing.T) {
var mu sync.Mutex
c := sync.NewCond(&mu)
mu.Lock()
go func() {
- time.Sleep(blockDelay)
+ awaitBlockedGoroutine(t, "sync.Cond.Wait", "blockCond")
mu.Lock()
c.Signal()
mu.Unlock()
@@ -1144,7 +1175,7 @@ func TestMutexProfile(t *testing.T) {
t.Fatalf("need MutexProfileRate 0, got %d", old)
}
- blockMutex()
+ blockMutex(t)
t.Run("debug=1", func(t *testing.T) {
var w bytes.Buffer
diff --git a/src/runtime/preempt_arm64.s b/src/runtime/preempt_arm64.s
index 36ee13282c..c27d475dee 100644
--- a/src/runtime/preempt_arm64.s
+++ b/src/runtime/preempt_arm64.s
@@ -6,142 +6,80 @@
TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0
MOVD R30, -496(RSP)
SUB $496, RSP
- #ifdef GOOS_linux
MOVD R29, -8(RSP)
SUB $8, RSP, R29
- #endif
#ifdef GOOS_ios
MOVD R30, (RSP)
#endif
- MOVD R0, 8(RSP)
- MOVD R1, 16(RSP)
- MOVD R2, 24(RSP)
- MOVD R3, 32(RSP)
- MOVD R4, 40(RSP)
- MOVD R5, 48(RSP)
- MOVD R6, 56(RSP)
- MOVD R7, 64(RSP)
- MOVD R8, 72(RSP)
- MOVD R9, 80(RSP)
- MOVD R10, 88(RSP)
- MOVD R11, 96(RSP)
- MOVD R12, 104(RSP)
- MOVD R13, 112(RSP)
- MOVD R14, 120(RSP)
- MOVD R15, 128(RSP)
- MOVD R16, 136(RSP)
- MOVD R17, 144(RSP)
- MOVD R19, 152(RSP)
- MOVD R20, 160(RSP)
- MOVD R21, 168(RSP)
- MOVD R22, 176(RSP)
- MOVD R23, 184(RSP)
- MOVD R24, 192(RSP)
- MOVD R25, 200(RSP)
- MOVD R26, 208(RSP)
+ STP (R0, R1), 8(RSP)
+ STP (R2, R3), 24(RSP)
+ STP (R4, R5), 40(RSP)
+ STP (R6, R7), 56(RSP)
+ STP (R8, R9), 72(RSP)
+ STP (R10, R11), 88(RSP)
+ STP (R12, R13), 104(RSP)
+ STP (R14, R15), 120(RSP)
+ STP (R16, R17), 136(RSP)
+ STP (R19, R20), 152(RSP)
+ STP (R21, R22), 168(RSP)
+ STP (R23, R24), 184(RSP)
+ STP (R25, R26), 200(RSP)
MOVD NZCV, R0
MOVD R0, 216(RSP)
MOVD FPSR, R0
MOVD R0, 224(RSP)
- FMOVD F0, 232(RSP)
- FMOVD F1, 240(RSP)
- FMOVD F2, 248(RSP)
- FMOVD F3, 256(RSP)
- FMOVD F4, 264(RSP)
- FMOVD F5, 272(RSP)
- FMOVD F6, 280(RSP)
- FMOVD F7, 288(RSP)
- FMOVD F8, 296(RSP)
- FMOVD F9, 304(RSP)
- FMOVD F10, 312(RSP)
- FMOVD F11, 320(RSP)
- FMOVD F12, 328(RSP)
- FMOVD F13, 336(RSP)
- FMOVD F14, 344(RSP)
- FMOVD F15, 352(RSP)
- FMOVD F16, 360(RSP)
- FMOVD F17, 368(RSP)
- FMOVD F18, 376(RSP)
- FMOVD F19, 384(RSP)
- FMOVD F20, 392(RSP)
- FMOVD F21, 400(RSP)
- FMOVD F22, 408(RSP)
- FMOVD F23, 416(RSP)
- FMOVD F24, 424(RSP)
- FMOVD F25, 432(RSP)
- FMOVD F26, 440(RSP)
- FMOVD F27, 448(RSP)
- FMOVD F28, 456(RSP)
- FMOVD F29, 464(RSP)
- FMOVD F30, 472(RSP)
- FMOVD F31, 480(RSP)
+ FSTPD (F0, F1), 232(RSP)
+ FSTPD (F2, F3), 248(RSP)
+ FSTPD (F4, F5), 264(RSP)
+ FSTPD (F6, F7), 280(RSP)
+ FSTPD (F8, F9), 296(RSP)
+ FSTPD (F10, F11), 312(RSP)
+ FSTPD (F12, F13), 328(RSP)
+ FSTPD (F14, F15), 344(RSP)
+ FSTPD (F16, F17), 360(RSP)
+ FSTPD (F18, F19), 376(RSP)
+ FSTPD (F20, F21), 392(RSP)
+ FSTPD (F22, F23), 408(RSP)
+ FSTPD (F24, F25), 424(RSP)
+ FSTPD (F26, F27), 440(RSP)
+ FSTPD (F28, F29), 456(RSP)
+ FSTPD (F30, F31), 472(RSP)
CALL ·asyncPreempt2(SB)
- FMOVD 480(RSP), F31
- FMOVD 472(RSP), F30
- FMOVD 464(RSP), F29
- FMOVD 456(RSP), F28
- FMOVD 448(RSP), F27
- FMOVD 440(RSP), F26
- FMOVD 432(RSP), F25
- FMOVD 424(RSP), F24
- FMOVD 416(RSP), F23
- FMOVD 408(RSP), F22
- FMOVD 400(RSP), F21
- FMOVD 392(RSP), F20
- FMOVD 384(RSP), F19
- FMOVD 376(RSP), F18
- FMOVD 368(RSP), F17
- FMOVD 360(RSP), F16
- FMOVD 352(RSP), F15
- FMOVD 344(RSP), F14
- FMOVD 336(RSP), F13
- FMOVD 328(RSP), F12
- FMOVD 320(RSP), F11
- FMOVD 312(RSP), F10
- FMOVD 304(RSP), F9
- FMOVD 296(RSP), F8
- FMOVD 288(RSP), F7
- FMOVD 280(RSP), F6
- FMOVD 272(RSP), F5
- FMOVD 264(RSP), F4
- FMOVD 256(RSP), F3
- FMOVD 248(RSP), F2
- FMOVD 240(RSP), F1
- FMOVD 232(RSP), F0
+ FLDPD 472(RSP), (F30, F31)
+ FLDPD 456(RSP), (F28, F29)
+ FLDPD 440(RSP), (F26, F27)
+ FLDPD 424(RSP), (F24, F25)
+ FLDPD 408(RSP), (F22, F23)
+ FLDPD 392(RSP), (F20, F21)
+ FLDPD 376(RSP), (F18, F19)
+ FLDPD 360(RSP), (F16, F17)
+ FLDPD 344(RSP), (F14, F15)
+ FLDPD 328(RSP), (F12, F13)
+ FLDPD 312(RSP), (F10, F11)
+ FLDPD 296(RSP), (F8, F9)
+ FLDPD 280(RSP), (F6, F7)
+ FLDPD 264(RSP), (F4, F5)
+ FLDPD 248(RSP), (F2, F3)
+ FLDPD 232(RSP), (F0, F1)
MOVD 224(RSP), R0
MOVD R0, FPSR
MOVD 216(RSP), R0
MOVD R0, NZCV
- MOVD 208(RSP), R26
- MOVD 200(RSP), R25
- MOVD 192(RSP), R24
- MOVD 184(RSP), R23
- MOVD 176(RSP), R22
- MOVD 168(RSP), R21
- MOVD 160(RSP), R20
- MOVD 152(RSP), R19
- MOVD 144(RSP), R17
- MOVD 136(RSP), R16
- MOVD 128(RSP), R15
- MOVD 120(RSP), R14
- MOVD 112(RSP), R13
- MOVD 104(RSP), R12
- MOVD 96(RSP), R11
- MOVD 88(RSP), R10
- MOVD 80(RSP), R9
- MOVD 72(RSP), R8
- MOVD 64(RSP), R7
- MOVD 56(RSP), R6
- MOVD 48(RSP), R5
- MOVD 40(RSP), R4
- MOVD 32(RSP), R3
- MOVD 24(RSP), R2
- MOVD 16(RSP), R1
- MOVD 8(RSP), R0
+ LDP 200(RSP), (R25, R26)
+ LDP 184(RSP), (R23, R24)
+ LDP 168(RSP), (R21, R22)
+ LDP 152(RSP), (R19, R20)
+ LDP 136(RSP), (R16, R17)
+ LDP 120(RSP), (R14, R15)
+ LDP 104(RSP), (R12, R13)
+ LDP 88(RSP), (R10, R11)
+ LDP 72(RSP), (R8, R9)
+ LDP 56(RSP), (R6, R7)
+ LDP 40(RSP), (R4, R5)
+ LDP 24(RSP), (R2, R3)
+ LDP 8(RSP), (R0, R1)
MOVD 496(RSP), R30
- #ifdef GOOS_linux
MOVD -8(RSP), R29
- #endif
MOVD (RSP), R27
ADD $512, RSP
JMP (R27)
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 1be7a60830..df16e0f9b6 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -167,10 +167,6 @@ func main() {
mainStarted = true
if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
- // For runtime_syscall_doAllThreadsSyscall, we
- // register sysmon is not ready for the world to be
- // stopped.
- atomic.Store(&sched.sysmonStarting, 1)
systemstack(func() {
newm(sysmon, nil, -1)
})
@@ -187,7 +183,6 @@ func main() {
if g.m != &m0 {
throw("runtime.main not on m0")
}
- m0.doesPark = true
// Record when the world started.
// Must be before doInit for tracing init.
@@ -802,8 +797,18 @@ func mcommoninit(mp *m, id int64) {
mp.id = mReserveID()
}
- // cputicks is not very random in startup virtual machine
- mp.fastrand = uint64(int64Hash(uint64(mp.id), fastrandseed^uintptr(cputicks())))
+ lo := uint32(int64Hash(uint64(mp.id), fastrandseed))
+ hi := uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
+ if lo|hi == 0 {
+ hi = 1
+ }
+ // Same behavior as for 1.17.
+ // TODO: Simplify this.
+ if goarch.BigEndian {
+ mp.fastrand = uint64(lo)<<32 | uint64(hi)
+ } else {
+ mp.fastrand = uint64(hi)<<32 | uint64(lo)
+ }
mpreinit(mp)
if mp.gsignal != nil {
@@ -1437,22 +1442,12 @@ func mstartm0() {
initsig(false)
}
-// mPark causes a thread to park itself - temporarily waking for
-// fixups but otherwise waiting to be fully woken. This is the
-// only way that m's should park themselves.
+// mPark causes a thread to park itself, returning once woken.
//go:nosplit
func mPark() {
- g := getg()
- for {
- notesleep(&g.m.park)
- // Note, because of signal handling by this parked m,
- // a preemptive mDoFixup() may actually occur via
- // mDoFixupAndOSYield(). (See golang.org/issue/44193)
- noteclear(&g.m.park)
- if !mDoFixup() {
- return
- }
- }
+ gp := getg()
+ notesleep(&gp.m.park)
+ noteclear(&gp.m.park)
}
// mexit tears down and exits the current thread.
@@ -1659,145 +1654,6 @@ func forEachP(fn func(*p)) {
releasem(mp)
}
-// syscall_runtime_doAllThreadsSyscall serializes Go execution and
-// executes a specified fn() call on all m's.
-//
-// The boolean argument to fn() indicates whether the function's
-// return value will be consulted or not. That is, fn(true) should
-// return true if fn() succeeds, and fn(true) should return false if
-// it failed. When fn(false) is called, its return status will be
-// ignored.
-//
-// syscall_runtime_doAllThreadsSyscall first invokes fn(true) on a
-// single, coordinating, m, and only if it returns true does it go on
-// to invoke fn(false) on all of the other m's known to the process.
-//
-//go:linkname syscall_runtime_doAllThreadsSyscall syscall.runtime_doAllThreadsSyscall
-func syscall_runtime_doAllThreadsSyscall(fn func(bool) bool) {
- if iscgo {
- panic("doAllThreadsSyscall not supported with cgo enabled")
- }
- if fn == nil {
- return
- }
- for atomic.Load(&sched.sysmonStarting) != 0 {
- osyield()
- }
-
- // We don't want this thread to handle signals for the
- // duration of this critical section. The underlying issue
- // being that this locked coordinating m is the one monitoring
- // for fn() execution by all the other m's of the runtime,
- // while no regular go code execution is permitted (the world
- // is stopped). If this present m were to get distracted to
- // run signal handling code, and find itself waiting for a
- // second thread to execute go code before being able to
- // return from that signal handling, a deadlock will result.
- // (See golang.org/issue/44193.)
- lockOSThread()
- var sigmask sigset
- sigsave(&sigmask)
- sigblock(false)
-
- stopTheWorldGC("doAllThreadsSyscall")
- if atomic.Load(&newmHandoff.haveTemplateThread) != 0 {
- // Ensure that there are no in-flight thread
- // creations: don't want to race with allm.
- lock(&newmHandoff.lock)
- for !newmHandoff.waiting {
- unlock(&newmHandoff.lock)
- osyield()
- lock(&newmHandoff.lock)
- }
- unlock(&newmHandoff.lock)
- }
- if netpollinited() {
- netpollBreak()
- }
- sigRecvPrepareForFixup()
- _g_ := getg()
- if raceenabled {
- // For m's running without racectx, we loan out the
- // racectx of this call.
- lock(&mFixupRace.lock)
- mFixupRace.ctx = _g_.racectx
- unlock(&mFixupRace.lock)
- }
- if ok := fn(true); ok {
- tid := _g_.m.procid
- for mp := allm; mp != nil; mp = mp.alllink {
- if mp.procid == tid {
- // This m has already completed fn()
- // call.
- continue
- }
- // Be wary of mp's without procid values if
- // they are known not to park. If they are
- // marked as parking with a zero procid, then
- // they will be racing with this code to be
- // allocated a procid and we will annotate
- // them with the need to execute the fn when
- // they acquire a procid to run it.
- if mp.procid == 0 && !mp.doesPark {
- // Reaching here, we are either
- // running Windows, or cgo linked
- // code. Neither of which are
- // currently supported by this API.
- throw("unsupported runtime environment")
- }
- // stopTheWorldGC() doesn't guarantee stopping
- // all the threads, so we lock here to avoid
- // the possibility of racing with mp.
- lock(&mp.mFixup.lock)
- mp.mFixup.fn = fn
- atomic.Store(&mp.mFixup.used, 1)
- if mp.doesPark {
- // For non-service threads this will
- // cause the wakeup to be short lived
- // (once the mutex is unlocked). The
- // next real wakeup will occur after
- // startTheWorldGC() is called.
- notewakeup(&mp.park)
- }
- unlock(&mp.mFixup.lock)
- }
- for {
- done := true
- for mp := allm; done && mp != nil; mp = mp.alllink {
- if mp.procid == tid {
- continue
- }
- done = atomic.Load(&mp.mFixup.used) == 0
- }
- if done {
- break
- }
- // if needed force sysmon and/or newmHandoff to wakeup.
- lock(&sched.lock)
- if atomic.Load(&sched.sysmonwait) != 0 {
- atomic.Store(&sched.sysmonwait, 0)
- notewakeup(&sched.sysmonnote)
- }
- unlock(&sched.lock)
- lock(&newmHandoff.lock)
- if newmHandoff.waiting {
- newmHandoff.waiting = false
- notewakeup(&newmHandoff.wake)
- }
- unlock(&newmHandoff.lock)
- osyield()
- }
- }
- if raceenabled {
- lock(&mFixupRace.lock)
- mFixupRace.ctx = 0
- unlock(&mFixupRace.lock)
- }
- startTheWorldGC()
- msigrestore(sigmask)
- unlockOSThread()
-}
-
// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
@@ -1847,8 +1703,14 @@ type cgothreadstart struct {
//
//go:yeswritebarrierrec
func allocm(_p_ *p, fn func(), id int64) *m {
+ allocmLock.rlock()
+
+ // The caller owns _p_, but we may borrow (i.e., acquirep) it. We must
+ // disable preemption to ensure it is not stolen, which would make the
+ // caller lose ownership.
+ acquirem()
+
_g_ := getg()
- acquirem() // disable GC because it can be called from sysmon
if _g_.m.p == 0 {
acquirep(_p_) // temporarily borrow p for mallocs in this function
}
@@ -1894,8 +1756,9 @@ func allocm(_p_ *p, fn func(), id int64) *m {
if _p_ == _g_.m.p.ptr() {
releasep()
}
- releasem(_g_.m)
+ releasem(_g_.m)
+ allocmLock.runlock()
return mp
}
@@ -2172,9 +2035,17 @@ func unlockextra(mp *m) {
atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
-// execLock serializes exec and clone to avoid bugs or unspecified behaviour
-// around exec'ing while creating/destroying threads. See issue #19546.
-var execLock rwmutex
+var (
+ // allocmLock is locked for read when creating new Ms in allocm and their
+ // addition to allm. Thus acquiring this lock for write blocks the
+ // creation of new Ms.
+ allocmLock rwmutex
+
+ // execLock serializes exec and clone to avoid bugs or unspecified
+ // behaviour around exec'ing while creating/destroying threads. See
+ // issue #19546.
+ execLock rwmutex
+)
// newmHandoff contains a list of m structures that need new OS threads.
// This is used by newm in situations where newm itself can't safely
@@ -2204,8 +2075,19 @@ var newmHandoff struct {
// id is optional pre-allocated m ID. Omit by passing -1.
//go:nowritebarrierrec
func newm(fn func(), _p_ *p, id int64) {
+ // allocm adds a new M to allm, but they do not start until created by
+ // the OS in newm1 or the template thread.
+ //
+ // doAllThreadsSyscall requires that every M in allm will eventually
+ // start and be signal-able, even with a STW.
+ //
+ // Disable preemption here until we start the thread to ensure that
+ // newm is not preempted between allocm and starting the new thread,
+ // ensuring that anything added to allm is guaranteed to eventually
+ // start.
+ acquirem()
+
mp := allocm(_p_, fn, id)
- mp.doesPark = (_p_ != nil)
mp.nextp.set(_p_)
mp.sigmask = initSigmask
if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
@@ -2231,9 +2113,14 @@ func newm(fn func(), _p_ *p, id int64) {
notewakeup(&newmHandoff.wake)
}
unlock(&newmHandoff.lock)
+ // The M has not started yet, but the template thread does not
+ // participate in STW, so it will always process queued Ms and
+ // it is safe to releasem.
+ releasem(getg().m)
return
}
newm1(mp)
+ releasem(getg().m)
}
func newm1(mp *m) {
@@ -2281,81 +2168,6 @@ func startTemplateThread() {
releasem(mp)
}
-// mFixupRace is used to temporarily borrow the race context from the
-// coordinating m during a syscall_runtime_doAllThreadsSyscall and
-// loan it out to each of the m's of the runtime so they can execute a
-// mFixup.fn in that context.
-var mFixupRace struct {
- lock mutex
- ctx uintptr
-}
-
-// mDoFixup runs any outstanding fixup function for the running m.
-// Returns true if a fixup was outstanding and actually executed.
-//
-// Note: to avoid deadlocks, and the need for the fixup function
-// itself to be async safe, signals are blocked for the working m
-// while it holds the mFixup lock. (See golang.org/issue/44193)
-//
-//go:nosplit
-func mDoFixup() bool {
- _g_ := getg()
- if used := atomic.Load(&_g_.m.mFixup.used); used == 0 {
- return false
- }
-
- // slow path - if fixup fn is used, block signals and lock.
- var sigmask sigset
- sigsave(&sigmask)
- sigblock(false)
- lock(&_g_.m.mFixup.lock)
- fn := _g_.m.mFixup.fn
- if fn != nil {
- if gcphase != _GCoff {
- // We can't have a write barrier in this
- // context since we may not have a P, but we
- // clear fn to signal that we've executed the
- // fixup. As long as fn is kept alive
- // elsewhere, technically we should have no
- // issues with the GC, but fn is likely
- // generated in a different package altogether
- // that may change independently. Just assert
- // the GC is off so this lack of write barrier
- // is more obviously safe.
- throw("GC must be disabled to protect validity of fn value")
- }
- if _g_.racectx != 0 || !raceenabled {
- fn(false)
- } else {
- // temporarily acquire the context of the
- // originator of the
- // syscall_runtime_doAllThreadsSyscall and
- // block others from using it for the duration
- // of the fixup call.
- lock(&mFixupRace.lock)
- _g_.racectx = mFixupRace.ctx
- fn(false)
- _g_.racectx = 0
- unlock(&mFixupRace.lock)
- }
- *(*uintptr)(unsafe.Pointer(&_g_.m.mFixup.fn)) = 0
- atomic.Store(&_g_.m.mFixup.used, 0)
- }
- unlock(&_g_.m.mFixup.lock)
- msigrestore(sigmask)
- return fn != nil
-}
-
-// mDoFixupAndOSYield is called when an m is unable to send a signal
-// because the allThreadsSyscall mechanism is in progress. That is, an
-// mPark() has been interrupted with this signal handler so we need to
-// ensure the fixup is executed from this context.
-//go:nosplit
-func mDoFixupAndOSYield() {
- mDoFixup()
- osyield()
-}
-
// templateThread is a thread in a known-good state that exists solely
// to start new threads in known-good states when the calling thread
// may not be in a good state.
@@ -2392,7 +2204,6 @@ func templateThread() {
noteclear(&newmHandoff.wake)
unlock(&newmHandoff.lock)
notesleep(&newmHandoff.wake)
- mDoFixup()
}
}
@@ -5239,10 +5050,6 @@ func sysmon() {
checkdead()
unlock(&sched.lock)
- // For syscall_runtime_doAllThreadsSyscall, sysmon is
- // sufficiently up to participate in fixups.
- atomic.Store(&sched.sysmonStarting, 0)
-
lasttrace := int64(0)
idle := 0 // how many cycles in succession we had not wokeup somebody
delay := uint32(0)
@@ -5257,7 +5064,6 @@ func sysmon() {
delay = 10 * 1000
}
usleep(delay)
- mDoFixup()
// sysmon should not enter deep sleep if schedtrace is enabled so that
// it can print that information at the right time.
@@ -5294,7 +5100,6 @@ func sysmon() {
osRelax(true)
}
syscallWake = notetsleep(&sched.sysmonnote, sleep)
- mDoFixup()
if shouldRelax {
osRelax(false)
}
@@ -5337,7 +5142,6 @@ func sysmon() {
incidlelocked(1)
}
}
- mDoFixup()
if GOOS == "netbsd" && needSysmonWorkaround {
// netpoll is responsible for waiting for timer
// expiration, so we typically don't have to worry
@@ -6334,7 +6138,7 @@ func (ord *randomOrder) start(i uint32) randomEnum {
return randomEnum{
count: ord.count,
pos: i % ord.count,
- inc: ord.coprimes[i%uint32(len(ord.coprimes))],
+ inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
}
}
diff --git a/src/runtime/proc_runtime_test.go b/src/runtime/proc_runtime_test.go
index a7bde2c6df..90aed83d46 100644
--- a/src/runtime/proc_runtime_test.go
+++ b/src/runtime/proc_runtime_test.go
@@ -30,4 +30,21 @@ func RunStealOrderTest() {
}
}
}
+ // Make sure that different arguments to ord.start don't generate the
+ // same pos+inc twice.
+ for procs := 2; procs <= 64; procs++ {
+ ord.reset(uint32(procs))
+ checked := make([]bool, procs*procs)
+ // We want at least procs*len(ord.coprimes) different pos+inc values
+ // before we start repeating.
+ for i := 0; i < procs*len(ord.coprimes); i++ {
+ enum := ord.start(uint32(i))
+ j := enum.pos*uint32(procs) + enum.inc
+ if checked[j] {
+ println("procs:", procs, "pos:", enum.pos, "inc:", enum.inc)
+ panic("duplicate pos+inc during enumeration")
+ }
+ checked[j] = true
+ }
+ }
}
diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s
index 798e23294a..95fec0b9c6 100644
--- a/src/runtime/race_arm64.s
+++ b/src/runtime/race_arm64.s
@@ -188,8 +188,12 @@ ret:
// func runtime·racefuncenter(pc uintptr)
// Called from instrumented code.
-TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8
+TEXT runtime·racefuncenter<ABIInternal>(SB), NOSPLIT, $0-8
+#ifdef GOEXPERIMENT_regabiargs
+ MOVD R0, R9 // callpc
+#else
MOVD callpc+0(FP), R9
+#endif
JMP racefuncenter<>(SB)
// Common code for racefuncenter
@@ -205,7 +209,7 @@ TEXT racefuncenter<>(SB), NOSPLIT, $0-0
// func runtime·racefuncexit()
// Called from instrumented code.
-TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
+TEXT runtime·racefuncexit<ABIInternal>(SB), NOSPLIT, $0-0
load_g
MOVD g_racectx(g), R0 // race context
// void __tsan_func_exit(ThreadState *thr);
@@ -392,12 +396,12 @@ racecallatomic_ignore:
// Addr is outside the good range.
// Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
// An attempt to synchronize on the address would cause crash.
- MOVD R9, R20 // remember the original function
+ MOVD R9, R21 // remember the original function
MOVD $__tsan_go_ignore_sync_begin(SB), R9
load_g
MOVD g_racectx(g), R0 // goroutine context
BL racecall<>(SB)
- MOVD R20, R9 // restore the original function
+ MOVD R21, R9 // restore the original function
// Call the atomic function.
// racecall will call LLVM race code which might clobber R28 (g)
load_g
@@ -424,10 +428,12 @@ TEXT runtime·racecall(SB), NOSPLIT, $0-0
JMP racecall<>(SB)
// Switches SP to g0 stack and calls (R9). Arguments already set.
-TEXT racecall<>(SB), NOSPLIT, $0-0
+// Clobbers R19, R20.
+TEXT racecall<>(SB), NOSPLIT|NOFRAME, $0-0
MOVD g_m(g), R10
// Switch to g0 stack.
MOVD RSP, R19 // callee-saved, preserved across the CALL
+ MOVD R30, R20 // callee-saved, preserved across the CALL
MOVD m_g0(R10), R11
CMP R11, g
BEQ call // already on g0
@@ -436,7 +442,7 @@ TEXT racecall<>(SB), NOSPLIT, $0-0
call:
BL R9
MOVD R19, RSP
- RET
+ JMP (R20)
// C->Go callback thunk that allows to call runtime·racesymbolize from C code.
// Direct Go->C race call has only switched SP, finish g->g0 switch by setting correct g.
diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go
index 7e8723e15f..ee8c6c210f 100644
--- a/src/runtime/runtime-gdb_test.go
+++ b/src/runtime/runtime-gdb_test.go
@@ -427,6 +427,14 @@ func TestGdbBacktrace(t *testing.T) {
got, err := testenv.RunWithTimeout(t, exec.Command("gdb", args...))
t.Logf("gdb output:\n%s", got)
if err != nil {
+ if bytes.Contains(got, []byte("internal-error: wait returned unexpected status 0x0")) {
+ // GDB bug: https://sourceware.org/bugzilla/show_bug.cgi?id=28551
+ testenv.SkipFlaky(t, 43068)
+ }
+ if bytes.Contains(got, []byte("Couldn't get registers: No such process.")) {
+ // GDB bug: https://sourceware.org/bugzilla/show_bug.cgi?id=9086
+ testenv.SkipFlaky(t, 50838)
+ }
t.Fatalf("gdb exited with error: %v", err)
}
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 3eada37840..3d01ac5171 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -547,7 +547,6 @@ type m struct {
ncgo int32 // number of cgo calls currently in progress
cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily
cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
- doesPark bool // non-P running threads: sysmon and newmHandoff never use .park
park note
alllink *m // on allm
schedlink muintptr
@@ -564,16 +563,6 @@ type m struct {
syscalltick uint32
freelink *m // on sched.freem
- // mFixup is used to synchronize OS related m state
- // (credentials etc) use mutex to access. To avoid deadlocks
- // an atomic.Load() of used being zero in mDoFixupFn()
- // guarantees fn is nil.
- mFixup struct {
- lock mutex
- used uint32
- fn func(bool) bool
- }
-
// these are here because they are too large to be on the stack
// of low-level NOSPLIT functions.
libcall libcall
@@ -817,10 +806,6 @@ type schedt struct {
sysmonwait uint32
sysmonnote note
- // While true, sysmon not ready for mFixup calls.
- // Accessed atomically.
- sysmonStarting uint32
-
// safepointFn should be called on each P at the next GC
// safepoint if p.runSafePointFn is set.
safePointFn func(*p)
@@ -838,8 +823,6 @@ type schedt struct {
// with the rest of the runtime.
sysmonlock mutex
- _ uint32 // ensure timeToRun has 8-byte alignment
-
// timeToRun is a distribution of scheduling latencies, defined
// as the sum of time a G spends in the _Grunnable state before
// it transitions to _Grunning.
@@ -856,7 +839,7 @@ const (
_SigPanic // if the signal is from the kernel, panic
_SigDefault // if the signal isn't explicitly requested, don't monitor it
_SigGoExit // cause all runtime procs to exit (only used on Plan 9).
- _SigSetStack // add SA_ONSTACK to libc handler
+ _SigSetStack // Don't explicitly install handler, but add SA_ONSTACK to existing libc handler
_SigUnblock // always unblock; see blockableSig
_SigIgn // _SIG_DFL action is to ignore the signal
)
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index 08f266cc67..2dd4cc51a3 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -161,6 +161,13 @@ func sigInstallGoHandler(sig uint32) bool {
}
}
+ if (GOOS == "linux" || GOOS == "android") && !iscgo && sig == sigPerThreadSyscall {
+ // sigPerThreadSyscall is the same signal used by glibc for
+ // per-thread syscalls on Linux. We use it for the same purpose
+ // in non-cgo binaries.
+ return true
+ }
+
t := &sigtable[sig]
if t.flags&_SigSetStack != 0 {
return false
@@ -616,6 +623,15 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
return
}
+ if (GOOS == "linux" || GOOS == "android") && sig == sigPerThreadSyscall {
+ // sigPerThreadSyscall is the same signal used by glibc for
+ // per-thread syscalls on Linux. We use it for the same purpose
+ // in non-cgo binaries. Since this signal is not _SigNotify,
+ // there is nothing more to do once we run the syscall.
+ runPerThreadSyscall()
+ return
+ }
+
if sig == sigPreempt && debug.asyncpreemptoff == 0 {
// Might be a preemption signal.
doSigPreempt(gp, c)
diff --git a/src/runtime/sigqueue.go b/src/runtime/sigqueue.go
index 7b84a0ef65..fdf99d94a2 100644
--- a/src/runtime/sigqueue.go
+++ b/src/runtime/sigqueue.go
@@ -11,18 +11,18 @@
//
// sigsend is called by the signal handler to queue a new signal.
// signal_recv is called by the Go program to receive a newly queued signal.
+//
// Synchronization between sigsend and signal_recv is based on the sig.state
-// variable. It can be in 4 states: sigIdle, sigReceiving, sigSending and sigFixup.
-// sigReceiving means that signal_recv is blocked on sig.Note and there are no
-// new pending signals.
-// sigSending means that sig.mask *may* contain new pending signals,
-// signal_recv can't be blocked in this state.
-// sigIdle means that there are no new pending signals and signal_recv is not blocked.
-// sigFixup is a transient state that can only exist as a short
-// transition from sigReceiving and then on to sigIdle: it is
-// used to ensure the AllThreadsSyscall()'s mDoFixup() operation
-// occurs on the sleeping m, waiting to receive a signal.
+// variable. It can be in three states:
+// * sigReceiving means that signal_recv is blocked on sig.Note and there are
+// no new pending signals.
+// * sigSending means that sig.mask *may* contain new pending signals,
+// signal_recv can't be blocked in this state.
+// * sigIdle means that there are no new pending signals and signal_recv is not
+// blocked.
+//
// Transitions between states are done atomically with CAS.
+//
// When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask.
// If several sigsends and signal_recv execute concurrently, it can lead to
// unnecessary rechecks of sig.mask, but it cannot lead to missed signals
@@ -63,7 +63,6 @@ const (
sigIdle = iota
sigReceiving
sigSending
- sigFixup
)
// sigsend delivers a signal from sighandler to the internal signal delivery queue.
@@ -117,9 +116,6 @@ Send:
notewakeup(&sig.note)
break Send
}
- case sigFixup:
- // nothing to do - we need to wait for sigIdle.
- mDoFixupAndOSYield()
}
}
@@ -127,19 +123,6 @@ Send:
return true
}
-// sigRecvPrepareForFixup is used to temporarily wake up the
-// signal_recv() running thread while it is blocked waiting for the
-// arrival of a signal. If it causes the thread to wake up, the
-// sig.state travels through this sequence: sigReceiving -> sigFixup
-// -> sigIdle -> sigReceiving and resumes. (This is only called while
-// GC is disabled.)
-//go:nosplit
-func sigRecvPrepareForFixup() {
- if atomic.Cas(&sig.state, sigReceiving, sigFixup) {
- notewakeup(&sig.note)
- }
-}
-
// Called to receive the next queued signal.
// Must only be called from a single goroutine at a time.
//go:linkname signal_recv os/signal.signal_recv
@@ -167,16 +150,7 @@ func signal_recv() uint32 {
}
notetsleepg(&sig.note, -1)
noteclear(&sig.note)
- if !atomic.Cas(&sig.state, sigFixup, sigIdle) {
- break Receive
- }
- // Getting here, the code will
- // loop around again to sleep
- // in state sigReceiving. This
- // path is taken when
- // sigRecvPrepareForFixup()
- // has been called by another
- // thread.
+ break Receive
}
case sigSending:
if atomic.Cas(&sig.state, sigSending, sigIdle) {
diff --git a/src/runtime/sigqueue_plan9.go b/src/runtime/sigqueue_plan9.go
index aebd2060e7..d5fe8f8b35 100644
--- a/src/runtime/sigqueue_plan9.go
+++ b/src/runtime/sigqueue_plan9.go
@@ -92,13 +92,6 @@ func sendNote(s *byte) bool {
return true
}
-// sigRecvPrepareForFixup is a no-op on plan9. (This would only be
-// called while GC is disabled.)
-//
-//go:nosplit
-func sigRecvPrepareForFixup() {
-}
-
// Called to receive the next queued signal.
// Must only be called from a single goroutine at a time.
//go:linkname signal_recv os/signal.signal_recv
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index 017b0a0749..ee4db47314 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -310,6 +310,7 @@ const (
_FUNCDATA_OpenCodedDeferInfo = 4
_FUNCDATA_ArgInfo = 5
_FUNCDATA_ArgLiveInfo = 6
+ _FUNCDATA_WrapInfo = 7
_ArgsSizeUnknown = -0x80000000
)
diff --git a/src/runtime/sys_darwin.go b/src/runtime/sys_darwin.go
index 80dd1a0378..58b3a9171c 100644
--- a/src/runtime/sys_darwin.go
+++ b/src/runtime/sys_darwin.go
@@ -17,87 +17,91 @@ import (
//go:linkname syscall_syscall syscall.syscall
//go:nosplit
-//go:cgo_unsafe_args
func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&args))
exitsyscall()
- return
+ return args.r1, args.r2, args.err
}
func syscall()
//go:linkname syscall_syscallX syscall.syscallX
//go:nosplit
-//go:cgo_unsafe_args
func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallX)), unsafe.Pointer(&args))
exitsyscall()
- return
+ return args.r1, args.r2, args.err
}
func syscallX()
//go:linkname syscall_syscall6 syscall.syscall6
//go:nosplit
-//go:cgo_unsafe_args
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&args))
exitsyscall()
- return
+ return args.r1, args.r2, args.err
}
func syscall6()
//go:linkname syscall_syscall6X syscall.syscall6X
//go:nosplit
-//go:cgo_unsafe_args
func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6X)), unsafe.Pointer(&args))
exitsyscall()
- return
+ return args.r1, args.r2, args.err
}
func syscall6X()
//go:linkname syscall_syscallPtr syscall.syscallPtr
//go:nosplit
-//go:cgo_unsafe_args
func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallPtr)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallPtr)), unsafe.Pointer(&args))
exitsyscall()
- return
+ return args.r1, args.r2, args.err
}
func syscallPtr()
//go:linkname syscall_rawSyscall syscall.rawSyscall
//go:nosplit
-//go:cgo_unsafe_args
func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&fn))
- return
+ args := struct{ fn, a1, a2, a3, r1, r2, err uintptr }{fn, a1, a2, a3, r1, r2, err}
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall)), unsafe.Pointer(&args))
+ return args.r1, args.r2, args.err
}
//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
//go:nosplit
-//go:cgo_unsafe_args
func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&fn))
- return
+ args := struct{ fn, a1, a2, a3, a4, a5, a6, r1, r2, err uintptr }{fn, a1, a2, a3, a4, a5, a6, r1, r2, err}
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall6)), unsafe.Pointer(&args))
+ return args.r1, args.r2, args.err
}
// syscallNoErr is used in crypto/x509 to call into Security.framework and CF.
//go:linkname crypto_x509_syscall crypto/x509/internal/macos.syscall
//go:nosplit
-//go:cgo_unsafe_args
-func crypto_x509_syscall(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1 uintptr) {
+func crypto_x509_syscall(fn, a1, a2, a3, a4, a5 uintptr, f1 float64) (r1 uintptr) {
+ args := struct {
+ fn, a1, a2, a3, a4, a5 uintptr
+ f1 float64
+ r1 uintptr
+ }{fn, a1, a2, a3, a4, a5, f1, r1}
entersyscall()
- libcCall(unsafe.Pointer(abi.FuncPCABI0(syscallNoErr)), unsafe.Pointer(&fn))
+ libcCall(unsafe.Pointer(abi.FuncPCABI0(syscall_x509)), unsafe.Pointer(&args))
exitsyscall()
- return
+ return args.r1
}
-func syscallNoErr()
+func syscall_x509()
// The *_trampoline functions convert from the Go calling convention to the C calling convention
// and then call the underlying libc function. They are defined in sys_darwin_$ARCH.s.
diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s
index 5d89cda8e6..db4715d2b7 100644
--- a/src/runtime/sys_darwin_amd64.s
+++ b/src/runtime/sys_darwin_amd64.s
@@ -831,9 +831,10 @@ ok:
POPQ BP
RET
-// syscallNoErr is like syscall6 but does not check for errors, and
-// only returns one value, for use with standard C ABI library functions.
-TEXT runtime·syscallNoErr(SB),NOSPLIT,$0
+// syscall_x509 is for crypto/x509. It is like syscall6 but does not check for errors,
+// takes 5 uintptrs and 1 float64, and only returns one value,
+// for use with standard C ABI functions.
+TEXT runtime·syscall_x509(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
SUBQ $16, SP
@@ -842,7 +843,7 @@ TEXT runtime·syscallNoErr(SB),NOSPLIT,$0
MOVQ (3*8)(DI), DX // a3
MOVQ (4*8)(DI), CX // a4
MOVQ (5*8)(DI), R8 // a5
- MOVQ (6*8)(DI), R9 // a6
+ MOVQ (6*8)(DI), X0 // f1
MOVQ DI, (SP)
MOVQ (1*8)(DI), DI // a1
XORL AX, AX // vararg: say "no float args"
diff --git a/src/runtime/sys_darwin_arm64.s b/src/runtime/sys_darwin_arm64.s
index 96d2ed1076..e57ac53e10 100644
--- a/src/runtime/sys_darwin_arm64.s
+++ b/src/runtime/sys_darwin_arm64.s
@@ -736,9 +736,10 @@ TEXT runtime·syscall6X(SB),NOSPLIT,$0
ok:
RET
-// syscallNoErr is like syscall6 but does not check for errors, and
-// only returns one value, for use with standard C ABI library functions.
-TEXT runtime·syscallNoErr(SB),NOSPLIT,$0
+// syscall_x509 is for crypto/x509. It is like syscall6 but does not check for errors,
+// takes 5 uintptrs and 1 float64, and only returns one value,
+// for use with standard C ABI functions.
+TEXT runtime·syscall_x509(SB),NOSPLIT,$0
SUB $16, RSP // push structure pointer
MOVD R0, (RSP)
@@ -747,7 +748,7 @@ TEXT runtime·syscallNoErr(SB),NOSPLIT,$0
MOVD 24(R0), R2 // a3
MOVD 32(R0), R3 // a4
MOVD 40(R0), R4 // a5
- MOVD 48(R0), R5 // a6
+ FMOVD 48(R0), F0 // f1
MOVD 8(R0), R0 // a1
BL (R12)
diff --git a/src/runtime/sys_dragonfly_amd64.s b/src/runtime/sys_dragonfly_amd64.s
index d57bc2a7a4..684c9ab7f0 100644
--- a/src/runtime/sys_dragonfly_amd64.s
+++ b/src/runtime/sys_dragonfly_amd64.s
@@ -109,21 +109,6 @@ TEXT runtime·read(SB),NOSPLIT,$-8
MOVL AX, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVL $42, AX
- SYSCALL
- JCC pipeok
- MOVL $-1,r+0(FP)
- MOVL $-1,w+4(FP)
- MOVL AX, errno+8(FP)
- RET
-pipeok:
- MOVL AX, r+0(FP)
- MOVL DX, w+4(FP)
- MOVL $0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-20
MOVL $0, DI
@@ -402,18 +387,3 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
MOVL $92, AX // fcntl
SYSCALL
RET
-
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVL fd+0(FP), DI // fd
- MOVQ $3, SI // F_GETFL
- MOVQ $0, DX
- MOVL $92, AX // fcntl
- SYSCALL
- MOVL fd+0(FP), DI // fd
- MOVQ $4, SI // F_SETFL
- MOVQ $4, DX // O_NONBLOCK
- ORL AX, DX
- MOVL $92, AX // fcntl
- SYSCALL
- RET
diff --git a/src/runtime/sys_freebsd_386.s b/src/runtime/sys_freebsd_386.s
index 97e6d9ab36..aceb6fe1bf 100644
--- a/src/runtime/sys_freebsd_386.s
+++ b/src/runtime/sys_freebsd_386.s
@@ -101,21 +101,6 @@ TEXT runtime·read(SB),NOSPLIT,$-4
MOVL AX, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$8-12
- MOVL $42, AX
- INT $0x80
- JAE ok
- MOVL $0, r+0(FP)
- MOVL $0, w+4(FP)
- MOVL AX, errno+8(FP)
- RET
-ok:
- MOVL AX, r+0(FP)
- MOVL DX, w+4(FP)
- MOVL $0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$12-16
MOVL $542, AX
@@ -443,23 +428,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$32
NEGL AX
RET
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$16-4
- MOVL $92, AX // fcntl
- MOVL fd+0(FP), BX // fd
- MOVL BX, 4(SP)
- MOVL $3, 8(SP) // F_GETFL
- MOVL $0, 12(SP)
- INT $0x80
- MOVL fd+0(FP), BX // fd
- MOVL BX, 4(SP)
- MOVL $4, 8(SP) // F_SETFL
- ORL $4, AX // O_NONBLOCK
- MOVL AX, 12(SP)
- MOVL $92, AX // fcntl
- INT $0x80
- RET
-
// func cpuset_getaffinity(level int, which int, id int64, size int, mask *byte) int32
TEXT runtime·cpuset_getaffinity(SB), NOSPLIT, $0-28
MOVL $487, AX
diff --git a/src/runtime/sys_freebsd_amd64.s b/src/runtime/sys_freebsd_amd64.s
index 165e97c60d..cc95da7e64 100644
--- a/src/runtime/sys_freebsd_amd64.s
+++ b/src/runtime/sys_freebsd_amd64.s
@@ -102,21 +102,6 @@ TEXT runtime·read(SB),NOSPLIT,$-8
MOVL AX, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVL $42, AX
- SYSCALL
- JCC ok
- MOVL $0, r+0(FP)
- MOVL $0, w+4(FP)
- MOVL AX, errno+8(FP)
- RET
-ok:
- MOVL AX, r+0(FP)
- MOVL DX, w+4(FP)
- MOVL $0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-20
LEAQ r+8(FP), DI
@@ -491,21 +476,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
SYSCALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVL fd+0(FP), DI // fd
- MOVQ $3, SI // F_GETFL
- MOVQ $0, DX
- MOVL $92, AX // fcntl
- SYSCALL
- MOVL fd+0(FP), DI // fd
- MOVQ $4, SI // F_SETFL
- MOVQ $4, DX // O_NONBLOCK
- ORL AX, DX
- MOVL $92, AX // fcntl
- SYSCALL
- RET
-
// func cpuset_getaffinity(level int, which int, id int64, size int, mask *byte) int32
TEXT runtime·cpuset_getaffinity(SB), NOSPLIT, $0-44
MOVQ level+0(FP), DI
diff --git a/src/runtime/sys_freebsd_arm.s b/src/runtime/sys_freebsd_arm.s
index b12e47c576..88ab0fc795 100644
--- a/src/runtime/sys_freebsd_arm.s
+++ b/src/runtime/sys_freebsd_arm.s
@@ -20,7 +20,6 @@
#define SYS_close (SYS_BASE + 6)
#define SYS_getpid (SYS_BASE + 20)
#define SYS_kill (SYS_BASE + 37)
-#define SYS_pipe (SYS_BASE + 42)
#define SYS_sigaltstack (SYS_BASE + 53)
#define SYS_munmap (SYS_BASE + 73)
#define SYS_madvise (SYS_BASE + 75)
@@ -123,23 +122,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0
MOVW R0, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVW $SYS_pipe, R7
- SWI $0
- BCC ok
- MOVW $0, R1
- MOVW R1, r+0(FP)
- MOVW R1, w+4(FP)
- MOVW R0, errno+8(FP)
- RET
-ok:
- MOVW R0, r+0(FP)
- MOVW R1, w+4(FP)
- MOVW $0, R1
- MOVW R1, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-16
MOVW $r+4(FP), R0
@@ -414,20 +396,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
SWI $0
RET
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVW fd+0(FP), R0 // fd
- MOVW $3, R1 // F_GETFL
- MOVW $0, R2
- MOVW $SYS_fcntl, R7
- SWI $0
- ORR $0x4, R0, R2 // O_NONBLOCK
- MOVW fd+0(FP), R0 // fd
- MOVW $4, R1 // F_SETFL
- MOVW $SYS_fcntl, R7
- SWI $0
- RET
-
// TODO: this is only valid for ARMv7+
TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
B runtime·armPublicationBarrier(SB)
diff --git a/src/runtime/sys_freebsd_arm64.s b/src/runtime/sys_freebsd_arm64.s
index 1aa09e87ca..59adf4e5f3 100644
--- a/src/runtime/sys_freebsd_arm64.s
+++ b/src/runtime/sys_freebsd_arm64.s
@@ -133,18 +133,6 @@ ok:
MOVW R0, ret+8(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVD $r+0(FP), R0
- MOVW $0, R1
- MOVD $SYS_pipe2, R8
- SVC
- BCC ok
- NEG R0, R0
-ok:
- MOVW R0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOVD $r+8(FP), R0
@@ -492,20 +480,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
SVC
RET
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVW fd+0(FP), R0
- MOVD $F_GETFL, R1
- MOVD $0, R2
- MOVD $SYS_fcntl, R8
- SVC
- ORR $O_NONBLOCK, R0, R2
- MOVW fd+0(FP), R0
- MOVW $F_SETFL, R1
- MOVW $SYS_fcntl, R7
- SVC
- RET
-
// func getCntxct(physical bool) uint32
TEXT runtime·getCntxct(SB),NOSPLIT,$0
MOVB physical+0(FP), R0
diff --git a/src/runtime/sys_linux_386.s b/src/runtime/sys_linux_386.s
index 6df812234c..fef68d51dc 100644
--- a/src/runtime/sys_linux_386.s
+++ b/src/runtime/sys_linux_386.s
@@ -32,7 +32,6 @@
#define SYS_getpid 20
#define SYS_access 33
#define SYS_kill 37
-#define SYS_pipe 42
#define SYS_brk 45
#define SYS_fcntl 55
#define SYS_munmap 91
@@ -130,14 +129,6 @@ TEXT runtime·read(SB),NOSPLIT,$0
MOVL AX, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVL $SYS_pipe, AX
- LEAL r+0(FP), BX
- INVOKE_SYSCALL
- MOVL AX, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-16
MOVL $SYS_pipe2, AX
@@ -782,21 +773,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
INVOKE_SYSCALL
RET
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVL $SYS_fcntl, AX
- MOVL fd+0(FP), BX // fd
- MOVL $3, CX // F_GETFL
- MOVL $0, DX
- INVOKE_SYSCALL
- MOVL fd+0(FP), BX // fd
- MOVL $4, CX // F_SETFL
- MOVL $0x800, DX // O_NONBLOCK
- ORL AX, DX
- MOVL $SYS_fcntl, AX
- INVOKE_SYSCALL
- RET
-
// int access(const char *name, int mode)
TEXT runtime·access(SB),NOSPLIT,$0
MOVL $SYS_access, AX
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index f0e58e11db..4be0801114 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -22,7 +22,6 @@
#define SYS_rt_sigaction 13
#define SYS_rt_sigprocmask 14
#define SYS_rt_sigreturn 15
-#define SYS_pipe 22
#define SYS_sched_yield 24
#define SYS_mincore 27
#define SYS_madvise 28
@@ -114,14 +113,6 @@ TEXT runtime·read(SB),NOSPLIT,$0-28
MOVL AX, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- LEAQ r+0(FP), DI
- MOVL $SYS_pipe, AX
- SYSCALL
- MOVL AX, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-20
LEAQ r+8(FP), DI
@@ -708,21 +699,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
SYSCALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVL fd+0(FP), DI // fd
- MOVQ $3, SI // F_GETFL
- MOVQ $0, DX
- MOVL $SYS_fcntl, AX
- SYSCALL
- MOVL fd+0(FP), DI // fd
- MOVQ $4, SI // F_SETFL
- MOVQ $0x800, DX // O_NONBLOCK
- ORL AX, DX
- MOVL $SYS_fcntl, AX
- SYSCALL
- RET
-
// int access(const char *name, int mode)
TEXT runtime·access(SB),NOSPLIT,$0
// This uses faccessat instead of access, because Android O blocks access.
diff --git a/src/runtime/sys_linux_arm.s b/src/runtime/sys_linux_arm.s
index ca443b699f..201940b4e6 100644
--- a/src/runtime/sys_linux_arm.s
+++ b/src/runtime/sys_linux_arm.s
@@ -23,7 +23,6 @@
#define SYS_close (SYS_BASE + 6)
#define SYS_getpid (SYS_BASE + 20)
#define SYS_kill (SYS_BASE + 37)
-#define SYS_pipe (SYS_BASE + 42)
#define SYS_clone (SYS_BASE + 120)
#define SYS_rt_sigreturn (SYS_BASE + 173)
#define SYS_rt_sigaction (SYS_BASE + 174)
@@ -98,14 +97,6 @@ TEXT runtime·read(SB),NOSPLIT,$0
MOVW R0, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVW $r+0(FP), R0
- MOVW $SYS_pipe, R7
- SWI $0
- MOVW R0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-16
MOVW $r+4(FP), R0
@@ -717,20 +708,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
SWI $0
RET
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVW fd+0(FP), R0 // fd
- MOVW $3, R1 // F_GETFL
- MOVW $0, R2
- MOVW $SYS_fcntl, R7
- SWI $0
- ORR $0x800, R0, R2 // O_NONBLOCK
- MOVW fd+0(FP), R0 // fd
- MOVW $4, R1 // F_SETFL
- MOVW $SYS_fcntl, R7
- SWI $0
- RET
-
// b __kuser_get_tls @ 0xffff0fe0
TEXT runtime·read_tls_fallback(SB),NOSPLIT|NOFRAME,$0
MOVW $0xffff0fe0, R0
diff --git a/src/runtime/sys_linux_arm64.s b/src/runtime/sys_linux_arm64.s
index 1276c077d7..ca362ed552 100644
--- a/src/runtime/sys_linux_arm64.s
+++ b/src/runtime/sys_linux_arm64.s
@@ -113,15 +113,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
MOVW R0, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVD $r+0(FP), R0
- MOVW $0, R1
- MOVW $SYS_pipe2, R8
- SVC
- MOVW R0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOVD $r+8(FP), R0
@@ -452,6 +443,7 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
BL (R11)
RET
+// Called from c-abi, R0: sig, R1: info, R2: ctx
TEXT runtime·sigtramp(SB),NOSPLIT,$192
// Save callee-save registers in the case of signal forwarding.
// Please refer to https://golang.org/issue/31827 .
@@ -511,9 +503,146 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$192
RET
-TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
- MOVD $runtime·sigtramp(SB), R3
- B (R3)
+// Called from c-abi, R0: sig, R1: info, R2: ctx
+TEXT runtime·sigprofNonGoWrapper<>(SB),NOSPLIT,$192
+ // TODO(eric): In multiple places we need to save and restore the
+ // callee-saved registers, we can define a macro for this.
+ // Save callee-save registers because it's a callback from c code.
+ MOVD R19, 8*4(RSP)
+ MOVD R20, 8*5(RSP)
+ MOVD R21, 8*6(RSP)
+ MOVD R22, 8*7(RSP)
+ MOVD R23, 8*8(RSP)
+ MOVD R24, 8*9(RSP)
+ MOVD R25, 8*10(RSP)
+ MOVD R26, 8*11(RSP)
+ MOVD R27, 8*12(RSP)
+ MOVD g, 8*13(RSP)
+ MOVD R29, 8*14(RSP)
+ FMOVD F8, 8*15(RSP)
+ FMOVD F9, 8*16(RSP)
+ FMOVD F10, 8*17(RSP)
+ FMOVD F11, 8*18(RSP)
+ FMOVD F12, 8*19(RSP)
+ FMOVD F13, 8*20(RSP)
+ FMOVD F14, 8*21(RSP)
+ FMOVD F15, 8*22(RSP)
+
+ MOVW R0, 8(RSP) // sig
+ MOVD R1, 16(RSP) // info
+ MOVD R2, 24(RSP) // ctx
+ CALL runtime·sigprofNonGo(SB)
+
+ // Restore callee-save registers.
+ MOVD 8*4(RSP), R19
+ MOVD 8*5(RSP), R20
+ MOVD 8*6(RSP), R21
+ MOVD 8*7(RSP), R22
+ MOVD 8*8(RSP), R23
+ MOVD 8*9(RSP), R24
+ MOVD 8*10(RSP), R25
+ MOVD 8*11(RSP), R26
+ MOVD 8*12(RSP), R27
+ MOVD 8*13(RSP), g
+ MOVD 8*14(RSP), R29
+ FMOVD 8*15(RSP), F8
+ FMOVD 8*16(RSP), F9
+ FMOVD 8*17(RSP), F10
+ FMOVD 8*18(RSP), F11
+ FMOVD 8*19(RSP), F12
+ FMOVD 8*20(RSP), F13
+ FMOVD 8*21(RSP), F14
+ FMOVD 8*22(RSP), F15
+ RET
+
+// Called from c-abi, R0: sig, R1: info, R2: ctx
+TEXT runtime·cgoSigtramp(SB),NOSPLIT|NOFRAME,$0
+ // The stack unwinder, presumably written in C, may not be able to
+ // handle Go frame correctly. So, this function is NOFRAME, and we
+ // save/restore LR manually.
+ MOVD LR, R10
+ // Save R27, g because they will be clobbered,
+ // we need to restore them before jump to sigtramp.
+ MOVD R27, R11
+ MOVD g, R12
+
+ // If no traceback function, do usual sigtramp.
+ MOVD runtime·cgoTraceback(SB), R6
+ CBZ R6, sigtramp
+
+ // If no traceback support function, which means that
+ // runtime/cgo was not linked in, do usual sigtramp.
+ MOVD _cgo_callers(SB), R7
+ CBZ R7, sigtramp
+
+ // Figure out if we are currently in a cgo call.
+ // If not, just do usual sigtramp.
+ // first save R0, because runtime·load_g will clobber it.
+ MOVD R0, R8
+ // Set up g register.
+ CALL runtime·load_g(SB)
+ MOVD R8, R0
+
+ CBZ g, sigtrampnog // g == nil
+ MOVD g_m(g), R6
+ CBZ R6, sigtramp // g.m == nil
+ MOVW m_ncgo(R6), R7
+ CBZW R7, sigtramp // g.m.ncgo = 0
+ MOVD m_curg(R6), R8
+ CBZ R8, sigtramp // g.m.curg == nil
+ MOVD g_syscallsp(R8), R7
+ CBZ R7, sigtramp // g.m.curg.syscallsp == 0
+ MOVD m_cgoCallers(R6), R4 // R4 is the fifth arg in C calling convention.
+ CBZ R4, sigtramp // g.m.cgoCallers == nil
+ MOVW m_cgoCallersUse(R6), R8
+ CBNZW R8, sigtramp // g.m.cgoCallersUse != 0
+
+ // Jump to a function in runtime/cgo.
+ // That function, written in C, will call the user's traceback
+ // function with proper unwind info, and will then call back here.
+ // The first three arguments, and the fifth, are already in registers.
+ // Set the two remaining arguments now.
+ MOVD runtime·cgoTraceback(SB), R3
+ MOVD $runtime·sigtramp(SB), R5
+ MOVD _cgo_callers(SB), R13
+ MOVD R10, LR // restore
+ MOVD R11, R27
+ MOVD R12, g
+ B (R13)
+
+sigtramp:
+ MOVD R10, LR // restore
+ MOVD R11, R27
+ MOVD R12, g
+ B runtime·sigtramp(SB)
+
+sigtrampnog:
+ // Signal arrived on a non-Go thread. If this is SIGPROF, get a
+ // stack trace.
+ CMPW $27, R0 // 27 == SIGPROF
+ BNE sigtramp
+
+ // Lock sigprofCallersUse (cas from 0 to 1).
+ MOVW $1, R7
+ MOVD $runtime·sigprofCallersUse(SB), R8
+load_store_loop:
+ LDAXRW (R8), R9
+ CBNZW R9, sigtramp // Skip stack trace if already locked.
+ STLXRW R7, (R8), R9
+ CBNZ R9, load_store_loop
+
+ // Jump to the traceback function in runtime/cgo.
+ // It will call back to sigprofNonGo, which will ignore the
+ // arguments passed in registers.
+ // First three arguments to traceback function are in registers already.
+ MOVD runtime·cgoTraceback(SB), R3
+ MOVD $runtime·sigprofCallers(SB), R4
+ MOVD $runtime·sigprofNonGoWrapper<>(SB), R5
+ MOVD _cgo_callers(SB), R13
+ MOVD R10, LR // restore
+ MOVD R11, R27
+ MOVD R12, g
+ B (R13)
TEXT runtime·sysMmap(SB),NOSPLIT|NOFRAME,$0
MOVD addr+0(FP), R0
@@ -740,21 +869,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
SVC
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R0 // fd
- MOVD $3, R1 // F_GETFL
- MOVD $0, R2
- MOVD $SYS_fcntl, R8
- SVC
- MOVD $0x800, R2 // O_NONBLOCK
- ORR R0, R2
- MOVW fd+0(FP), R0 // fd
- MOVD $4, R1 // F_SETFL
- MOVD $SYS_fcntl, R8
- SVC
- RET
-
// int access(const char *name, int mode)
TEXT runtime·access(SB),NOSPLIT,$0-20
MOVD $AT_FDCWD, R0
diff --git a/src/runtime/sys_linux_mips64x.s b/src/runtime/sys_linux_mips64x.s
index 0df2597993..3c7f0e7307 100644
--- a/src/runtime/sys_linux_mips64x.s
+++ b/src/runtime/sys_linux_mips64x.s
@@ -113,17 +113,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
MOVW R2, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVV $r+0(FP), R4
- MOVV R0, R5
- MOVV $SYS_pipe2, R2
- SYSCALL
- BEQ R7, 2(PC)
- SUBVU R2, R0, R2 // caller expects negative errno
- MOVW R2, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOVV $r+8(FP), R4
@@ -635,21 +624,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
SYSCALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R4 // fd
- MOVV $3, R5 // F_GETFL
- MOVV $0, R6
- MOVV $SYS_fcntl, R2
- SYSCALL
- MOVW $0x80, R6 // O_NONBLOCK
- OR R2, R6
- MOVW fd+0(FP), R4 // fd
- MOVV $4, R5 // F_SETFL
- MOVV $SYS_fcntl, R2
- SYSCALL
- RET
-
// func sbrk0() uintptr
TEXT runtime·sbrk0(SB),NOSPLIT|NOFRAME,$0-8
// Implemented as brk(NULL).
diff --git a/src/runtime/sys_linux_mipsx.s b/src/runtime/sys_linux_mipsx.s
index 2207e9ab98..ab4e976ee4 100644
--- a/src/runtime/sys_linux_mipsx.s
+++ b/src/runtime/sys_linux_mipsx.s
@@ -19,7 +19,6 @@
#define SYS_close 4006
#define SYS_getpid 4020
#define SYS_kill 4037
-#define SYS_pipe 4042
#define SYS_brk 4045
#define SYS_fcntl 4055
#define SYS_mmap 4090
@@ -112,23 +111,6 @@ TEXT runtime·read(SB),NOSPLIT,$0-16
MOVW R2, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVW $SYS_pipe, R2
- SYSCALL
- BEQ R7, pipeok
- MOVW $-1, R1
- MOVW R1, r+0(FP)
- MOVW R1, w+4(FP)
- SUBU R2, R0, R2 // caller expects negative errno
- MOVW R2, errno+8(FP)
- RET
-pipeok:
- MOVW R2, r+0(FP)
- MOVW R3, w+4(FP)
- MOVW R0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-16
MOVW $r+4(FP), R4
@@ -559,21 +541,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0-4
SYSCALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVW fd+0(FP), R4 // fd
- MOVW $3, R5 // F_GETFL
- MOVW $0, R6
- MOVW $SYS_fcntl, R2
- SYSCALL
- MOVW $0x80, R6 // O_NONBLOCK
- OR R2, R6
- MOVW fd+0(FP), R4 // fd
- MOVW $4, R5 // F_SETFL
- MOVW $SYS_fcntl, R2
- SYSCALL
- RET
-
// func sbrk0() uintptr
TEXT runtime·sbrk0(SB),NOSPLIT,$0-4
// Implemented as brk(NULL).
diff --git a/src/runtime/sys_linux_ppc64x.s b/src/runtime/sys_linux_ppc64x.s
index dc3d89fae7..48f9334795 100644
--- a/src/runtime/sys_linux_ppc64x.s
+++ b/src/runtime/sys_linux_ppc64x.s
@@ -20,7 +20,6 @@
#define SYS_close 6
#define SYS_getpid 20
#define SYS_kill 37
-#define SYS_pipe 42
#define SYS_brk 45
#define SYS_fcntl 55
#define SYS_mmap 90
@@ -104,13 +103,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
MOVW R3, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- ADD $FIXED_FRAME, R1, R3
- SYSCALL $SYS_pipe
- MOVW R3, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
ADD $FIXED_FRAME+8, R1, R3
@@ -933,18 +925,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
SYSCALL $SYS_fcntl
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R3 // fd
- MOVD $3, R4 // F_GETFL
- MOVD $0, R5
- SYSCALL $SYS_fcntl
- OR $0x800, R3, R5 // O_NONBLOCK
- MOVW fd+0(FP), R3 // fd
- MOVD $4, R4 // F_SETFL
- SYSCALL $SYS_fcntl
- RET
-
// func sbrk0() uintptr
TEXT runtime·sbrk0(SB),NOSPLIT|NOFRAME,$0
// Implemented as brk(NULL).
diff --git a/src/runtime/sys_linux_riscv64.s b/src/runtime/sys_linux_riscv64.s
index a3da46d136..8dde29eb92 100644
--- a/src/runtime/sys_linux_riscv64.s
+++ b/src/runtime/sys_linux_riscv64.s
@@ -118,15 +118,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
MOVW A0, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOV $r+0(FP), A0
- MOV ZERO, A1
- MOV $SYS_pipe2, A7
- ECALL
- MOVW A0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOV $r+8(FP), A0
@@ -635,21 +626,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
ECALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), A0 // fd
- MOV $3, A1 // F_GETFL
- MOV $0, A2
- MOV $SYS_fcntl, A7
- ECALL
- MOV $0x800, A2 // O_NONBLOCK
- OR A0, A2
- MOVW fd+0(FP), A0 // fd
- MOV $4, A1 // F_SETFL
- MOV $SYS_fcntl, A7
- ECALL
- RET
-
// func sbrk0() uintptr
TEXT runtime·sbrk0(SB),NOSPLIT,$0-8
// Implemented as brk(NULL).
diff --git a/src/runtime/sys_linux_s390x.s b/src/runtime/sys_linux_s390x.s
index 886add8b54..03ec7f03fd 100644
--- a/src/runtime/sys_linux_s390x.s
+++ b/src/runtime/sys_linux_s390x.s
@@ -16,7 +16,6 @@
#define SYS_close 6
#define SYS_getpid 20
#define SYS_kill 37
-#define SYS_pipe 42
#define SYS_brk 45
#define SYS_fcntl 55
#define SYS_mmap 90
@@ -103,14 +102,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
MOVW R2, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVD $r+0(FP), R2
- MOVW $SYS_pipe, R1
- SYSCALL
- MOVW R2, errno+8(FP)
- RET
-
// func pipe2() (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOVD $r+8(FP), R2
@@ -497,21 +488,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
SYSCALL
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R2 // fd
- MOVD $3, R3 // F_GETFL
- XOR R4, R4
- MOVW $SYS_fcntl, R1
- SYSCALL
- MOVD $0x800, R4 // O_NONBLOCK
- OR R2, R4
- MOVW fd+0(FP), R2 // fd
- MOVD $4, R3 // F_SETFL
- MOVW $SYS_fcntl, R1
- SYSCALL
- RET
-
// func sbrk0() uintptr
TEXT runtime·sbrk0(SB),NOSPLIT|NOFRAME,$0-8
// Implemented as brk(NULL).
diff --git a/src/runtime/sys_netbsd_386.s b/src/runtime/sys_netbsd_386.s
index 8a33894892..b7d4645af1 100644
--- a/src/runtime/sys_netbsd_386.s
+++ b/src/runtime/sys_netbsd_386.s
@@ -87,21 +87,6 @@ TEXT runtime·read(SB),NOSPLIT,$-4
MOVL AX, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVL $42, AX
- INT $0x80
- JCC pipeok
- MOVL $-1, r+0(FP)
- MOVL $-1, w+4(FP)
- MOVL AX, errno+8(FP)
- RET
-pipeok:
- MOVL AX, r+0(FP)
- MOVL DX, w+4(FP)
- MOVL $0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$12-16
MOVL $453, AX
@@ -484,20 +469,3 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$32
JAE 2(PC)
NEGL AX
RET
-
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$16-4
- MOVL $92, AX // fcntl
- MOVL fd+0(FP), BX // fd
- MOVL BX, 4(SP)
- MOVL $3, 8(SP) // F_GETFL
- MOVL $0, 12(SP)
- INT $0x80
- MOVL fd+0(FP), BX // fd
- MOVL BX, 4(SP)
- MOVL $4, 8(SP) // F_SETFL
- ORL $4, AX // O_NONBLOCK
- MOVL AX, 12(SP)
- MOVL $92, AX // fcntl
- INT $0x80
- RET
diff --git a/src/runtime/sys_netbsd_amd64.s b/src/runtime/sys_netbsd_amd64.s
index 02f5b4ba3b..41eddf3735 100644
--- a/src/runtime/sys_netbsd_amd64.s
+++ b/src/runtime/sys_netbsd_amd64.s
@@ -163,21 +163,6 @@ TEXT runtime·read(SB),NOSPLIT,$-8
MOVL AX, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- MOVL $42, AX
- SYSCALL
- JCC pipeok
- MOVL $-1, r+0(FP)
- MOVL $-1, w+4(FP)
- MOVL AX, errno+8(FP)
- RET
-pipeok:
- MOVL AX, r+0(FP)
- MOVL DX, w+4(FP)
- MOVL $0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-20
LEAQ r+8(FP), DI
@@ -449,18 +434,3 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
MOVL $SYS_fcntl, AX
SYSCALL
RET
-
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVL fd+0(FP), DI // fd
- MOVQ $3, SI // F_GETFL
- MOVQ $0, DX
- MOVL $92, AX // fcntl
- SYSCALL
- MOVL fd+0(FP), DI // fd
- MOVQ $4, SI // F_SETFL
- MOVQ $4, DX // O_NONBLOCK
- ORL AX, DX
- MOVL $92, AX // fcntl
- SYSCALL
- RET
diff --git a/src/runtime/sys_netbsd_arm.s b/src/runtime/sys_netbsd_arm.s
index 3a763b2a6a..bbca040994 100644
--- a/src/runtime/sys_netbsd_arm.s
+++ b/src/runtime/sys_netbsd_arm.s
@@ -96,22 +96,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0
MOVW R0, ret+12(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- SWI $0xa0002a
- BCC pipeok
- MOVW $-1,R2
- MOVW R2, r+0(FP)
- MOVW R2, w+4(FP)
- MOVW R0, errno+8(FP)
- RET
-pipeok:
- MOVW $0, R2
- MOVW R0, r+0(FP)
- MOVW R1, w+4(FP)
- MOVW R2, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-16
MOVW $r+4(FP), R0
@@ -422,18 +406,6 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
SWI $SYS_fcntl
RET
-// func runtime·setNonblock(fd int32)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVW fd+0(FP), R0 // fd
- MOVW $3, R1 // F_GETFL
- MOVW $0, R2
- SWI $0xa0005c // sys_fcntl
- ORR $0x4, R0, R2 // O_NONBLOCK
- MOVW fd+0(FP), R0 // fd
- MOVW $4, R1 // F_SETFL
- SWI $0xa0005c // sys_fcntl
- RET
-
// TODO: this is only valid for ARMv7+
TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
B runtime·armPublicationBarrier(SB)
diff --git a/src/runtime/sys_netbsd_arm64.s b/src/runtime/sys_netbsd_arm64.s
index 8a0496e807..f7cce57c2d 100644
--- a/src/runtime/sys_netbsd_arm64.s
+++ b/src/runtime/sys_netbsd_arm64.s
@@ -154,17 +154,6 @@ ok:
MOVW R0, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- ADD $8, RSP, R0
- MOVW $0, R1
- SVC $SYS_pipe2
- BCC pipeok
- NEG R0, R0
-pipeok:
- MOVW R0, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
ADD $16, RSP, R0
@@ -466,16 +455,3 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
MOVW $FD_CLOEXEC, R2
SVC $SYS_fcntl
RET
-
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R0 // arg 1 - fd
- MOVD $F_GETFL, R1 // arg 2 - cmd
- MOVD $0, R2 // arg 3
- SVC $SYS_fcntl
- MOVD $O_NONBLOCK, R2
- EOR R0, R2 // arg 3 - flags
- MOVW fd+0(FP), R0 // arg 1 - fd
- MOVD $F_SETFL, R1 // arg 2 - cmd
- SVC $SYS_fcntl
- RET
diff --git a/src/runtime/sys_openbsd2.go b/src/runtime/sys_openbsd2.go
index 4d50b4f6b1..d174d87a49 100644
--- a/src/runtime/sys_openbsd2.go
+++ b/src/runtime/sys_openbsd2.go
@@ -111,10 +111,6 @@ func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
}
func write_trampoline()
-func pipe() (r, w int32, errno int32) {
- return pipe2(0)
-}
-
func pipe2(flags int32) (r, w int32, errno int32) {
var p [2]int32
args := struct {
@@ -258,12 +254,6 @@ func closeonexec(fd int32) {
fcntl(fd, _F_SETFD, _FD_CLOEXEC)
}
-//go:nosplit
-func setNonblock(fd int32) {
- flags := fcntl(fd, _F_GETFL, 0)
- fcntl(fd, _F_SETFL, flags|_O_NONBLOCK)
-}
-
// Tell the linker that the libc_* functions are to be found
// in a system library, with the libc_ prefix missing.
diff --git a/src/runtime/sys_openbsd_mips64.s b/src/runtime/sys_openbsd_mips64.s
index f8ae8e7c30..3b18bdda7a 100644
--- a/src/runtime/sys_openbsd_mips64.s
+++ b/src/runtime/sys_openbsd_mips64.s
@@ -64,17 +64,6 @@ TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0
MOVW R2, ret+24(FP)
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVV $r+0(FP), R4
- MOVW $0, R5
- MOVV $101, R2 // sys_pipe2
- SYSCALL
- BEQ R7, 2(PC)
- SUBVU R2, R0, R2 // caller expects negative errno
- MOVW R2, errno+8(FP)
- RET
-
// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
MOVV $r+8(FP), R4
@@ -383,18 +372,3 @@ TEXT runtime·closeonexec(SB),NOSPLIT,$0
MOVV $92, R2 // sys_fcntl
SYSCALL
RET
-
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R4 // arg 1 - fd
- MOVV $3, R5 // arg 2 - cmd (F_GETFL)
- MOVV $0, R6 // arg 3
- MOVV $92, R2 // sys_fcntl
- SYSCALL
- MOVV $4, R6 // O_NONBLOCK
- OR R2, R6 // arg 3 - flags
- MOVW fd+0(FP), R4 // arg 1 - fd
- MOVV $4, R5 // arg 2 - cmd (F_SETFL)
- MOVV $92, R2 // sys_fcntl
- SYSCALL
- RET
diff --git a/src/runtime/testdata/testprogcgo/aprof.go b/src/runtime/testdata/testprogcgo/aprof.go
index c70d6333bb..16870144dd 100644
--- a/src/runtime/testdata/testprogcgo/aprof.go
+++ b/src/runtime/testdata/testprogcgo/aprof.go
@@ -10,7 +10,7 @@ package main
// This is a regression test for issue 14599, where profiling fails when the
// function is the first C function. Exported functions are the first C
// functions, so we use an exported function. Exported functions are created in
-// lexigraphical order of source files, so this file is named aprof.go to
+// lexicographical order of source files, so this file is named aprof.go to
// ensure its function is first.
// extern void CallGoNop();
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index 71a29d4316..8f60de2b05 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -229,7 +229,7 @@ func StartTrace() error {
gp.traceseq = 0
gp.tracelastp = getg().m.p
// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
- id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
+ id := trace.stackTab.put([]uintptr{startPCforTrace(gp.startpc) + sys.PCQuantum})
traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
}
if status == _Gwaiting {
@@ -1071,7 +1071,7 @@ func traceGoCreate(newg *g, pc uintptr) {
newg.traceseq = 0
newg.tracelastp = getg().m.p
// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
- id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
+ id := trace.stackTab.put([]uintptr{startPCforTrace(pc) + sys.PCQuantum})
traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}
@@ -1244,3 +1244,17 @@ func trace_userLog(id uint64, category, message string) {
traceReleaseBuffer(pid)
}
+
+// the start PC of a goroutine for tracing purposes. If pc is a wrapper,
+// it returns the PC of the wrapped function. Otherwise it returns pc.
+func startPCforTrace(pc uintptr) uintptr {
+ f := findfunc(pc)
+ if !f.valid() {
+ return pc // should not happen, but don't care
+ }
+ w := funcdata(f, _FUNCDATA_WrapInfo)
+ if w == nil {
+ return pc // not a wrapper
+ }
+ return f.datap.textAddr(*(*uint32)(w))
+}
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 73bd0e11a9..0cdd53cc93 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -1229,9 +1229,9 @@ func isSystemGoroutine(gp *g, fixed bool) bool {
//
// On all platforms, the traceback function is invoked when a call from
// Go to C to Go requests a stack trace. On linux/amd64, linux/ppc64le,
-// and freebsd/amd64, the traceback function is also invoked when a
-// signal is received by a thread that is executing a cgo call. The
-// traceback function should not make assumptions about when it is
+// linux/arm64, and freebsd/amd64, the traceback function is also invoked
+// when a signal is received by a thread that is executing a cgo call.
+// The traceback function should not make assumptions about when it is
// called, as future versions of Go may make additional calls.
//
// The symbolizer function will be called with a single argument, a