author    Than McIntosh <thanm@google.com>  2019-11-01 10:44:44 -0400
committer Than McIntosh <thanm@google.com>  2019-11-01 10:45:24 -0400
commit    c0555a2a7a0ca83fdbb55219299fcfe1ff33e4df (patch)
tree      25c35dc6bf5b8134906338df0f612fbe75089ca7 /src/runtime
parent    219922e95b8e49cfb94da9de0c48edb22a2e7054 (diff)
parent    8405cd3005a5274e80e41676146629c4086b6380 (diff)
download  go-c0555a2a7a0ca83fdbb55219299fcfe1ff33e4df.tar.xz
[dev.link] all: merge branch 'master' into dev.link
Fixed a couple of minor conflicts in lib.go and deadcode.go relating to debug logging.

Change-Id: I58335fc42ab1f1f3409fd8354da4f26419e8fb22
Diffstat (limited to 'src/runtime')
-rw-r--r-- | src/runtime/cgo/gcc_freebsd_arm64.c | 68
-rw-r--r-- | src/runtime/cgocheck.go | 2
-rw-r--r-- | src/runtime/chan.go | 14
-rw-r--r-- | src/runtime/crash_unix_test.go | 24
-rw-r--r-- | src/runtime/defer_test.go | 35
-rw-r--r-- | src/runtime/defs_freebsd_386.go | 2
-rw-r--r-- | src/runtime/defs_freebsd_amd64.go | 2
-rw-r--r-- | src/runtime/defs_freebsd_arm.go | 2
-rw-r--r-- | src/runtime/defs_freebsd_arm64.go | 259
-rw-r--r-- | src/runtime/defs_illumos_amd64.go | 14
-rw-r--r-- | src/runtime/export_aix_test.go | 7
-rw-r--r-- | src/runtime/export_test.go | 8
-rw-r--r-- | src/runtime/export_unix_test.go | 45
-rw-r--r-- | src/runtime/heapdump.go | 15
-rw-r--r-- | src/runtime/internal/atomic/asm_386.s | 6
-rw-r--r-- | src/runtime/internal/atomic/asm_amd64.s | 6
-rw-r--r-- | src/runtime/internal/atomic/asm_mips64x.s | 8
-rw-r--r-- | src/runtime/internal/atomic/asm_mipsx.s | 8
-rw-r--r-- | src/runtime/internal/atomic/asm_ppc64x.s | 7
-rw-r--r-- | src/runtime/internal/atomic/asm_s390x.s | 8
-rw-r--r-- | src/runtime/internal/atomic/atomic_386.go | 3
-rw-r--r-- | src/runtime/internal/atomic/atomic_amd64.go | 3
-rw-r--r-- | src/runtime/internal/atomic/atomic_arm.go | 3
-rw-r--r-- | src/runtime/internal/atomic/atomic_arm64.go | 3
-rw-r--r-- | src/runtime/internal/atomic/atomic_arm64.s | 6
-rw-r--r-- | src/runtime/internal/atomic/atomic_mips64x.go | 3
-rw-r--r-- | src/runtime/internal/atomic/atomic_mipsx.go | 3
-rw-r--r-- | src/runtime/internal/atomic/atomic_ppc64x.go | 3
-rw-r--r-- | src/runtime/internal/atomic/atomic_s390x.go | 3
-rw-r--r-- | src/runtime/internal/atomic/atomic_test.go | 117
-rw-r--r-- | src/runtime/internal/atomic/atomic_wasm.go | 6
-rw-r--r-- | src/runtime/internal/atomic/sys_linux_arm.s | 22
-rw-r--r-- | src/runtime/internal/atomic/sys_nonlinux_arm.s | 17
-rw-r--r-- | src/runtime/lock_js.go | 2
-rw-r--r-- | src/runtime/mbitmap.go | 93
-rw-r--r-- | src/runtime/mgc.go | 3
-rw-r--r-- | src/runtime/mgcmark.go | 54
-rw-r--r-- | src/runtime/mgcsweep.go | 12
-rw-r--r-- | src/runtime/mgcwork.go | 10
-rw-r--r-- | src/runtime/mheap.go | 120
-rw-r--r-- | src/runtime/nbpipe_fcntl_aix_test.go | 17
-rw-r--r-- | src/runtime/nbpipe_fcntl_unix_test.go | 14
-rw-r--r-- | src/runtime/nbpipe_test.go | 4
-rw-r--r-- | src/runtime/netpoll_aix.go | 9
-rw-r--r-- | src/runtime/netpoll_solaris.go | 4
-rw-r--r-- | src/runtime/netpoll_stub.go | 7
-rw-r--r-- | src/runtime/os2_aix.go | 19
-rw-r--r-- | src/runtime/os3_solaris.go | 24
-rw-r--r-- | src/runtime/os_aix.go | 7
-rw-r--r-- | src/runtime/os_darwin.go | 5
-rw-r--r-- | src/runtime/os_dragonfly.go | 25
-rw-r--r-- | src/runtime/os_freebsd.go | 29
-rw-r--r-- | src/runtime/os_freebsd_arm64.go | 156
-rw-r--r-- | src/runtime/os_freebsd_noauxv.go | 2
-rw-r--r-- | src/runtime/os_illumos.go | 132
-rw-r--r-- | src/runtime/os_linux.go | 12
-rw-r--r-- | src/runtime/os_netbsd.go | 17
-rw-r--r-- | src/runtime/os_only_solaris.go | 18
-rw-r--r-- | src/runtime/os_openbsd.go | 19
-rw-r--r-- | src/runtime/preempt.go | 234
-rw-r--r-- | src/runtime/proc.go | 195
-rw-r--r-- | src/runtime/proc_test.go | 2
-rw-r--r-- | src/runtime/rt0_freebsd_arm64.s | 106
-rw-r--r-- | src/runtime/runtime2.go | 86
-rw-r--r-- | src/runtime/select.go | 4
-rw-r--r-- | src/runtime/signal_arm64.go | 2
-rw-r--r-- | src/runtime/signal_freebsd_arm64.go | 66
-rw-r--r-- | src/runtime/signal_unix.go | 13
-rw-r--r-- | src/runtime/stack.go | 109
-rw-r--r-- | src/runtime/sys_darwin.go | 10
-rw-r--r-- | src/runtime/sys_darwin_386.s | 25
-rw-r--r-- | src/runtime/sys_darwin_amd64.s | 18
-rw-r--r-- | src/runtime/sys_darwin_arm.s | 32
-rw-r--r-- | src/runtime/sys_darwin_arm64.s | 12
-rw-r--r-- | src/runtime/sys_dragonfly_amd64.s | 12
-rw-r--r-- | src/runtime/sys_freebsd_386.s | 15
-rw-r--r-- | src/runtime/sys_freebsd_amd64.s | 15
-rw-r--r-- | src/runtime/sys_freebsd_arm.s | 26
-rw-r--r-- | src/runtime/sys_freebsd_arm64.s | 543
-rw-r--r-- | src/runtime/sys_linux_386.s | 14
-rw-r--r-- | src/runtime/sys_linux_amd64.s | 14
-rw-r--r-- | src/runtime/sys_linux_arm.s | 25
-rw-r--r-- | src/runtime/sys_linux_arm64.s | 14
-rw-r--r-- | src/runtime/sys_linux_mips64x.s | 14
-rw-r--r-- | src/runtime/sys_linux_mipsx.s | 14
-rw-r--r-- | src/runtime/sys_linux_ppc64x.s | 12
-rw-r--r-- | src/runtime/sys_linux_s390x.s | 14
-rw-r--r-- | src/runtime/sys_netbsd_386.s | 7
-rw-r--r-- | src/runtime/sys_netbsd_amd64.s | 8
-rw-r--r-- | src/runtime/sys_netbsd_arm.s | 17
-rw-r--r-- | src/runtime/sys_netbsd_arm64.s | 7
-rw-r--r-- | src/runtime/sys_openbsd_386.s | 9
-rw-r--r-- | src/runtime/sys_openbsd_amd64.s | 10
-rw-r--r-- | src/runtime/sys_openbsd_arm.s | 21
-rw-r--r-- | src/runtime/sys_openbsd_arm64.s | 10
-rw-r--r-- | src/runtime/time.go | 12
-rw-r--r-- | src/runtime/tls_arm64.h | 5
-rw-r--r-- | src/runtime/traceback.go | 1
-rw-r--r-- | src/runtime/type.go | 2
-rw-r--r-- | src/runtime/vdso_freebsd_arm64.go | 21
100 files changed, 2841 insertions, 481 deletions
diff --git a/src/runtime/cgo/gcc_freebsd_arm64.c b/src/runtime/cgo/gcc_freebsd_arm64.c
new file mode 100644
index 0000000000..dd8f888290
--- /dev/null
+++ b/src/runtime/cgo/gcc_freebsd_arm64.c
@@ -0,0 +1,68 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <sys/types.h>
+#include <errno.h>
+#include <sys/signalvar.h>
+#include <pthread.h>
+#include <signal.h>
+#include <string.h>
+#include "libcgo.h"
+#include "libcgo_unix.h"
+
+static void* threadentry(void*);
+static void (*setg_gcc)(void*);
+
+void
+x_cgo_init(G *g, void (*setg)(void*))
+{
+ pthread_attr_t attr;
+ size_t size;
+
+ setg_gcc = setg;
+ pthread_attr_init(&attr);
+ pthread_attr_getstacksize(&attr, &size);
+ g->stacklo = (uintptr)&attr - size + 4096;
+ pthread_attr_destroy(&attr);
+}
+
+void
+_cgo_sys_thread_start(ThreadStart *ts)
+{
+ pthread_attr_t attr;
+ sigset_t ign, oset;
+ pthread_t p;
+ size_t size;
+ int err;
+
+ SIGFILLSET(ign);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
+
+ pthread_attr_init(&attr);
+ pthread_attr_getstacksize(&attr, &size);
+ // Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+ ts->g->stackhi = size;
+ err = _cgo_try_pthread_create(&p, &attr, threadentry, ts);
+
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
+
+ if (err != 0) {
+ fprintf(stderr, "runtime/cgo: pthread_create failed: %s\n", strerror(err));
+ abort();
+ }
+}
+
+extern void crosscall1(void (*fn)(void), void (*setg_gcc)(void*), void *g);
+
+static void*
+threadentry(void *v)
+{
+ ThreadStart ts;
+
+ ts = *(ThreadStart*)v;
+ free(v);
+
+ crosscall1(ts.fn, setg_gcc, (void*)ts.g);
+ return nil;
+}
diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go
index ed854e5e2b..9c5b26e4f3 100644
--- a/src/runtime/cgocheck.go
+++ b/src/runtime/cgocheck.go
@@ -133,7 +133,7 @@ func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
}
s := spanOfUnchecked(uintptr(src))
- if s.state == mSpanManual {
+ if s.state.get() == mSpanManual {
// There are no heap bits for value stored on the stack.
// For a channel receive src might be on the stack of some
// other goroutine, so we can't unwind the stack even if
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index 93afe90dad..677af99eac 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -249,7 +249,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
gp.waiting = mysg
gp.param = nil
c.sendq.enqueue(mysg)
- goparkunlock(&c.lock, waitReasonChanSend, traceEvGoBlockSend, 3)
+ gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
// Ensure the value being sent is kept alive until the
// receiver copies it out. The sudog has a pointer to the
// stack object, but sudogs aren't considered as roots of the
@@ -261,6 +261,7 @@ func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
throw("G waiting list is corrupted")
}
gp.waiting = nil
+ gp.activeStackChans = false
if gp.param == nil {
if c.closed == 0 {
throw("chansend: spurious wakeup")
@@ -559,13 +560,14 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
mysg.c = c
gp.param = nil
c.recvq.enqueue(mysg)
- goparkunlock(&c.lock, waitReasonChanReceive, traceEvGoBlockRecv, 3)
+ gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)
// someone woke us up
if mysg != gp.waiting {
throw("G waiting list is corrupted")
}
gp.waiting = nil
+ gp.activeStackChans = false
if mysg.releasetime > 0 {
blockevent(mysg.releasetime-t0, 2)
}
@@ -632,6 +634,14 @@ func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int) {
goready(gp, skip+1)
}
+func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
+ // There are unlocked sudogs that point into gp's stack. Stack
+ // copying must lock the channels of those sudogs.
+ gp.activeStackChans = true
+ unlock((*mutex)(chanLock))
+ return true
+}
+
// compiler implements
//
// select {
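
Note: the switch from goparkunlock to gopark with an explicit unlock callback is what makes the new activeStackChans flag sound. A condensed sketch of the ordering, using only names that appear in this diff (tracing and error handling elided):

	// chanparkcommit runs after this G is committed to sleeping but
	// before the channel lock is released, so there is no window in
	// which stack copying could move the sudog out from under a peer.
	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanSend, traceEvGoBlockSend, 2)
	// ...woken by the other side of the channel operation...
	gp.waiting = nil
	gp.activeStackChans = false // stack copying no longer needs channel locks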
diff --git a/src/runtime/crash_unix_test.go b/src/runtime/crash_unix_test.go
index 4be4962f90..93cee350d0 100644
--- a/src/runtime/crash_unix_test.go
+++ b/src/runtime/crash_unix_test.go
@@ -16,6 +16,7 @@ import (
"path/filepath"
"runtime"
"strings"
+ "sync"
"syscall"
"testing"
"unsafe"
@@ -309,3 +310,26 @@ func TestSignalDuringExec(t *testing.T) {
t.Fatalf("want %s, got %s\n", want, output)
}
}
+
+func TestSignalM(t *testing.T) {
+ var want, got int64
+ var wg sync.WaitGroup
+ ready := make(chan *runtime.M)
+ wg.Add(1)
+ go func() {
+ runtime.LockOSThread()
+ want, got = runtime.WaitForSigusr1(func(mp *runtime.M) {
+ ready <- mp
+ }, 1e9)
+ runtime.UnlockOSThread()
+ wg.Done()
+ }()
+ waitingM := <-ready
+ runtime.SendSigusr1(waitingM)
+ wg.Wait()
+ if got == -1 {
+ t.Fatal("signalM signal not received")
+ } else if want != got {
+ t.Fatalf("signal sent to M %d, but received on M %d", want, got)
+ }
+}
diff --git a/src/runtime/defer_test.go b/src/runtime/defer_test.go
index d830fc591f..f03bdb47d5 100644
--- a/src/runtime/defer_test.go
+++ b/src/runtime/defer_test.go
@@ -181,12 +181,16 @@ type bigStruct struct {
x, y, z, w, p, q int64
}
+type containsBigStruct struct {
+ element bigStruct
+}
+
func mknonSSAable() nonSSAable {
globint1++
return nonSSAable{0, 0, 0, 0, 5}
}
-var globint1, globint2 int
+var globint1, globint2, globint3 int
//go:noinline
func sideeffect(n int64) int64 {
@@ -194,12 +198,20 @@ func sideeffect(n int64) int64 {
return n
}
+func sideeffect2(in containsBigStruct) containsBigStruct {
+ globint3++
+ return in
+}
+
// Test that nonSSAable arguments to defer are handled correctly and only evaluated once.
func TestNonSSAableArgs(t *testing.T) {
globint1 = 0
globint2 = 0
+ globint3 = 0
var save1 byte
var save2 int64
+ var save3 int64
+ var save4 int64
defer func() {
if globint1 != 1 {
@@ -214,12 +226,33 @@ func TestNonSSAableArgs(t *testing.T) {
if save2 != 2 {
t.Fatal(fmt.Sprintf("save2: wanted: 2, got %v", save2))
}
+ if save3 != 4 {
+ t.Fatal(fmt.Sprintf("save3: wanted: 4, got %v", save3))
+ }
+ if globint3 != 1 {
+ t.Fatal(fmt.Sprintf("globint3: wanted: 1, got %v", globint3))
+ }
+ if save4 != 4 {
+ t.Fatal(fmt.Sprintf("save1: wanted: 4, got %v", save4))
+ }
}()
+ // Test function returning a non-SSAable arg
defer func(n nonSSAable) {
save1 = n[4]
}(mknonSSAable())
+ // Test composite literal that is not SSAable
defer func(b bigStruct) {
save2 = b.y
}(bigStruct{1, 2, 3, 4, 5, sideeffect(6)})
+
+ // Test struct field reference that is non-SSAable
+ foo := containsBigStruct{}
+ foo.element.z = 4
+ defer func(element bigStruct) {
+ save3 = element.z
+ }(foo.element)
+ defer func(element bigStruct) {
+ save4 = element.z
+ }(sideeffect2(foo).element)
}
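
These tests lean on a basic property of defer: argument expressions are evaluated once, at the point of the defer statement, not when the deferred call runs. A minimal standalone illustration (not part of the change):

	package main

	import "fmt"

	func main() {
		x := 1
		defer fmt.Println("deferred saw:", x) // x evaluated here: prints 1
		x = 2
		fmt.Println("final value:", x) // prints 2
	}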
diff --git a/src/runtime/defs_freebsd_386.go b/src/runtime/defs_freebsd_386.go
index 6294fc32d4..767755425c 100644
--- a/src/runtime/defs_freebsd_386.go
+++ b/src/runtime/defs_freebsd_386.go
@@ -126,6 +126,8 @@ type thrparam struct {
spare [3]uintptr
}
+type thread int32 // long
+
type sigset struct {
__bits [4]uint32
}
diff --git a/src/runtime/defs_freebsd_amd64.go b/src/runtime/defs_freebsd_amd64.go
index 840c710eeb..5a833426fd 100644
--- a/src/runtime/defs_freebsd_amd64.go
+++ b/src/runtime/defs_freebsd_amd64.go
@@ -127,6 +127,8 @@ type thrparam struct {
spare [3]uintptr
}
+type thread int64 // long
+
type sigset struct {
__bits [4]uint32
}
diff --git a/src/runtime/defs_freebsd_arm.go b/src/runtime/defs_freebsd_arm.go
index 3307c8bbae..b55dfd88cf 100644
--- a/src/runtime/defs_freebsd_arm.go
+++ b/src/runtime/defs_freebsd_arm.go
@@ -126,6 +126,8 @@ type thrparam struct {
spare [3]uintptr
}
+type thread int32 // long
+
type sigset struct {
__bits [4]uint32
}
diff --git a/src/runtime/defs_freebsd_arm64.go b/src/runtime/defs_freebsd_arm64.go
new file mode 100644
index 0000000000..5b9d504ba6
--- /dev/null
+++ b/src/runtime/defs_freebsd_arm64.go
@@ -0,0 +1,259 @@
+// created by cgo -cdefs and then converted to Go
+// cgo -cdefs defs_freebsd.go
+
+package runtime
+
+import "unsafe"
+
+const (
+ _NBBY = 0x8
+ _CTL_MAXNAME = 0x18
+ _CPU_LEVEL_WHICH = 0x3
+ _CPU_WHICH_PID = 0x2
+)
+
+const (
+ _EINTR = 0x4
+ _EFAULT = 0xe
+ _EAGAIN = 0x23
+ _ENOSYS = 0x4e
+
+ _O_NONBLOCK = 0x4
+ _O_CLOEXEC = 0x100000
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x1000
+ _MAP_SHARED = 0x1
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_FREE = 0x5
+
+ _SA_SIGINFO = 0x40
+ _SA_RESTART = 0x2
+ _SA_ONSTACK = 0x1
+
+ _CLOCK_MONOTONIC = 0x4
+ _CLOCK_REALTIME = 0x0
+
+ _UMTX_OP_WAIT_UINT = 0xb
+ _UMTX_OP_WAIT_UINT_PRIVATE = 0xf
+ _UMTX_OP_WAKE = 0x3
+ _UMTX_OP_WAKE_PRIVATE = 0x10
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGTERM = 0xf
+ _SIGURG = 0x10
+ _SIGSTOP = 0x11
+ _SIGTSTP = 0x12
+ _SIGCONT = 0x13
+ _SIGCHLD = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGIO = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGINFO = 0x1d
+ _SIGUSR1 = 0x1e
+ _SIGUSR2 = 0x1f
+
+ _FPE_INTDIV = 0x2
+ _FPE_INTOVF = 0x1
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EV_ADD = 0x1
+ _EV_DELETE = 0x2
+ _EV_CLEAR = 0x20
+ _EV_RECEIPT = 0x40
+ _EV_ERROR = 0x4000
+ _EV_EOF = 0x8000
+ _EVFILT_READ = -0x1
+ _EVFILT_WRITE = -0x2
+)
+
+type rtprio struct {
+ _type uint16
+ prio uint16
+}
+
+type thrparam struct {
+ start_func uintptr
+ arg unsafe.Pointer
+ stack_base uintptr
+ stack_size uintptr
+ tls_base unsafe.Pointer
+ tls_size uintptr
+ child_tid unsafe.Pointer // *int64
+ parent_tid *int64
+ flags int32
+ pad_cgo_0 [4]byte
+ rtp *rtprio
+ spare [3]uintptr
+}
+
+type thread int64 // long
+
+type sigset struct {
+ __bits [4]uint32
+}
+
+type stackt struct {
+ ss_sp uintptr
+ ss_size uintptr
+ ss_flags int32
+ pad_cgo_0 [4]byte
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ si_pid int32
+ si_uid uint32
+ si_status int32
+ si_addr uint64
+ si_value [8]byte
+ _reason [40]byte
+}
+
+type gpregs struct {
+ gp_x [30]uint64
+ gp_lr uint64
+ gp_sp uint64
+ gp_elr uint64
+ gp_spsr uint32
+ gp_pad int32
+}
+
+type fpregs struct {
+ fp_q [64]uint64 // actually [32]uint128
+ fp_sr uint32
+ fp_cr uint32
+ fp_flags int32
+ fp_pad int32
+}
+
+type mcontext struct {
+ mc_gpregs gpregs
+ mc_fpregs fpregs
+ mc_flags int32
+ mc_pad int32
+ mc_spare [8]uint64
+}
+
+type ucontext struct {
+ uc_sigmask sigset
+ uc_mcontext mcontext
+ uc_link *ucontext
+ uc_stack stackt
+ uc_flags int32
+ __spare__ [4]int32
+ pad_cgo_0 [12]byte
+}
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+//go:nosplit
+func (ts *timespec) setNsec(ns int64) {
+ ts.tv_sec = ns / 1e9
+ ts.tv_nsec = ns % 1e9
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = int64(x)
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type umtx_time struct {
+ _timeout timespec
+ _flags uint32
+ _clockid uint32
+}
+
+type keventt struct {
+ ident uint64
+ filter int16
+ flags uint16
+ fflags uint32
+ data int64
+ udata *byte
+}
+
+type bintime struct {
+ sec int64
+ frac uint64
+}
+
+type vdsoTimehands struct {
+ algo uint32
+ gen uint32
+ scale uint64
+ offset_count uint32
+ counter_mask uint32
+ offset bintime
+ boottime bintime
+ physical uint32
+ res [7]uint32
+}
+
+type vdsoTimekeep struct {
+ ver uint32
+ enabled uint32
+ current uint32
+ pad_cgo_0 [4]byte
+}
+
+const (
+ _VDSO_TK_VER_CURR = 0x1
+
+ vdsoTimehandsSize = 0x58
+ vdsoTimekeepSize = 0x10
+)
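
The setNsec helper above splits a nanosecond count into whole seconds plus a nanosecond remainder, matching the kernel's timespec layout. A quick standalone check of the arithmetic (plain int64 math, outside the runtime):

	package main

	import "fmt"

	func main() {
		ns := int64(1500000000)     // 1.5 seconds
		fmt.Println(ns/1e9, ns%1e9) // tv_sec=1, tv_nsec=500000000
	}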
diff --git a/src/runtime/defs_illumos_amd64.go b/src/runtime/defs_illumos_amd64.go
new file mode 100644
index 0000000000..9c5413bae3
--- /dev/null
+++ b/src/runtime/defs_illumos_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _RCTL_LOCAL_DENY = 0x2
+
+ _RCTL_LOCAL_MAXIMAL = 0x80000000
+
+ _RCTL_FIRST = 0x0
+ _RCTL_NEXT = 0x1
+)
diff --git a/src/runtime/export_aix_test.go b/src/runtime/export_aix_test.go
new file mode 100644
index 0000000000..162552d04c
--- /dev/null
+++ b/src/runtime/export_aix_test.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+var Fcntl = syscall_fcntl1
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index f5b44a29a0..831f3f13d4 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -40,6 +40,8 @@ var Usleep = usleep
var PhysHugePageSize = physHugePageSize
+var NetpollGenericInit = netpollGenericInit
+
type LFNode struct {
Next uint64
Pushcnt uintptr
@@ -254,7 +256,7 @@ func CountPagesInUse() (pagesInUse, counted uintptr) {
pagesInUse = uintptr(mheap_.pagesInUse)
for _, s := range mheap_.allspans {
- if s.state == mSpanInUse {
+ if s.state.get() == mSpanInUse {
counted += s.npages
}
}
@@ -316,7 +318,7 @@ func ReadMemStatsSlow() (base, slow MemStats) {
// Add up current allocations in spans.
for _, s := range mheap_.allspans {
- if s.state != mSpanInUse {
+ if s.state.get() != mSpanInUse {
continue
}
if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
@@ -540,7 +542,7 @@ func UnscavHugePagesSlow() (uintptr, uintptr) {
lock(&mheap_.lock)
base = mheap_.free.unscavHugePages
for _, s := range mheap_.allspans {
- if s.state == mSpanFree && !s.scavenged {
+ if s.state.get() == mSpanFree && !s.scavenged {
slow += s.hugePages()
}
}
diff --git a/src/runtime/export_unix_test.go b/src/runtime/export_unix_test.go
index eecdfb7eb2..7af1c1dd54 100644
--- a/src/runtime/export_unix_test.go
+++ b/src/runtime/export_unix_test.go
@@ -17,3 +17,48 @@ func Sigisblocked(i int) bool {
sigprocmask(_SIG_SETMASK, nil, &sigmask)
return sigismember(&sigmask, i)
}
+
+type M = m
+
+var waitForSigusr1 struct {
+ park note
+ mp *m
+}
+
+// WaitForSigusr1 blocks until a SIGUSR1 is received. It calls ready
+// when it is set up to receive SIGUSR1. The ready function should
+// cause a SIGUSR1 to be sent.
+//
+// Once SIGUSR1 is received, it returns the ID of the current M and
+// the ID of the M the SIGUSR1 was received on. If no SIGUSR1 is
+// received for timeoutNS nanoseconds, it returns -1.
+func WaitForSigusr1(ready func(mp *M), timeoutNS int64) (int64, int64) {
+ lockOSThread()
+ // Make sure we can receive SIGUSR1.
+ unblocksig(_SIGUSR1)
+
+ mp := getg().m
+ testSigusr1 = func(gp *g) bool {
+ waitForSigusr1.mp = gp.m
+ notewakeup(&waitForSigusr1.park)
+ return true
+ }
+ ready(mp)
+ ok := notetsleepg(&waitForSigusr1.park, timeoutNS)
+ noteclear(&waitForSigusr1.park)
+ gotM := waitForSigusr1.mp
+ waitForSigusr1.mp = nil
+ testSigusr1 = nil
+
+ unlockOSThread()
+
+ if !ok {
+ return -1, -1
+ }
+ return mp.id, gotM.id
+}
+
+// SendSigusr1 sends SIGUSR1 to mp.
+func SendSigusr1(mp *M) {
+ signalM(mp, _SIGUSR1)
+}
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 992df6391e..cfd5c251b4 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -371,7 +371,12 @@ func dumpgoroutine(gp *g) {
dumpint(uint64(d.sp))
dumpint(uint64(d.pc))
dumpint(uint64(uintptr(unsafe.Pointer(d.fn))))
- dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
+ if d.fn == nil {
+ // d.fn can be nil for open-coded defers
+ dumpint(uint64(0))
+ } else {
+ dumpint(uint64(uintptr(unsafe.Pointer(d.fn.fn))))
+ }
dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
}
for p := gp._panic; p != nil; p = p.link {
@@ -430,7 +435,7 @@ func dumproots() {
// mspan.types
for _, s := range mheap_.allspans {
- if s.state == mSpanInUse {
+ if s.state.get() == mSpanInUse {
// Finalizers
for sp := s.specials; sp != nil; sp = sp.next {
if sp.kind != _KindSpecialFinalizer {
@@ -453,7 +458,7 @@ var freemark [_PageSize / 8]bool
func dumpobjs() {
for _, s := range mheap_.allspans {
- if s.state != mSpanInUse {
+ if s.state.get() != mSpanInUse {
continue
}
p := s.base()
@@ -616,7 +621,7 @@ func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs,
func dumpmemprof() {
iterate_memprof(dumpmemprof_callback)
for _, s := range mheap_.allspans {
- if s.state != mSpanInUse {
+ if s.state.get() != mSpanInUse {
continue
}
for sp := s.specials; sp != nil; sp = sp.next {
@@ -637,7 +642,7 @@ var dumphdr = []byte("go1.7 heap dump\n")
func mdump() {
// make sure we're done sweeping
for _, s := range mheap_.allspans {
- if s.state == mSpanInUse {
+ if s.state.get() == mSpanInUse {
s.ensureSwept()
}
}
diff --git a/src/runtime/internal/atomic/asm_386.s b/src/runtime/internal/atomic/asm_386.s
index 13289a88d0..9b9dc14a60 100644
--- a/src/runtime/internal/atomic/asm_386.s
+++ b/src/runtime/internal/atomic/asm_386.s
@@ -229,3 +229,9 @@ TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-5
LOCK
ANDB BX, (AX)
RET
+
+TEXT runtime∕internal∕atomic·Store8(SB), NOSPLIT, $0-5
+ MOVL ptr+0(FP), BX
+ MOVB val+4(FP), AX
+ XCHGB AX, 0(BX)
+ RET
diff --git a/src/runtime/internal/atomic/asm_amd64.s b/src/runtime/internal/atomic/asm_amd64.s
index e18aee7d59..90c56424c9 100644
--- a/src/runtime/internal/atomic/asm_amd64.s
+++ b/src/runtime/internal/atomic/asm_amd64.s
@@ -136,6 +136,12 @@ TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
JMP runtime∕internal∕atomic·Store(SB)
+TEXT runtime∕internal∕atomic·Store8(SB), NOSPLIT, $0-9
+ MOVQ ptr+0(FP), BX
+ MOVB val+8(FP), AX
+ XCHGB AX, 0(BX)
+ RET
+
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
MOVQ ptr+0(FP), BX
MOVQ val+8(FP), AX
diff --git a/src/runtime/internal/atomic/asm_mips64x.s b/src/runtime/internal/atomic/asm_mips64x.s
index 9cb10371b7..3290fb726a 100644
--- a/src/runtime/internal/atomic/asm_mips64x.s
+++ b/src/runtime/internal/atomic/asm_mips64x.s
@@ -166,6 +166,14 @@ TEXT ·Store(SB), NOSPLIT, $0-12
SYNC
RET
+TEXT ·Store8(SB), NOSPLIT, $0-9
+ MOVV ptr+0(FP), R1
+ MOVB val+8(FP), R2
+ SYNC
+ MOVB R2, 0(R1)
+ SYNC
+ RET
+
TEXT ·Store64(SB), NOSPLIT, $0-16
MOVV ptr+0(FP), R1
MOVV val+8(FP), R2
diff --git a/src/runtime/internal/atomic/asm_mipsx.s b/src/runtime/internal/atomic/asm_mipsx.s
index af6bce57d6..62811a6599 100644
--- a/src/runtime/internal/atomic/asm_mipsx.s
+++ b/src/runtime/internal/atomic/asm_mipsx.s
@@ -32,6 +32,14 @@ TEXT ·Store(SB),NOSPLIT,$0-8
SYNC
RET
+TEXT ·Store8(SB),NOSPLIT,$0-5
+ MOVW ptr+0(FP), R1
+ MOVB val+4(FP), R2
+ SYNC
+ MOVB R2, 0(R1)
+ SYNC
+ RET
+
TEXT ·Load(SB),NOSPLIT,$0-8
MOVW ptr+0(FP), R1
SYNC
diff --git a/src/runtime/internal/atomic/asm_ppc64x.s b/src/runtime/internal/atomic/asm_ppc64x.s
index 052b031cfb..06dc931bf4 100644
--- a/src/runtime/internal/atomic/asm_ppc64x.s
+++ b/src/runtime/internal/atomic/asm_ppc64x.s
@@ -170,6 +170,13 @@ TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
MOVW R4, 0(R3)
RET
+TEXT runtime∕internal∕atomic·Store8(SB), NOSPLIT, $0-9
+ MOVD ptr+0(FP), R3
+ MOVB val+8(FP), R4
+ SYNC
+ MOVB R4, 0(R3)
+ RET
+
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
MOVD ptr+0(FP), R3
MOVD val+8(FP), R4
diff --git a/src/runtime/internal/atomic/asm_s390x.s b/src/runtime/internal/atomic/asm_s390x.s
index 084f5b5163..78abd48afa 100644
--- a/src/runtime/internal/atomic/asm_s390x.s
+++ b/src/runtime/internal/atomic/asm_s390x.s
@@ -12,6 +12,14 @@ TEXT ·Store(SB), NOSPLIT, $0
SYNC
RET
+// func Store8(ptr *uint8, val uint8)
+TEXT ·Store8(SB), NOSPLIT, $0
+ MOVD ptr+0(FP), R2
+ MOVB val+8(FP), R3
+ MOVB R3, 0(R2)
+ SYNC
+ RET
+
// func Store64(ptr *uint64, val uint64)
TEXT ·Store64(SB), NOSPLIT, $0
MOVD ptr+0(FP), R2
diff --git a/src/runtime/internal/atomic/atomic_386.go b/src/runtime/internal/atomic/atomic_386.go
index d7f82cc752..8d002ebfe3 100644
--- a/src/runtime/internal/atomic/atomic_386.go
+++ b/src/runtime/internal/atomic/atomic_386.go
@@ -75,6 +75,9 @@ func CasRel(ptr *uint32, old, new uint32) bool
func Store(ptr *uint32, val uint32)
//go:noescape
+func Store8(ptr *uint8, val uint8)
+
+//go:noescape
func Store64(ptr *uint64, val uint64)
//go:noescape
diff --git a/src/runtime/internal/atomic/atomic_amd64.go b/src/runtime/internal/atomic/atomic_amd64.go
index fc865e892d..14b8101720 100644
--- a/src/runtime/internal/atomic/atomic_amd64.go
+++ b/src/runtime/internal/atomic/atomic_amd64.go
@@ -77,6 +77,9 @@ func CasRel(ptr *uint32, old, new uint32) bool
func Store(ptr *uint32, val uint32)
//go:noescape
+func Store8(ptr *uint8, val uint8)
+
+//go:noescape
func Store64(ptr *uint64, val uint64)
//go:noescape
diff --git a/src/runtime/internal/atomic/atomic_arm.go b/src/runtime/internal/atomic/atomic_arm.go
index c1fc1f727f..95713afcc1 100644
--- a/src/runtime/internal/atomic/atomic_arm.go
+++ b/src/runtime/internal/atomic/atomic_arm.go
@@ -210,4 +210,7 @@ func Xchg64(addr *uint64, v uint64) uint64
func Load64(addr *uint64) uint64
//go:noescape
+func Store8(addr *uint8, v uint8)
+
+//go:noescape
func Store64(addr *uint64, v uint64)
diff --git a/src/runtime/internal/atomic/atomic_arm64.go b/src/runtime/internal/atomic/atomic_arm64.go
index 0182f309cc..26ca94d54c 100644
--- a/src/runtime/internal/atomic/atomic_arm64.go
+++ b/src/runtime/internal/atomic/atomic_arm64.go
@@ -57,6 +57,9 @@ func CasRel(ptr *uint32, old, new uint32) bool
func Store(ptr *uint32, val uint32)
//go:noescape
+func Store8(ptr *uint8, val uint8)
+
+//go:noescape
func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go.
diff --git a/src/runtime/internal/atomic/atomic_arm64.s b/src/runtime/internal/atomic/atomic_arm64.s
index a7e8c35449..d95689fe2d 100644
--- a/src/runtime/internal/atomic/atomic_arm64.s
+++ b/src/runtime/internal/atomic/atomic_arm64.s
@@ -48,6 +48,12 @@ TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
STLRW R1, (R0)
RET
+TEXT runtime∕internal∕atomic·Store8(SB), NOSPLIT, $0-9
+ MOVD ptr+0(FP), R0
+ MOVB val+8(FP), R1
+ STLRB R1, (R0)
+ RET
+
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
MOVD ptr+0(FP), R0
MOVD val+8(FP), R1
diff --git a/src/runtime/internal/atomic/atomic_mips64x.go b/src/runtime/internal/atomic/atomic_mips64x.go
index ce11e38a96..1d9977850b 100644
--- a/src/runtime/internal/atomic/atomic_mips64x.go
+++ b/src/runtime/internal/atomic/atomic_mips64x.go
@@ -59,6 +59,9 @@ func CasRel(ptr *uint32, old, new uint32) bool
func Store(ptr *uint32, val uint32)
//go:noescape
+func Store8(ptr *uint8, val uint8)
+
+//go:noescape
func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go.
diff --git a/src/runtime/internal/atomic/atomic_mipsx.go b/src/runtime/internal/atomic/atomic_mipsx.go
index 6e39262c15..0e2d77ade1 100644
--- a/src/runtime/internal/atomic/atomic_mipsx.go
+++ b/src/runtime/internal/atomic/atomic_mipsx.go
@@ -141,6 +141,9 @@ func Or8(ptr *uint8, val uint8)
//go:noescape
func Store(ptr *uint32, val uint32)
+//go:noescape
+func Store8(ptr *uint8, val uint8)
+
// NO go:noescape annotation; see atomic_pointer.go.
func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
diff --git a/src/runtime/internal/atomic/atomic_ppc64x.go b/src/runtime/internal/atomic/atomic_ppc64x.go
index 13805a5275..a48ecf5ee8 100644
--- a/src/runtime/internal/atomic/atomic_ppc64x.go
+++ b/src/runtime/internal/atomic/atomic_ppc64x.go
@@ -59,6 +59,9 @@ func CasRel(ptr *uint32, old, new uint32) bool
func Store(ptr *uint32, val uint32)
//go:noescape
+func Store8(ptr *uint8, val uint8)
+
+//go:noescape
func Store64(ptr *uint64, val uint64)
//go:noescape
diff --git a/src/runtime/internal/atomic/atomic_s390x.go b/src/runtime/internal/atomic/atomic_s390x.go
index 25fd890524..4d73b39baf 100644
--- a/src/runtime/internal/atomic/atomic_s390x.go
+++ b/src/runtime/internal/atomic/atomic_s390x.go
@@ -45,6 +45,9 @@ func LoadAcq(ptr *uint32) uint32 {
func Store(ptr *uint32, val uint32)
//go:noescape
+func Store8(ptr *uint8, val uint8)
+
+//go:noescape
func Store64(ptr *uint64, val uint64)
// NO go:noescape annotation; see atomic_pointer.go.
diff --git a/src/runtime/internal/atomic/atomic_test.go b/src/runtime/internal/atomic/atomic_test.go
index 9e4461ce38..0c1125c558 100644
--- a/src/runtime/internal/atomic/atomic_test.go
+++ b/src/runtime/internal/atomic/atomic_test.go
@@ -103,3 +103,120 @@ func TestUnaligned64(t *testing.T) {
shouldPanic(t, "Xchg64", func() { atomic.Xchg64(up64, 1) })
shouldPanic(t, "Cas64", func() { atomic.Cas64(up64, 1, 2) })
}
+
+func TestAnd8(t *testing.T) {
+ // Basic sanity check.
+ x := uint8(0xff)
+ for i := uint8(0); i < 8; i++ {
+ atomic.And8(&x, ^(1 << i))
+ if r := uint8(0xff) << (i + 1); x != r {
+ t.Fatalf("clearing bit %#x: want %#x, got %#x", uint8(1<<i), r, x)
+ }
+ }
+
+ // Set every bit in array to 1.
+ a := make([]uint8, 1<<12)
+ for i := range a {
+ a[i] = 0xff
+ }
+
+ // Clear array bit-by-bit in different goroutines.
+ done := make(chan bool)
+ for i := 0; i < 8; i++ {
+ m := ^uint8(1 << i)
+ go func() {
+ for i := range a {
+ atomic.And8(&a[i], m)
+ }
+ done <- true
+ }()
+ }
+ for i := 0; i < 8; i++ {
+ <-done
+ }
+
+ // Check that the array has been totally cleared.
+ for i, v := range a {
+ if v != 0 {
+ t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint8(0), v)
+ }
+ }
+}
+
+func TestOr8(t *testing.T) {
+ // Basic sanity check.
+ x := uint8(0)
+ for i := uint8(0); i < 8; i++ {
+ atomic.Or8(&x, 1<<i)
+ if r := (uint8(1) << (i + 1)) - 1; x != r {
+ t.Fatalf("setting bit %#x: want %#x, got %#x", uint8(1)<<i, r, x)
+ }
+ }
+
+ // Start with every bit in array set to 0.
+ a := make([]uint8, 1<<12)
+
+ // Set every bit in array bit-by-bit in different goroutines.
+ done := make(chan bool)
+ for i := 0; i < 8; i++ {
+ m := uint8(1 << i)
+ go func() {
+ for i := range a {
+ atomic.Or8(&a[i], m)
+ }
+ done <- true
+ }()
+ }
+ for i := 0; i < 8; i++ {
+ <-done
+ }
+
+ // Check that the array has been totally set.
+ for i, v := range a {
+ if v != 0xff {
+ t.Fatalf("a[%v] not fully set: want %#x, got %#x", i, uint8(0xff), v)
+ }
+ }
+}
+
+func TestBitwiseContended(t *testing.T) {
+ // Start with every bit in array set to 0.
+ a := make([]uint8, 16)
+
+ // Iterations to try.
+ N := 1 << 16
+ if testing.Short() {
+ N = 1 << 10
+ }
+
+ // Set and then clear every bit in the array bit-by-bit in different goroutines.
+ done := make(chan bool)
+ for i := 0; i < 8; i++ {
+ m := uint8(1 << i)
+ go func() {
+ for n := 0; n < N; n++ {
+ for i := range a {
+ atomic.Or8(&a[i], m)
+ if atomic.Load8(&a[i])&m != m {
+ t.Errorf("a[%v] bit %#x not set", i, m)
+ }
+ atomic.And8(&a[i], ^m)
+ if atomic.Load8(&a[i])&m != 0 {
+ t.Errorf("a[%v] bit %#x not clear", i, m)
+ }
+ }
+ }
+ done <- true
+ }()
+ }
+ for i := 0; i < 8; i++ {
+ <-done
+ }
+
+ // Check that the array has been totally cleared.
+ for i, v := range a {
+ if v != 0 {
+ t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint8(0), v)
+ }
+ }
+}
diff --git a/src/runtime/internal/atomic/atomic_wasm.go b/src/runtime/internal/atomic/atomic_wasm.go
index 0731763ac1..9037c2f7c8 100644
--- a/src/runtime/internal/atomic/atomic_wasm.go
+++ b/src/runtime/internal/atomic/atomic_wasm.go
@@ -143,6 +143,12 @@ func StoreRel(ptr *uint32, val uint32) {
//go:nosplit
//go:noinline
+func Store8(ptr *uint8, val uint8) {
+ *ptr = val
+}
+
+//go:nosplit
+//go:noinline
func Store64(ptr *uint64, val uint64) {
*ptr = val
}
diff --git a/src/runtime/internal/atomic/sys_linux_arm.s b/src/runtime/internal/atomic/sys_linux_arm.s
index 1fd3e832b7..192be4b64f 100644
--- a/src/runtime/internal/atomic/sys_linux_arm.s
+++ b/src/runtime/internal/atomic/sys_linux_arm.s
@@ -120,3 +120,25 @@ end:
MOVB R1, ret+4(FP)
RET
+TEXT ·Store8(SB),NOSPLIT,$0-5
+ MOVW addr+0(FP), R1
+ MOVB v+4(FP), R2
+
+ MOVB runtime·goarm(SB), R8
+ CMP $7, R8
+ BGE native_barrier
+ BL memory_barrier<>(SB)
+ B store
+native_barrier:
+ DMB MB_ISH
+
+store:
+ MOVB R2, (R1)
+
+ CMP $7, R8
+ BGE native_barrier2
+ BL memory_barrier<>(SB)
+ RET
+native_barrier2:
+ DMB MB_ISH
+ RET
diff --git a/src/runtime/internal/atomic/sys_nonlinux_arm.s b/src/runtime/internal/atomic/sys_nonlinux_arm.s
index 9d81334791..57568b2238 100644
--- a/src/runtime/internal/atomic/sys_nonlinux_arm.s
+++ b/src/runtime/internal/atomic/sys_nonlinux_arm.s
@@ -60,3 +60,20 @@ TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-5
MOVB R1, ret+4(FP)
RET
+
+TEXT ·Store8(SB),NOSPLIT,$0-5
+ MOVW addr+0(FP), R1
+ MOVB v+4(FP), R2
+
+ MOVB runtime·goarm(SB), R8
+ CMP $7, R8
+ BLT 2(PC)
+ DMB MB_ISH
+
+ MOVB R2, (R1)
+
+ CMP $7, R8
+ BLT 2(PC)
+ DMB MB_ISH
+ RET
+
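
The Store8 additions give the runtime an atomic byte store with release ordering on every port (XCHGB on x86, STLRB on arm64, SYNC-bracketed stores on mips/ppc64/s390x, DMB on ARMv7). That is what lets a single byte act as a publication flag. A minimal sketch of the intended pattern, assuming runtime-internal context since runtime/internal/atomic is not importable from user code:

	var ready uint8 // publication flag
	var payload int // data guarded by the flag

	// Writer: initialize everything, then publish with a release store.
	payload = 42
	atomic.Store8(&ready, 1) // prior writes become visible before the flag

	// Reader: load the flag first; only trust payload once it is set.
	if atomic.Load8(&ready) == 1 {
		_ = payload
	}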
diff --git a/src/runtime/lock_js.go b/src/runtime/lock_js.go
index 51cbe60607..df52ea04fd 100644
--- a/src/runtime/lock_js.go
+++ b/src/runtime/lock_js.go
@@ -158,6 +158,7 @@ var idleID int32
// If an event handler returned, we resume it and it will pause the execution.
func beforeIdle(delay int64) bool {
if delay > 0 {
+ clearIdleID()
if delay < 1e6 {
delay = 1
} else if delay < 1e15 {
@@ -229,6 +230,7 @@ func handleEvent() {
func handleAsyncEvent() {
isHandlingEvent = true
eventHandler()
+ clearIdleID()
isHandlingEvent = false
}
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 30ec5f1cc9..55c0282403 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -243,6 +243,10 @@ func (s *mspan) nextFreeIndex() uintptr {
}
// isFree reports whether the index'th object in s is unallocated.
+//
+// The caller must ensure s.state is mSpanInUse, and there must have
+// been no preemption points since ensuring this (which could allow a
+// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool {
if index < s.freeindex {
return false
@@ -349,6 +353,33 @@ func heapBitsForAddr(addr uintptr) (h heapBits) {
return
}
+// badPointer throws bad pointer in heap panic.
+func badPointer(s *mspan, p, refBase, refOff uintptr) {
+ // Typically this indicates an incorrect use
+ // of unsafe or cgo to store a bad pointer in
+ // the Go heap. It may also indicate a runtime
+ // bug.
+ //
+ // TODO(austin): We could be more aggressive
+ // and detect pointers to unallocated objects
+ // in allocated spans.
+ printlock()
+ print("runtime: pointer ", hex(p))
+ state := s.state.get()
+ if state != mSpanInUse {
+ print(" to unallocated span")
+ } else {
+ print(" to unused region of span")
+ }
+ print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state, "\n")
+ if refBase != 0 {
+ print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
+ gcDumpObject("object", refBase, refOff)
+ }
+ getg().m.traceback = 2
+ throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
+}
+
// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
@@ -359,42 +390,30 @@ func heapBitsForAddr(addr uintptr) (h heapBits) {
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
+//
+// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
+// Since p is a uintptr, it would not be adjusted if the stack were to move.
+//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
s = spanOf(p)
+ // If s is nil, the virtual address has never been part of the heap.
+ // This pointer may be to some mmap'd region, so we allow it.
+ if s == nil {
+ return
+ }
// If p is a bad pointer, it may not be in s's bounds.
- if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
- if s == nil || s.state == mSpanManual {
- // If s is nil, the virtual address has never been part of the heap.
- // This pointer may be to some mmap'd region, so we allow it.
- // Pointers into stacks are also ok, the runtime manages these explicitly.
+ //
+ // Check s.state to synchronize with span initialization
+ // before checking other fields. See also spanOfHeap.
+ if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
+ // Pointers into stacks are also ok, the runtime manages these explicitly.
+ if state == mSpanManual {
return
}
-
// The following ensures that we are rigorous about what data
// structures hold valid pointers.
if debug.invalidptr != 0 {
- // Typically this indicates an incorrect use
- // of unsafe or cgo to store a bad pointer in
- // the Go heap. It may also indicate a runtime
- // bug.
- //
- // TODO(austin): We could be more aggressive
- // and detect pointers to unallocated objects
- // in allocated spans.
- printlock()
- print("runtime: pointer ", hex(p))
- if s.state != mSpanInUse {
- print(" to unallocated span")
- } else {
- print(" to unused region of span")
- }
- print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
- if refBase != 0 {
- print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
- gcDumpObject("object", refBase, refOff)
- }
- getg().m.traceback = 2
- throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
+ badPointer(s, p, refBase, refOff)
}
return
}
@@ -609,7 +628,7 @@ func bulkBarrierPreWrite(dst, src, size uintptr) {
}
}
return
- } else if s.state != mSpanInUse || dst < s.base() || s.limit <= dst {
+ } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
// dst was heap memory at some point, but isn't now.
// It can't be a global. It must be either our stack,
// or in the case of direct channel sends, it could be
@@ -781,29 +800,19 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
// words to pointer/scan.
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
- size, n, total := s.layout()
-
- // Init the markbit structures
- s.freeindex = 0
- s.allocCache = ^uint64(0) // all 1s indicating all free.
- s.nelems = n
- s.allocBits = nil
- s.gcmarkBits = nil
- s.gcmarkBits = newMarkBits(s.nelems)
- s.allocBits = newAllocBits(s.nelems)
-
// Clear bits corresponding to objects.
- nw := total / sys.PtrSize
+ nw := (s.npages << _PageShift) / sys.PtrSize
if nw%wordsPerBitmapByte != 0 {
throw("initSpan: unaligned length")
}
if h.shift != 0 {
throw("initSpan: unaligned base")
}
+ isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize
for nw > 0 {
hNext, anw := h.forwardOrBoundary(nw)
nbyte := anw / wordsPerBitmapByte
- if sys.PtrSize == 8 && size == sys.PtrSize {
+ if isPtrs {
bitp := h.bitp
for i := uintptr(0); i < nbyte; i++ {
*bitp = bitPointerAll | bitScanAll
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index a7089dd879..4a2ae89391 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -2168,8 +2168,7 @@ func gcResetMarkState() {
// allgs doesn't change.
lock(&allglock)
for _, gp := range allgs {
- gp.gcscandone = false // set to true in gcphasework
- gp.gcscanvalid = false // stack has not been scanned
+ gp.gcscandone = false // set to true in gcphasework
gp.gcAssistBytes = 0
}
unlock(&allglock)
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 645083db07..2987d3572b 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -125,8 +125,7 @@ func gcMarkRootCheck() {
fail:
println("gp", gp, "goid", gp.goid,
"status", readgstatus(gp),
- "gcscandone", gp.gcscandone,
- "gcscanvalid", gp.gcscanvalid)
+ "gcscandone", gp.gcscandone)
unlock(&allglock) // Avoid self-deadlock with traceback.
throw("scan missed a g")
}
@@ -211,14 +210,24 @@ func markroot(gcw *gcWork, i uint32) {
userG.waitreason = waitReasonGarbageCollectionScan
}
- // TODO: scang blocks until gp's stack has
- // been scanned, which may take a while for
+ // TODO: suspendG blocks (and spins) until gp
+ // stops, which may take a while for
// running goroutines. Consider doing this in
// two phases where the first is non-blocking:
// we scan the stacks we can and ask running
// goroutines to scan themselves; and the
// second blocks.
- scang(gp, gcw)
+ stopped := suspendG(gp)
+ if stopped.dead {
+ gp.gcscandone = true
+ return
+ }
+ if gp.gcscandone {
+ throw("g already scanned")
+ }
+ scanstack(gp, gcw)
+ gp.gcscandone = true
+ resumeG(stopped)
if selfScan {
casgstatus(userG, _Gwaiting, _Grunning)
@@ -312,7 +321,9 @@ func markrootSpans(gcw *gcWork, shard int) {
// entered the scan phase, so addfinalizer will have ensured
// the above invariants for them.
for _, s := range spans {
- if s.state != mSpanInUse {
+ // This is racing with spans being initialized, so
+ // check the state carefully.
+ if s.state.get() != mSpanInUse {
continue
}
// Check that this span was swept (it may be cached or uncached).
@@ -658,16 +669,16 @@ func gcFlushBgCredit(scanWork int64) {
// scanstack scans gp's stack, greying all pointers found on the stack.
//
+// scanstack will also shrink the stack if it is safe to do so. If it
+// is not, it schedules a stack shrink for the next synchronous safe
+// point.
+//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) {
- if gp.gcscanvalid {
- return
- }
-
if readgstatus(gp)&_Gscan == 0 {
print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
throw("scanstack - bad status")
@@ -690,8 +701,13 @@ func scanstack(gp *g, gcw *gcWork) {
throw("can't scan our own stack")
}
- // Shrink the stack if not much of it is being used.
- shrinkstack(gp)
+ if isShrinkStackSafe(gp) {
+ // Shrink the stack if not much of it is being used.
+ shrinkstack(gp)
+ } else {
+ // Otherwise, shrink the stack at the next sync safe point.
+ gp.preemptShrink = true
+ }
var state stackScanState
state.stack = gp.stack
@@ -807,8 +823,6 @@ func scanstack(gp *g, gcw *gcWork) {
if state.buf != nil || state.freeBuf != nil {
throw("remaining pointer buffers")
}
-
- gp.gcscanvalid = true
}
// Scan a stack frame: local variables and function arguments/results.
@@ -1298,15 +1312,15 @@ func gcDumpObject(label string, obj, off uintptr) {
return
}
print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
- if 0 <= s.state && int(s.state) < len(mSpanStateNames) {
- print(mSpanStateNames[s.state], "\n")
+ if state := s.state.get(); 0 <= state && int(state) < len(mSpanStateNames) {
+ print(mSpanStateNames[state], "\n")
} else {
- print("unknown(", s.state, ")\n")
+ print("unknown(", state, ")\n")
}
skipped := false
size := s.elemsize
- if s.state == mSpanManual && size == 0 {
+ if s.state.get() == mSpanManual && size == 0 {
// We're printing something from a stack frame. We
// don't know how big it is, so just show up to an
// including off.
@@ -1394,7 +1408,7 @@ var useCheckmark = false
func initCheckmarks() {
useCheckmark = true
for _, s := range mheap_.allspans {
- if s.state == mSpanInUse {
+ if s.state.get() == mSpanInUse {
heapBitsForAddr(s.base()).initCheckmarkSpan(s.layout())
}
}
@@ -1403,7 +1417,7 @@ func initCheckmarks() {
func clearCheckmarks() {
useCheckmark = false
for _, s := range mheap_.allspans {
- if s.state == mSpanInUse {
+ if s.state.get() == mSpanInUse {
heapBitsForAddr(s.base()).clearCheckmarkSpan(s.layout())
}
}
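
The markroot hunk above replaces scang with the suspendG/resumeG pair from the new preempt.go. The calling convention, condensed from the diff (suspendG spins until gp is stopped at a safe point; a dead G is never resumed):

	stopped := suspendG(gp)
	if stopped.dead {
		gp.gcscandone = true // nothing left to scan
	} else {
		scanstack(gp, gcw) // gp is guaranteed stopped here
		gp.gcscandone = true
		resumeG(stopped) // let gp run again
	}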
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 5f1c90bfe0..580de7a715 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -114,12 +114,12 @@ func sweepone() uintptr {
atomic.Store(&mheap_.sweepdone, 1)
break
}
- if s.state != mSpanInUse {
+ if state := s.state.get(); state != mSpanInUse {
// This can happen if direct sweeping already
// swept this span, but in that case the sweep
// generation should always be up-to-date.
if !(s.sweepgen == sg || s.sweepgen == sg+3) {
- print("runtime: bad span s.state=", s.state, " s.sweepgen=", s.sweepgen, " sweepgen=", sg, "\n")
+ print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sg, "\n")
throw("non in-use span in unswept list")
}
continue
@@ -211,8 +211,8 @@ func (s *mspan) sweep(preserve bool) bool {
throw("mspan.sweep: m is not locked")
}
sweepgen := mheap_.sweepgen
- if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
- print("mspan.sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
+ if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
+ print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
throw("mspan.sweep: bad span state")
}
@@ -351,8 +351,8 @@ func (s *mspan) sweep(preserve bool) bool {
if freeToHeap || nfreed == 0 {
// The span must be in our exclusive ownership until we update sweepgen,
// check for potential races.
- if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
- print("mspan.sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
+ if state := s.state.get(); state != mSpanInUse || s.sweepgen != sweepgen-1 {
+ print("mspan.sweep: state=", state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
throw("mspan.sweep: bad span state after sweep")
}
// Serialization point.
diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go
index f2c16d7d8c..927b06c3f9 100644
--- a/src/runtime/mgcwork.go
+++ b/src/runtime/mgcwork.go
@@ -126,12 +126,12 @@ func (w *gcWork) checkPut(ptr uintptr, ptrs []uintptr) {
if debugCachedWork {
alreadyFailed := w.putGen == w.pauseGen
w.putGen = w.pauseGen
- if m := getg().m; m.locks > 0 || m.mallocing != 0 || m.preemptoff != "" || m.p.ptr().status != _Prunning {
+ if !canPreemptM(getg().m) {
// If we were to spin, the runtime may
- // deadlock: the condition above prevents
- // preemption (see newstack), which could
- // prevent gcMarkDone from finishing the
- // ragged barrier and releasing the spin.
+ // deadlock. Since we can't be preempted, the
+ // spin could prevent gcMarkDone from
+ // finishing the ragged barrier, which is what
+ // releases us from the spin.
return
}
for atomic.Load(&gcWorkPauseGen) == w.pauseGen {
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 3807050cbe..83ee310cda 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -305,6 +305,14 @@ type arenaHint struct {
// * During GC (gcphase != _GCoff), a span *must not* transition from
// manual or in-use to free. Because concurrent GC may read a pointer
// and then look up its span, the span state must be monotonic.
+//
+// Setting mspan.state to mSpanInUse or mSpanManual must be done
+// atomically and only after all other span fields are valid.
+// Likewise, if inspecting a span is contingent on it being
+// mSpanInUse, the state should be loaded atomically and checked
+// before depending on other fields. This allows the garbage collector
+// to safely deal with potentially invalid pointers, since resolving
+// such pointers may race with a span being allocated.
type mSpanState uint8
const (
@@ -323,6 +331,21 @@ var mSpanStateNames = []string{
"mSpanFree",
}
+// mSpanStateBox holds an mSpanState and provides atomic operations on
+// it. This is a separate type to disallow accidental comparison or
+// assignment with mSpanState.
+type mSpanStateBox struct {
+ s mSpanState
+}
+
+func (b *mSpanStateBox) set(s mSpanState) {
+ atomic.Store8((*uint8)(&b.s), uint8(s))
+}
+
+func (b *mSpanStateBox) get() mSpanState {
+ return mSpanState(atomic.Load8((*uint8)(&b.s)))
+}
+
// mSpanList heads a linked list of spans.
//
//go:notinheap
@@ -404,19 +427,19 @@ type mspan struct {
// h->sweepgen is incremented by 2 after every GC
sweepgen uint32
- divMul uint16 // for divide by elemsize - divMagic.mul
- baseMask uint16 // if non-0, elemsize is a power of 2, & this will get object allocation base
- allocCount uint16 // number of allocated objects
- spanclass spanClass // size class and noscan (uint8)
- state mSpanState // mspaninuse etc
- needzero uint8 // needs to be zeroed before allocation
- divShift uint8 // for divide by elemsize - divMagic.shift
- divShift2 uint8 // for divide by elemsize - divMagic.shift2
- scavenged bool // whether this span has had its pages released to the OS
- elemsize uintptr // computed from sizeclass or from npages
- limit uintptr // end of data in span
- speciallock mutex // guards specials list
- specials *special // linked list of special records sorted by offset.
+ divMul uint16 // for divide by elemsize - divMagic.mul
+ baseMask uint16 // if non-0, elemsize is a power of 2, & this will get object allocation base
+ allocCount uint16 // number of allocated objects
+ spanclass spanClass // size class and noscan (uint8)
+ state mSpanStateBox // mSpanInUse etc; accessed atomically (get/set methods)
+ needzero uint8 // needs to be zeroed before allocation
+ divShift uint8 // for divide by elemsize - divMagic.shift
+ divShift2 uint8 // for divide by elemsize - divMagic.shift2
+ scavenged bool // whether this span has had its pages released to the OS
+ elemsize uintptr // computed from sizeclass or from npages
+ limit uintptr // end of data in span
+ speciallock mutex // guards specials list
+ specials *special // linked list of special records sorted by offset.
}
func (s *mspan) base() uintptr {
@@ -483,7 +506,7 @@ func (h *mheap) coalesce(s *mspan) {
// The size is potentially changing so the treap needs to delete adjacent nodes and
// insert back as a combined node.
h.free.removeSpan(other)
- other.state = mSpanDead
+ other.state.set(mSpanDead)
h.spanalloc.free(unsafe.Pointer(other))
}
@@ -525,7 +548,7 @@ func (h *mheap) coalesce(s *mspan) {
// Coalesce with earlier, later spans.
var hpBefore uintptr
- if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
+ if before := spanOf(s.base() - 1); before != nil && before.state.get() == mSpanFree {
if s.scavenged == before.scavenged {
hpBefore = before.hugePages()
merge(before, s, before)
@@ -536,7 +559,7 @@ func (h *mheap) coalesce(s *mspan) {
// Now check to see if next (greater addresses) span is free and can be coalesced.
var hpAfter uintptr
- if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
+ if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state.get() == mSpanFree {
if s.scavenged == after.scavenged {
hpAfter = after.hugePages()
merge(s, after, after)
@@ -733,7 +756,7 @@ func inHeapOrStack(b uintptr) bool {
if s == nil || b < s.base() {
return false
}
- switch s.state {
+ switch s.state.get() {
case mSpanInUse, mSpanManual:
return b < s.limit
default:
@@ -800,9 +823,12 @@ func spanOfUnchecked(p uintptr) *mspan {
//go:nosplit
func spanOfHeap(p uintptr) *mspan {
s := spanOf(p)
- // If p is not allocated, it may point to a stale span, so we
- // have to check the span's bounds and state.
- if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
+ // s is nil if it's never been allocated. Otherwise, we check
+ // its state first because we don't trust this pointer, so we
+ // have to synchronize with span initialization. Then, it's
+ // still possible we picked up a stale span pointer, so we
+ // have to check the span's bounds.
+ if s == nil || s.state.get() != mSpanInUse || p < s.base() || p >= s.limit {
return nil
}
return s
@@ -1012,6 +1038,23 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
h.reclaim(npage)
}
+ // Compute size information.
+ nbytes := npage << _PageShift
+ var elemSize, nelems uintptr
+ if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
+ elemSize = nbytes
+ nelems = 1
+ } else {
+ elemSize = uintptr(class_to_size[sizeclass])
+ nelems = nbytes / elemSize
+ }
+
+ // Allocate mark and allocation bits before we take the heap
+ // lock. We'll drop these on the floor if we fail to allocate
+ // the span, but in that case we'll panic soon.
+ gcmarkBits := newMarkBits(nelems)
+ allocBits := newAllocBits(nelems)
+
lock(&h.lock)
// transfer stats from cache to global
memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
@@ -1025,17 +1068,15 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
// able to map interior pointer to containing span.
atomic.Store(&s.sweepgen, h.sweepgen)
h.sweepSpans[h.sweepgen/2%2].push(s) // Add to swept in-use list.
- s.state = mSpanInUse
s.allocCount = 0
s.spanclass = spanclass
+ s.elemsize = elemSize
if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
- s.elemsize = s.npages << _PageShift
s.divShift = 0
s.divMul = 0
s.divShift2 = 0
s.baseMask = 0
} else {
- s.elemsize = uintptr(class_to_size[sizeclass])
m := &class_to_divmagic[sizeclass]
s.divShift = m.shift
s.divMul = m.mul
@@ -1043,6 +1084,25 @@ func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
s.baseMask = m.baseMask
}
+ // Initialize mark and allocation structures.
+ s.freeindex = 0
+ s.allocCache = ^uint64(0) // all 1s indicating all free.
+ s.nelems = nelems
+ s.gcmarkBits = gcmarkBits
+ s.allocBits = allocBits
+
+ // Now that the span is filled in, set its state. This
+ // is a publication barrier for the other fields in
+ // the span. While valid pointers into this span
+ // should never be visible until the span is returned,
+ // if the garbage collector finds an invalid pointer,
+ // access to the span may race with initialization of
+ // the span. We resolve this race by atomically
+ // setting the state after the span is fully
+ // initialized, and atomically checking the state in
+ // any situation where a pointer is suspect.
+ s.state.set(mSpanInUse)
+
// Mark in-use span in arena page bitmap.
arena, pageIdx, pageMask := pageIndexOf(s.base())
arena.pageInUse[pageIdx] |= pageMask
@@ -1120,13 +1180,13 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
lock(&h.lock)
s := h.allocSpanLocked(npage, stat)
if s != nil {
- s.state = mSpanManual
s.manualFreeList = 0
s.allocCount = 0
s.spanclass = 0
s.nelems = 0
s.elemsize = 0
s.limit = s.base() + s.npages<<_PageShift
+ s.state.set(mSpanManual) // Publish the span
// Manually managed memory doesn't count toward heap_sys.
memstats.heap_sys -= uint64(s.npages << _PageShift)
}
@@ -1178,7 +1238,7 @@ func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
HaveSpan:
s := t.span()
- if s.state != mSpanFree {
+ if s.state.get() != mSpanFree {
throw("candidate mspan for allocation is not free")
}
@@ -1309,7 +1369,7 @@ func (h *mheap) growAddSpan(v unsafe.Pointer, size uintptr) {
s := (*mspan)(h.spanalloc.alloc())
s.init(uintptr(v), size/pageSize)
h.setSpans(s.base(), s.npages, s)
- s.state = mSpanFree
+ s.state.set(mSpanFree)
// [v, v+size) is always in the Prepared state. The new span
// must be marked scavenged so the allocator transitions it to
// Ready when allocating from it.
@@ -1372,7 +1432,7 @@ func (h *mheap) freeManual(s *mspan, stat *uint64) {
}
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
- switch s.state {
+ switch s.state.get() {
case mSpanManual:
if s.allocCount != 0 {
throw("mheap.freeSpanLocked - invalid stack free")
@@ -1397,7 +1457,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
if acctidle {
memstats.heap_idle += uint64(s.npages << _PageShift)
}
- s.state = mSpanFree
+ s.state.set(mSpanFree)
// Coalesce span with neighbors.
h.coalesce(s)
@@ -1458,7 +1518,7 @@ func (h *mheap) scavengeSplit(t treapIter, size uintptr) *mspan {
h.setSpan(n.base(), n)
h.setSpan(n.base()+nbytes-1, n)
n.needzero = s.needzero
- n.state = s.state
+ n.state.set(s.state.get())
})
return n
}
@@ -1557,7 +1617,6 @@ func (span *mspan) init(base uintptr, npages uintptr) {
span.allocCount = 0
span.spanclass = 0
span.elemsize = 0
- span.state = mSpanDead
span.scavenged = false
span.speciallock.key = 0
span.specials = nil
@@ -1565,6 +1624,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
span.freeindex = 0
span.allocBits = nil
span.gcmarkBits = nil
+ span.state.set(mSpanDead)
}
func (span *mspan) inList() bool {
diff --git a/src/runtime/nbpipe_fcntl_aix_test.go b/src/runtime/nbpipe_fcntl_aix_test.go
new file mode 100644
index 0000000000..4276ed5b53
--- /dev/null
+++ b/src/runtime/nbpipe_fcntl_aix_test.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "syscall"
+)
+
+// We can't call syscall.Syscall on AIX. Therefore, fcntl is exported from the
+// runtime in export_aix_test.go.
+func fcntl(fd uintptr, cmd int, arg uintptr) (uintptr, syscall.Errno) {
+ res, errno := runtime.Fcntl(fd, uintptr(cmd), arg)
+ return res, syscall.Errno(errno)
+}
diff --git a/src/runtime/nbpipe_fcntl_unix_test.go b/src/runtime/nbpipe_fcntl_unix_test.go
new file mode 100644
index 0000000000..06b3275f06
--- /dev/null
+++ b/src/runtime/nbpipe_fcntl_unix_test.go
@@ -0,0 +1,14 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package runtime_test
+
+import "syscall"
+
+func fcntl(fd uintptr, cmd int, arg uintptr) (uintptr, syscall.Errno) {
+ res, _, err := syscall.Syscall(syscall.SYS_FCNTL, fd, uintptr(cmd), arg)
+ return res, err
+}
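As a usage sketch, the same helper shape can flip a descriptor to non-blocking, which is exactly what checkNonblocking later reads back. setNonblock is an invented name and error handling is minimal (assumes Linux or a BSD):

    package main

    import "syscall"

    // fcntl mirrors the test helper above: a thin wrapper over the raw
    // syscall.
    func fcntl(fd uintptr, cmd int, arg uintptr) (uintptr, syscall.Errno) {
    	res, _, err := syscall.Syscall(syscall.SYS_FCNTL, fd, uintptr(cmd), arg)
    	return res, err
    }

    // setNonblock reads the current flags, then ORs in O_NONBLOCK.
    func setNonblock(fd uintptr) syscall.Errno {
    	flags, errno := fcntl(fd, syscall.F_GETFL, 0)
    	if errno != 0 {
    		return errno
    	}
    	_, errno = fcntl(fd, syscall.F_SETFL, flags|syscall.O_NONBLOCK)
    	return errno
    }

    func main() {
    	var p [2]int
    	if err := syscall.Pipe(p[:]); err == nil {
    		setNonblock(uintptr(p[0]))
    	}
    }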
diff --git a/src/runtime/nbpipe_test.go b/src/runtime/nbpipe_test.go
index bd0d578234..00dc11e937 100644
--- a/src/runtime/nbpipe_test.go
+++ b/src/runtime/nbpipe_test.go
@@ -49,7 +49,7 @@ func checkIsPipe(t *testing.T, r, w int32) {
func checkNonblocking(t *testing.T, fd int32, name string) {
t.Helper()
- flags, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), syscall.F_GETFL, 0)
+ flags, errno := fcntl(uintptr(fd), syscall.F_GETFL, 0)
if errno != 0 {
t.Errorf("fcntl(%s, F_GETFL) failed: %v", name, syscall.Errno(errno))
} else if flags&syscall.O_NONBLOCK == 0 {
@@ -59,7 +59,7 @@ func checkNonblocking(t *testing.T, fd int32, name string) {
func checkCloseonexec(t *testing.T, fd int32, name string) {
t.Helper()
- flags, _, errno := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), syscall.F_GETFD, 0)
+ flags, errno := fcntl(uintptr(fd), syscall.F_GETFD, 0)
if errno != 0 {
t.Errorf("fcntl(%s, F_GETFD) failed: %v", name, syscall.Errno(errno))
} else if flags&syscall.FD_CLOEXEC == 0 {
diff --git a/src/runtime/netpoll_aix.go b/src/runtime/netpoll_aix.go
index e1512f826c..76ee876771 100644
--- a/src/runtime/netpoll_aix.go
+++ b/src/runtime/netpoll_aix.go
@@ -185,13 +185,12 @@ retry:
for read(rdwake, unsafe.Pointer(&b[0]), 1) == 1 {
}
}
- // Do not look at the other fds in this case as the mode may have changed
- // XXX only additions of flags are made, so maybe it is ok
- unlock(&mtxset)
- goto retry
+ // Still look at the other fds even if the mode may have
+ // changed, as netpollBreak might have been called.
+ n--
}
var toRun gList
- for i := 0; i < len(pfds) && n > 0; i++ {
+ for i := 1; i < len(pfds) && n > 0; i++ {
pfd := &pfds[i]
var mode int32
diff --git a/src/runtime/netpoll_solaris.go b/src/runtime/netpoll_solaris.go
index fac4829ed1..26bbe38d86 100644
--- a/src/runtime/netpoll_solaris.go
+++ b/src/runtime/netpoll_solaris.go
@@ -91,8 +91,8 @@ func errno() int32 {
return *getg().m.perrno
}
-func fcntl(fd, cmd int32, arg uintptr) int32 {
- return int32(sysvicall3(&libc_fcntl, uintptr(fd), uintptr(cmd), arg))
+func fcntl(fd, cmd, arg int32) int32 {
+ return int32(sysvicall3(&libc_fcntl, uintptr(fd), uintptr(cmd), uintptr(arg)))
}
func port_create() int32 {
diff --git a/src/runtime/netpoll_stub.go b/src/runtime/netpoll_stub.go
index ab92b0424e..fe45cfbd40 100644
--- a/src/runtime/netpoll_stub.go
+++ b/src/runtime/netpoll_stub.go
@@ -16,6 +16,7 @@ var netpollNote note
var netpollBroken uint32
func netpollGenericInit() {
+ atomic.Store(&netpollInited, 1)
}
func netpollBreak() {
@@ -30,13 +31,17 @@ func netpoll(delay int64) gList {
// Implementation for platforms that do not support
// integrated network poller.
if delay != 0 {
+ // This lock ensures that only one goroutine tries to use
+ // the note. It should normally be completely uncontended.
+ lock(&netpollStubLock)
noteclear(&netpollNote)
atomic.Store(&netpollBroken, 0)
notetsleep(&netpollNote, delay)
+ unlock(&netpollStubLock)
}
return gList{}
}
func netpollinited() bool {
- return false
+ return atomic.Load(&netpollInited) != 0
}
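A channel-based analogy for the stub's sleep/break handshake (this is not the runtime's note machinery, only an illustration of the coalescing wake-up it implements):

    package main

    import (
    	"fmt"
    	"time"
    )

    // wake holds at most one pending break, so concurrent netpollBreak
    // calls coalesce into a single wake-up, as with the note above.
    var wake = make(chan struct{}, 1)

    func netpollSleep(delay time.Duration) {
    	select {
    	case <-wake: // a break arrived before or during the sleep
    	case <-time.After(delay):
    	}
    }

    func netpollBreak() {
    	select {
    	case wake <- struct{}{}: // wake the sleeper
    	default: // a break is already pending
    	}
    }

    func main() {
    	go netpollBreak()
    	start := time.Now()
    	netpollSleep(time.Second)
    	fmt.Println("slept for", time.Since(start))
    }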
diff --git a/src/runtime/os2_aix.go b/src/runtime/os2_aix.go
index 7f69d6d1e3..7c3cb27223 100644
--- a/src/runtime/os2_aix.go
+++ b/src/runtime/os2_aix.go
@@ -64,6 +64,8 @@ var (
//go:cgo_import_dynamic libpthread_attr_setstackaddr pthread_attr_setstackaddr "libpthread.a/shr_xpg5_64.o"
//go:cgo_import_dynamic libpthread_create pthread_create "libpthread.a/shr_xpg5_64.o"
//go:cgo_import_dynamic libpthread_sigthreadmask sigthreadmask "libpthread.a/shr_xpg5_64.o"
+//go:cgo_import_dynamic libpthread_self pthread_self "libpthread.a/shr_xpg5_64.o"
+//go:cgo_import_dynamic libpthread_kill pthread_kill "libpthread.a/shr_xpg5_64.o"
//go:linkname libc__Errno libc__Errno
//go:linkname libc_clock_gettime libc_clock_gettime
@@ -101,6 +103,8 @@ var (
//go:linkname libpthread_attr_setstackaddr libpthread_attr_setstackaddr
//go:linkname libpthread_create libpthread_create
//go:linkname libpthread_sigthreadmask libpthread_sigthreadmask
+//go:linkname libpthread_self libpthread_self
+//go:linkname libpthread_kill libpthread_kill
var (
//libc
@@ -139,7 +143,9 @@ var (
libpthread_attr_setdetachstate,
libpthread_attr_setstackaddr,
libpthread_create,
- libpthread_sigthreadmask libFunc
+ libpthread_sigthreadmask,
+ libpthread_self,
+ libpthread_kill libFunc
)
type libFunc uintptr
@@ -724,3 +730,14 @@ func sigprocmask(how int32, new, old *sigset) {
sigprocmask1(uintptr(how), uintptr(unsafe.Pointer(new)), uintptr(unsafe.Pointer(old)))
}
+
+//go:nosplit
+func pthread_self() pthread {
+ r, _ := syscall0(&libpthread_self)
+ return pthread(r)
+}
+
+//go:nosplit
+func signalM(mp *m, sig int) {
+ syscall2(&libpthread_kill, uintptr(pthread(mp.procid)), uintptr(sig))
+}
diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go
index 4ac191fab8..373c682f05 100644
--- a/src/runtime/os3_solaris.go
+++ b/src/runtime/os3_solaris.go
@@ -29,6 +29,8 @@ import (
//go:cgo_import_dynamic libc_pthread_attr_setdetachstate pthread_attr_setdetachstate "libc.so"
//go:cgo_import_dynamic libc_pthread_attr_setstack pthread_attr_setstack "libc.so"
//go:cgo_import_dynamic libc_pthread_create pthread_create "libc.so"
+//go:cgo_import_dynamic libc_pthread_self pthread_self "libc.so"
+//go:cgo_import_dynamic libc_pthread_kill pthread_kill "libc.so"
//go:cgo_import_dynamic libc_raise raise "libc.so"
//go:cgo_import_dynamic libc_read read "libc.so"
//go:cgo_import_dynamic libc_select select "libc.so"
@@ -61,6 +63,8 @@ import (
//go:linkname libc_pthread_attr_setdetachstate libc_pthread_attr_setdetachstate
//go:linkname libc_pthread_attr_setstack libc_pthread_attr_setstack
//go:linkname libc_pthread_create libc_pthread_create
+//go:linkname libc_pthread_self libc_pthread_self
+//go:linkname libc_pthread_kill libc_pthread_kill
//go:linkname libc_raise libc_raise
//go:linkname libc_read libc_read
//go:linkname libc_select libc_select
@@ -94,6 +98,8 @@ var (
libc_pthread_attr_setdetachstate,
libc_pthread_attr_setstack,
libc_pthread_create,
+ libc_pthread_self,
+ libc_pthread_kill,
libc_raise,
libc_read,
libc_sched_yield,
@@ -113,14 +119,6 @@ var (
var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}
-func getncpu() int32 {
- n := int32(sysconf(__SC_NPROCESSORS_ONLN))
- if n < 1 {
- return 1
- }
- return n
-}
-
func getPageSize() uintptr {
n := int32(sysconf(__SC_PAGESIZE))
if n <= 0 {
@@ -214,6 +212,8 @@ func minit() {
asmcgocall(unsafe.Pointer(funcPC(miniterrno)), unsafe.Pointer(&libc____errno))
minitSignals()
+
+ getg().m.procid = uint64(pthread_self())
}
// Called from dropm to undo the effect of an minit.
@@ -434,6 +434,14 @@ func pthread_create(thread *pthread, attr *pthreadattr, fn uintptr, arg unsafe.P
return int32(sysvicall4(&libc_pthread_create, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(fn), uintptr(arg)))
}
+func pthread_self() pthread {
+ return pthread(sysvicall0(&libc_pthread_self))
+}
+
+func signalM(mp *m, sig int) {
+ sysvicall2(&libc_pthread_kill, uintptr(pthread(mp.procid)), uintptr(sig))
+}
+
//go:nosplit
//go:nowritebarrierrec
func raise(sig uint32) /* int32 */ {
diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go
index 855ae6ff46..9a6b8aec7c 100644
--- a/src/runtime/os_aix.go
+++ b/src/runtime/os_aix.go
@@ -175,6 +175,7 @@ func miniterrno() {
func minit() {
miniterrno()
minitSignals()
+ getg().m.procid = uint64(pthread_self())
}
func unminit() {
@@ -359,8 +360,8 @@ func setupSystemConf() {
}
//go:nosplit
-func fcntl(fd, cmd int32, arg uintptr) int32 {
- r, _ := syscall3(&libc_fcntl, uintptr(fd), uintptr(cmd), arg)
+func fcntl(fd, cmd, arg int32) int32 {
+ r, _ := syscall3(&libc_fcntl, uintptr(fd), uintptr(cmd), uintptr(arg))
return int32(r)
}
@@ -372,5 +373,5 @@ func closeonexec(fd int32) {
//go:nosplit
func setNonblock(fd int32) {
flags := fcntl(fd, _F_GETFL, 0)
- fcntl(fd, _F_SETFL, uintptr(flags|_O_NONBLOCK))
+ fcntl(fd, _F_SETFL, flags|_O_NONBLOCK)
}
diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go
index 1614b66c8a..c11fbec0a5 100644
--- a/src/runtime/os_darwin.go
+++ b/src/runtime/os_darwin.go
@@ -295,6 +295,7 @@ func minit() {
minitSignalStack()
}
minitSignalMask()
+ getg().m.procid = uint64(pthread_self())
}
// Called from dropm to undo the effect of an minit.
@@ -406,3 +407,7 @@ func sysargs(argc int32, argv **byte) {
executablePath = executablePath[len(prefix):]
}
}
+
+func signalM(mp *m, sig int) {
+ pthread_kill(pthread(mp.procid), uint32(sig))
+}
diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go
index 3266b2623a..6578fcbeb1 100644
--- a/src/runtime/os_dragonfly.go
+++ b/src/runtime/os_dragonfly.go
@@ -38,9 +38,11 @@ func setitimer(mode int32, new, old *itimerval)
//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-func raise(sig uint32)
func raiseproc(sig uint32)
+func lwp_gettid() int32
+func lwp_kill(pid, tid int32, sig int)
+
//go:noescape
func sys_umtx_sleep(addr *uint32, val, timeout int32) int32
@@ -151,7 +153,7 @@ func newosproc(mp *m) {
start_func: funcPC(lwp_start),
arg: unsafe.Pointer(mp),
stack: uintptr(stk),
- tid1: unsafe.Pointer(&mp.procid),
+ tid1: nil, // minit will record tid
tid2: nil,
}
@@ -191,10 +193,7 @@ func mpreinit(mp *m) {
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
- // m.procid is a uint64, but lwp_start writes an int32. Fix it up.
- _g_ := getg()
- _g_.m.procid = uint64(*(*int32)(unsafe.Pointer(&_g_.m.procid)))
-
+ getg().m.procid = uint64(lwp_gettid())
minitSignals()
}
@@ -288,3 +287,17 @@ func sysauxv(auxv []uintptr) {
}
}
}
+
+// raise sends a signal to the calling thread.
+//
+// It must be nosplit because it is used by the signal handler before
+// it definitely has a Go stack.
+//
+//go:nosplit
+func raise(sig uint32) {
+ lwp_kill(-1, lwp_gettid(), int(sig))
+}
+
+func signalM(mp *m, sig int) {
+ lwp_kill(-1, int32(mp.procid), sig)
+}
diff --git a/src/runtime/os_freebsd.go b/src/runtime/os_freebsd.go
index 183d8ab9c7..69e05b66a2 100644
--- a/src/runtime/os_freebsd.go
+++ b/src/runtime/os_freebsd.go
@@ -26,9 +26,11 @@ func setitimer(mode int32, new, old *itimerval)
//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-func raise(sig uint32)
func raiseproc(sig uint32)
+func thr_self() thread
+func thr_kill(tid thread, sig int)
+
//go:noescape
func sys_umtx_op(addr *uint32, mode int32, val uint32, uaddr1 uintptr, ut *umtx_time) int32
@@ -195,7 +197,7 @@ func newosproc(mp *m) {
arg: unsafe.Pointer(mp),
stack_base: mp.g0.stack.lo,
stack_size: uintptr(stk) - mp.g0.stack.lo,
- child_tid: unsafe.Pointer(&mp.procid),
+ child_tid: nil, // minit will record tid
parent_tid: nil,
tls_base: unsafe.Pointer(&mp.tls[0]),
tls_size: unsafe.Sizeof(mp.tls),
@@ -231,7 +233,7 @@ func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
arg: nil,
stack_base: uintptr(stack), //+stacksize?
stack_size: stacksize,
- child_tid: unsafe.Pointer(&m0.procid),
+ child_tid: nil, // minit will record tid
parent_tid: nil,
tls_base: unsafe.Pointer(&m0.tls[0]),
tls_size: unsafe.Sizeof(m0.tls),
@@ -290,12 +292,7 @@ func mpreinit(mp *m) {
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
- // m.procid is a uint64, but thr_new writes a uint32 on 32-bit systems.
- // Fix it up. (Only matters on big-endian, but be clean anyway.)
- if sys.PtrSize == 4 {
- _g_ := getg()
- _g_.m.procid = uint64(*(*uint32)(unsafe.Pointer(&_g_.m.procid)))
- }
+ getg().m.procid = uint64(thr_self())
// On FreeBSD before about April 2017 there was a bug such
// that calling execve from a thread other than the main
@@ -423,3 +420,17 @@ func sysSigaction(sig uint32, new, old *sigactiont) {
// asmSigaction is implemented in assembly.
//go:noescape
func asmSigaction(sig uintptr, new, old *sigactiont) int32
+
+// raise sends a signal to the calling thread.
+//
+// It must be nosplit because it is used by the signal handler before
+// it definitely has a Go stack.
+//
+//go:nosplit
+func raise(sig uint32) {
+ thr_kill(thr_self(), int(sig))
+}
+
+func signalM(mp *m, sig int) {
+ thr_kill(thread(mp.procid), sig)
+}
diff --git a/src/runtime/os_freebsd_arm64.go b/src/runtime/os_freebsd_arm64.go
new file mode 100644
index 0000000000..800bd2fa6e
--- /dev/null
+++ b/src/runtime/os_freebsd_arm64.go
@@ -0,0 +1,156 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "internal/cpu"
+
+const (
+ hwcap_FP = 1 << 0
+ hwcap_ASIMD = 1 << 1
+ hwcap_EVTSTRM = 1 << 2
+ hwcap_AES = 1 << 3
+ hwcap_PMULL = 1 << 4
+ hwcap_SHA1 = 1 << 5
+ hwcap_SHA2 = 1 << 6
+ hwcap_CRC32 = 1 << 7
+ hwcap_ATOMICS = 1 << 8
+ hwcap_FPHP = 1 << 9
+ hwcap_ASIMDHP = 1 << 10
+ hwcap_CPUID = 1 << 11
+ hwcap_ASIMDRDM = 1 << 12
+ hwcap_JSCVT = 1 << 13
+ hwcap_FCMA = 1 << 14
+ hwcap_LRCPC = 1 << 15
+ hwcap_DCPOP = 1 << 16
+ hwcap_SHA3 = 1 << 17
+ hwcap_SM3 = 1 << 18
+ hwcap_SM4 = 1 << 19
+ hwcap_ASIMDDP = 1 << 20
+ hwcap_SHA512 = 1 << 21
+ hwcap_SVE = 1 << 22
+ hwcap_ASIMDFHM = 1 << 23
+)
+
+func getisar0() uint64
+func getisar1() uint64
+func getpfr0() uint64
+
+// There is no hwcap support on FreeBSD aarch64; we retrieve the info from
+// ID_AA64ISAR0_EL1, ID_AA64ISAR1_EL1 and ID_AA64PFR0_EL1 instead.
+func archauxv(tag, val uintptr) {
+ var isar0, isar1, pfr0 uint64
+
+ isar0 = getisar0()
+ isar1 = getisar1()
+ pfr0 = getpfr0()
+
+ // ID_AA64ISAR0_EL1
+ switch extractBits(isar0, 4, 7) {
+ case 1:
+ cpu.HWCap |= hwcap_AES
+ case 2:
+ cpu.HWCap |= hwcap_PMULL | hwcap_AES
+ }
+
+ switch extractBits(isar0, 8, 11) {
+ case 1:
+ cpu.HWCap |= hwcap_SHA1
+ }
+
+ switch extractBits(isar0, 12, 15) {
+ case 1:
+ cpu.HWCap |= hwcap_SHA2
+ case 2:
+ cpu.HWCap |= hwcap_SHA2 | hwcap_SHA512
+ }
+
+ switch extractBits(isar0, 16, 19) {
+ case 1:
+ cpu.HWCap |= hwcap_CRC32
+ }
+
+ switch extractBits(isar0, 20, 23) {
+ case 2:
+ cpu.HWCap |= hwcap_ATOMICS
+ }
+
+ switch extractBits(isar0, 28, 31) {
+ case 1:
+ cpu.HWCap |= hwcap_ASIMDRDM
+ }
+
+ switch extractBits(isar0, 32, 35) {
+ case 1:
+ cpu.HWCap |= hwcap_SHA3
+ }
+
+ switch extractBits(isar0, 36, 39) {
+ case 1:
+ cpu.HWCap |= hwcap_SM3
+ }
+
+ switch extractBits(isar0, 40, 43) {
+ case 1:
+ cpu.HWCap |= hwcap_SM4
+ }
+
+ switch extractBits(isar0, 44, 47) {
+ case 1:
+ cpu.HWCap |= hwcap_ASIMDDP
+ }
+
+ // ID_AA64ISAR1_EL1
+ switch extractBits(isar1, 0, 3) {
+ case 1:
+ cpu.HWCap |= hwcap_DCPOP
+ }
+
+ switch extractBits(isar1, 12, 15) {
+ case 1:
+ cpu.HWCap |= hwcap_JSCVT
+ }
+
+ switch extractBits(isar1, 16, 19) {
+ case 1:
+ cpu.HWCap |= hwcap_FCMA
+ }
+
+ switch extractBits(isar1, 20, 23) {
+ case 1:
+ cpu.HWCap |= hwcap_LRCPC
+ }
+
+ // ID_AA64PFR0_EL1
+ switch extractBits(pfr0, 16, 19) {
+ case 0:
+ cpu.HWCap |= hwcap_FP
+ case 1:
+ cpu.HWCap |= hwcap_FP | hwcap_FPHP
+ }
+
+ switch extractBits(pfr0, 20, 23) {
+ case 0:
+ cpu.HWCap |= hwcap_ASIMD
+ case 1:
+ cpu.HWCap |= hwcap_ASIMD | hwcap_ASIMDHP
+ }
+
+ switch extractBits(pfr0, 32, 35) {
+ case 1:
+ cpu.HWCap |= hwcap_SVE
+ }
+}
+
+func extractBits(data uint64, start, end uint) uint {
+ return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
+}
+
+//go:nosplit
+func cputicks() int64 {
+	// Currently cputicks() is used in the blocking profiler and to seed fastrand().
+ // nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
+ // TODO: need more entropy to better seed fastrand.
+ return nanotime()
+}
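A worked example of the bit-field decoding above, using a hypothetical register value (the field positions are the ones archauxv reads):

    package main

    import "fmt"

    // extractBits is the same inclusive bit-field helper as above.
    func extractBits(data uint64, start, end uint) uint {
    	return (uint)(data>>start) & ((1 << (end - start + 1)) - 1)
    }

    func main() {
    	// Hypothetical ID_AA64ISAR0_EL1 with the Atomic field
    	// (bits 20-23) equal to 2: LSE atomics are implemented.
    	isar0 := uint64(2) << 20
    	if extractBits(isar0, 20, 23) == 2 {
    		fmt.Println("would set hwcap_ATOMICS")
    	}
    }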
diff --git a/src/runtime/os_freebsd_noauxv.go b/src/runtime/os_freebsd_noauxv.go
index 01efb9b7c9..c6a49927c8 100644
--- a/src/runtime/os_freebsd_noauxv.go
+++ b/src/runtime/os_freebsd_noauxv.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// +build freebsd
-// +build !arm
+// +build !arm,!arm64
package runtime
diff --git a/src/runtime/os_illumos.go b/src/runtime/os_illumos.go
new file mode 100644
index 0000000000..c3c3e4e6d5
--- /dev/null
+++ b/src/runtime/os_illumos.go
@@ -0,0 +1,132 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+//go:cgo_import_dynamic libc_getrctl getrctl "libc.so"
+//go:cgo_import_dynamic libc_rctlblk_get_local_action rctlblk_get_local_action "libc.so"
+//go:cgo_import_dynamic libc_rctlblk_get_local_flags rctlblk_get_local_flags "libc.so"
+//go:cgo_import_dynamic libc_rctlblk_get_value rctlblk_get_value "libc.so"
+//go:cgo_import_dynamic libc_rctlblk_size rctlblk_size "libc.so"
+
+//go:linkname libc_getrctl libc_getrctl
+//go:linkname libc_rctlblk_get_local_action libc_rctlblk_get_local_action
+//go:linkname libc_rctlblk_get_local_flags libc_rctlblk_get_local_flags
+//go:linkname libc_rctlblk_get_value libc_rctlblk_get_value
+//go:linkname libc_rctlblk_size libc_rctlblk_size
+
+var (
+ libc_getrctl,
+ libc_rctlblk_get_local_action,
+ libc_rctlblk_get_local_flags,
+ libc_rctlblk_get_value,
+ libc_rctlblk_size libcFunc
+)
+
+// Return the minimum value seen for the zone CPU cap, or 0 if no cap is
+// detected.
+func getcpucap() uint64 {
+ // The resource control block is an opaque object whose size is only
+ // known to libc. In practice, given the contents, it is unlikely to
+ // grow beyond 8KB so we'll use a static buffer of that size here.
+ const rblkmaxsize = 8 * 1024
+ if rctlblk_size() > rblkmaxsize {
+ return 0
+ }
+
+ // The "zone.cpu-cap" resource control, as described in
+ // resource_controls(5), "sets a limit on the amount of CPU time that
+ // can be used by a zone. The unit used is the percentage of a single
+ // CPU that can be used by all user threads in a zone, expressed as an
+ // integer." A C string of the name must be passed to getrctl(2).
+ name := []byte("zone.cpu-cap\x00")
+
+ // To iterate over the list of values for a particular resource
+ // control, we need two blocks: one for the previously read value and
+ // one for the next value.
+ var rblk0 [rblkmaxsize]byte
+ var rblk1 [rblkmaxsize]byte
+ rblk := &rblk0[0]
+ rblkprev := &rblk1[0]
+
+ var flag uint32 = _RCTL_FIRST
+ var capval uint64 = 0
+
+ for {
+ if getrctl(unsafe.Pointer(&name[0]), unsafe.Pointer(rblkprev), unsafe.Pointer(rblk), flag) != 0 {
+ // The end of the sequence is reported as an ENOENT
+ // failure, but determining the CPU cap is not critical
+ // here. We'll treat any failure as if it were the end
+ // of sequence.
+ break
+ }
+
+ lflags := rctlblk_get_local_flags(unsafe.Pointer(rblk))
+ action := rctlblk_get_local_action(unsafe.Pointer(rblk))
+ if (lflags&_RCTL_LOCAL_MAXIMAL) == 0 && action == _RCTL_LOCAL_DENY {
+ // This is a finite (not maximal) value representing a
+ // cap (deny) action.
+ v := rctlblk_get_value(unsafe.Pointer(rblk))
+ if capval == 0 || capval > v {
+ capval = v
+ }
+ }
+
+ // Swap the blocks around so that we can fetch the next value
+ t := rblk
+ rblk = rblkprev
+ rblkprev = t
+ flag = _RCTL_NEXT
+ }
+
+ return capval
+}
+
+func getncpu() int32 {
+ n := int32(sysconf(__SC_NPROCESSORS_ONLN))
+ if n < 1 {
+ return 1
+ }
+
+ if cents := int32(getcpucap()); cents > 0 {
+ // Convert from a percentage of CPUs to a number of CPUs,
+		// rounding up to make use of a fractional CPU,
+		// e.g., 336% becomes 4 CPUs.
+ ncap := (cents + 99) / 100
+ if ncap < n {
+ return ncap
+ }
+ }
+
+ return n
+}
+
+//go:nosplit
+func getrctl(controlname, oldbuf, newbuf unsafe.Pointer, flags uint32) uintptr {
+ return sysvicall4(&libc_getrctl, uintptr(controlname), uintptr(oldbuf), uintptr(newbuf), uintptr(flags))
+}
+
+//go:nosplit
+func rctlblk_get_local_action(buf unsafe.Pointer) uintptr {
+ return sysvicall2(&libc_rctlblk_get_local_action, uintptr(buf), uintptr(0))
+}
+
+//go:nosplit
+func rctlblk_get_local_flags(buf unsafe.Pointer) uintptr {
+ return sysvicall1(&libc_rctlblk_get_local_flags, uintptr(buf))
+}
+
+//go:nosplit
+func rctlblk_get_value(buf unsafe.Pointer) uint64 {
+ return uint64(sysvicall1(&libc_rctlblk_get_value, uintptr(buf)))
+}
+
+//go:nosplit
+func rctlblk_size() uintptr {
+ return sysvicall0(&libc_rctlblk_size)
+}
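The percentage-to-CPU rounding in getncpu deserves a worked example; capToCPUs is an invented name for the same arithmetic:

    package main

    import "fmt"

    // capToCPUs converts a zone.cpu-cap value (percent of one CPU) to a
    // CPU count, rounding up so a fractional cap still buys a CPU.
    func capToCPUs(cents int32) int32 { return (cents + 99) / 100 }

    func main() {
    	fmt.Println(capToCPUs(336)) // 4: a 3.36-CPU cap rounds up
    	fmt.Println(capToCPUs(100)) // 1
    }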
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index b1ddf53dd1..20b947f250 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -332,7 +332,9 @@ func gettid() uint32
func minit() {
minitSignals()
- // for debuggers, in case cgo created the thread
+ // Cgo-created threads and the bootstrap m are missing a
+	// procid. We need this for asynchronous preemption, and it's
+ // useful in debuggers.
getg().m.procid = uint64(gettid())
}
@@ -454,3 +456,11 @@ func sysSigaction(sig uint32, new, old *sigactiont) {
// rt_sigaction is implemented in assembly.
//go:noescape
func rt_sigaction(sig uintptr, new, old *sigactiont, size uintptr) int32
+
+func getpid() int
+func tgkill(tgid, tid, sig int)
+
+// signalM sends a signal to mp.
+func signalM(mp *m, sig int) {
+ tgkill(getpid(), int(mp.procid), sig)
+}
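For illustration, the same thread-directed delivery is reachable from the syscall package on Linux; this sketch targets the calling thread with a harmless SIGCONT (assumes Linux):

    package main

    import "syscall"

    func main() {
    	// tgkill(getpid(), tid, sig) delivers to one specific thread,
    	// which is what signalM needs: mp.procid is a kernel thread id.
    	tid := syscall.Gettid()
    	syscall.Tgkill(syscall.Getpid(), tid, syscall.SIGCONT)
    }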
diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go
index 3cb9411a9c..b50cf237fb 100644
--- a/src/runtime/os_netbsd.go
+++ b/src/runtime/os_netbsd.go
@@ -47,9 +47,10 @@ func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, nds
func lwp_tramp()
-func raise(sig uint32)
func raiseproc(sig uint32)
+func lwp_kill(tid int32, sig int)
+
//go:noescape
func getcontext(ctxt unsafe.Pointer)
@@ -361,3 +362,17 @@ func sysauxv(auxv []uintptr) {
}
}
}
+
+// raise sends a signal to the calling thread.
+//
+// It must be nosplit because it is used by the signal handler before
+// it definitely has a Go stack.
+//
+//go:nosplit
+func raise(sig uint32) {
+ lwp_kill(lwp_self(), int(sig))
+}
+
+func signalM(mp *m, sig int) {
+ lwp_kill(int32(mp.procid), sig)
+}
diff --git a/src/runtime/os_only_solaris.go b/src/runtime/os_only_solaris.go
new file mode 100644
index 0000000000..e2f5409354
--- /dev/null
+++ b/src/runtime/os_only_solaris.go
@@ -0,0 +1,18 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Solaris code that doesn't also apply to illumos.
+
+// +build !illumos
+
+package runtime
+
+func getncpu() int32 {
+ n := int32(sysconf(__SC_NPROCESSORS_ONLN))
+ if n < 1 {
+ return 1
+ }
+
+ return n
+}
diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go
index 351a99f7e9..f26b39575d 100644
--- a/src/runtime/os_openbsd.go
+++ b/src/runtime/os_openbsd.go
@@ -42,9 +42,11 @@ func sigprocmask(how int32, new, old *sigset) {
//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-func raise(sig uint32)
func raiseproc(sig uint32)
+func getthrid() int32
+func thrkill(tid int32, sig int)
+
//go:noescape
func tfork(param *tforkt, psize uintptr, mm *m, gg *g, fn uintptr) int32
@@ -190,7 +192,7 @@ func newosproc(mp *m) {
// rather than at the top of it.
param := tforkt{
tf_tcb: unsafe.Pointer(&mp.tls[0]),
- tf_tid: (*int32)(unsafe.Pointer(&mp.procid)),
+ tf_tid: nil, // minit will record tid
tf_stack: uintptr(stk) - sys.PtrSize,
}
@@ -238,10 +240,7 @@ func mpreinit(mp *m) {
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
- // m.procid is a uint64, but tfork writes an int32. Fix it up.
- _g_ := getg()
- _g_.m.procid = uint64(*(*int32)(unsafe.Pointer(&_g_.m.procid)))
-
+ getg().m.procid = uint64(getthrid())
minitSignals()
}
@@ -337,3 +336,11 @@ func osStackRemap(s *mspan, flags int32) {
throw("remapping stack memory failed")
}
}
+
+func raise(sig uint32) {
+ thrkill(getthrid(), int(sig))
+}
+
+func signalM(mp *m, sig int) {
+ thrkill(int32(mp.procid), sig)
+}
diff --git a/src/runtime/preempt.go b/src/runtime/preempt.go
new file mode 100644
index 0000000000..96eaa3488b
--- /dev/null
+++ b/src/runtime/preempt.go
@@ -0,0 +1,234 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Goroutine preemption
+//
+// A goroutine can be preempted at any safe-point. Currently, there
+// are a few categories of safe-points:
+//
+// 1. A blocked safe-point occurs for the duration that a goroutine is
+// descheduled, blocked on synchronization, or in a system call.
+//
+// 2. Synchronous safe-points occur when a running goroutine checks
+// for a preemption request.
+//
+// At both blocked and synchronous safe-points, a goroutine's CPU
+// state is minimal and the garbage collector has complete information
+// about its entire stack. This makes it possible to deschedule a
+// goroutine with minimal space, and to precisely scan a goroutine's
+// stack.
+//
+// Synchronous safe-points are implemented by overloading the stack
+// bound check in function prologues. To preempt a goroutine at the
+// next synchronous safe-point, the runtime poisons the goroutine's
+// stack bound to a value that will cause the next stack bound check
+// to fail and enter the stack growth implementation, which will
+// detect that it was actually a preemption and redirect to preemption
+// handling.
+
+package runtime
+
+type suspendGState struct {
+ g *g
+
+ // dead indicates the goroutine was not suspended because it
+ // is dead. This goroutine could be reused after the dead
+ // state was observed, so the caller must not assume that it
+ // remains dead.
+ dead bool
+
+ // stopped indicates that this suspendG transitioned the G to
+ // _Gwaiting via g.preemptStop and thus is responsible for
+ // readying it when done.
+ stopped bool
+}
+
+// suspendG suspends goroutine gp at a safe-point and returns the
+// state of the suspended goroutine. The caller gets read access to
+// the goroutine until it calls resumeG.
+//
+// It is safe for multiple callers to attempt to suspend the same
+// goroutine at the same time. The goroutine may execute between
+// subsequent successful suspend operations. The current
+// implementation grants exclusive access to the goroutine, and hence
+// multiple callers will serialize. However, the intent is to grant
+// shared read access, so please don't depend on exclusive access.
+//
+// This must be called from the system stack and the user goroutine on
+// the current M (if any) must be in a preemptible state. This
+// prevents deadlocks where two goroutines attempt to suspend each
+// other and both are in non-preemptible states. There are other ways
+// to resolve this deadlock, but this seems simplest.
+//
+// TODO(austin): What if we instead required this to be called from a
+// user goroutine? Then we could deschedule the goroutine while
+// waiting instead of blocking the thread. If two goroutines tried to
+// suspend each other, one of them would win and the other wouldn't
+// complete the suspend until it was resumed. We would have to be
+// careful that they couldn't actually queue up suspend for each other
+// and then both be suspended. This would also avoid the need for a
+// kernel context switch in the synchronous case because we could just
+// directly schedule the waiter. The context switch is unavoidable in
+// the signal case.
+//
+//go:systemstack
+func suspendG(gp *g) suspendGState {
+ if mp := getg().m; mp.curg != nil && readgstatus(mp.curg) == _Grunning {
+ // Since we're on the system stack of this M, the user
+ // G is stuck at an unsafe point. If another goroutine
+ // were to try to preempt m.curg, it could deadlock.
+ throw("suspendG from non-preemptible goroutine")
+ }
+
+ // See https://golang.org/cl/21503 for justification of the yield delay.
+ const yieldDelay = 10 * 1000
+ var nextYield int64
+
+ // Drive the goroutine to a preemption point.
+ stopped := false
+ for i := 0; ; i++ {
+ switch s := readgstatus(gp); s {
+ default:
+ if s&_Gscan != 0 {
+ // Someone else is suspending it. Wait
+ // for them to finish.
+ //
+ // TODO: It would be nicer if we could
+ // coalesce suspends.
+ break
+ }
+
+ dumpgstatus(gp)
+ throw("invalid g status")
+
+ case _Gdead:
+ // Nothing to suspend.
+ //
+ // preemptStop may need to be cleared, but
+ // doing that here could race with goroutine
+ // reuse. Instead, goexit0 clears it.
+ return suspendGState{dead: true}
+
+ case _Gcopystack:
+ // The stack is being copied. We need to wait
+ // until this is done.
+
+ case _Gpreempted:
+ // We (or someone else) suspended the G. Claim
+ // ownership of it by transitioning it to
+ // _Gwaiting.
+ if !casGFromPreempted(gp, _Gpreempted, _Gwaiting) {
+ break
+ }
+
+ // We stopped the G, so we have to ready it later.
+ stopped = true
+
+ s = _Gwaiting
+ fallthrough
+
+ case _Grunnable, _Gsyscall, _Gwaiting:
+ // Claim goroutine by setting scan bit.
+ // This may race with execution or readying of gp.
+			// The scan bit keeps it from transitioning state.
+ if !castogscanstatus(gp, s, s|_Gscan) {
+ break
+ }
+
+ // Clear the preemption request. It's safe to
+ // reset the stack guard because we hold the
+ // _Gscan bit and thus own the stack.
+ gp.preemptStop = false
+ gp.preempt = false
+ gp.stackguard0 = gp.stack.lo + _StackGuard
+
+ // The goroutine was already at a safe-point
+ // and we've now locked that in.
+ //
+ // TODO: It would be much better if we didn't
+ // leave it in _Gscan, but instead gently
+ // prevented its scheduling until resumption.
+ // Maybe we only use this to bump a suspended
+ // count and the scheduler skips suspended
+ // goroutines? That wouldn't be enough for
+ // {_Gsyscall,_Gwaiting} -> _Grunning. Maybe
+ // for all those transitions we need to check
+ // suspended and deschedule?
+ return suspendGState{g: gp, stopped: stopped}
+
+ case _Grunning:
+ // Optimization: if there is already a pending preemption request
+ // (from the previous loop iteration), don't bother with the atomics.
+ if gp.preemptStop && gp.preempt && gp.stackguard0 == stackPreempt {
+ break
+ }
+
+ // Temporarily block state transitions.
+ if !castogscanstatus(gp, _Grunning, _Gscanrunning) {
+ break
+ }
+
+ // Request synchronous preemption.
+ gp.preemptStop = true
+ gp.preempt = true
+ gp.stackguard0 = stackPreempt
+
+ // TODO: Inject asynchronous preemption.
+
+ casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
+ }
+
+ // TODO: Don't busy wait. This loop should really only
+ // be a simple read/decide/CAS loop that only fails if
+ // there's an active race. Once the CAS succeeds, we
+ // should queue up the preemption (which will require
+ // it to be reliable in the _Grunning case, not
+ // best-effort) and then sleep until we're notified
+ // that the goroutine is suspended.
+ if i == 0 {
+ nextYield = nanotime() + yieldDelay
+ }
+ if nanotime() < nextYield {
+ procyield(10)
+ } else {
+ osyield()
+ nextYield = nanotime() + yieldDelay/2
+ }
+ }
+}
+
+// resumeG undoes the effects of suspendG, allowing the suspended
+// goroutine to continue from its current safe-point.
+func resumeG(state suspendGState) {
+ if state.dead {
+ // We didn't actually stop anything.
+ return
+ }
+
+ gp := state.g
+ switch s := readgstatus(gp); s {
+ default:
+ dumpgstatus(gp)
+ throw("unexpected g status")
+
+ case _Grunnable | _Gscan,
+ _Gwaiting | _Gscan,
+ _Gsyscall | _Gscan:
+ casfrom_Gscanstatus(gp, s, s&^_Gscan)
+ }
+
+ if state.stopped {
+ // We stopped it, so we need to re-schedule it.
+ ready(gp, 0, true)
+ }
+}
+
+// canPreemptM reports whether mp is in a state that is safe to preempt.
+//
+// It is nosplit because it has nosplit callers.
+//
+//go:nosplit
+func canPreemptM(mp *m) bool {
+ return mp.locks == 0 && mp.mallocing == 0 && mp.preemptoff == "" && mp.p.ptr().status == _Prunning
+}
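The poisoned stack-bound mechanism described at the top of this file can be rendered as a toy in Go (the real check is a couple of compiler-emitted instructions; gToy and the sentinel value are illustrative):

    package main

    import "fmt"

    // stackPreempt stands in for the runtime's sentinel: larger than any
    // real stack address, so the prologue comparison always fails.
    const stackPreempt = ^uintptr(0) &^ 0xf // illustrative value only

    type gToy struct{ stackguard0 uintptr }

    // prologueCheck mimics the stack-bound check in every function
    // prologue. A poisoned stackguard0 diverts the goroutine into the
    // stack-growth path, which recognizes the sentinel as a preemption
    // request rather than a genuine overflow.
    func prologueCheck(gp *gToy, sp uintptr) {
    	if sp <= gp.stackguard0 {
    		if gp.stackguard0 == stackPreempt {
    			fmt.Println("synchronous safe-point: preempt")
    			return
    		}
    		fmt.Println("genuine stack growth")
    	}
    }

    func main() {
    	gp := &gToy{stackguard0: stackPreempt} // poisoned by suspendG
    	prologueCheck(gp, 0x7ffd00000000)
    }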
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 524d75e3c7..fc8aa3330a 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -710,18 +710,6 @@ func readgstatus(gp *g) uint32 {
return atomic.Load(&gp.atomicstatus)
}
-// Ownership of gcscanvalid:
-//
-// If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
-// then gp owns gp.gcscanvalid, and other goroutines must not modify it.
-//
-// Otherwise, a second goroutine can lock the scan state by setting _Gscan
-// in the status bit and then modify gcscanvalid, and then unlock the scan state.
-//
-// Note that the first condition implies an exception to the second:
-// if a second goroutine changes gp's status to _Grunning|_Gscan,
-// that second goroutine still does not have the right to modify gcscanvalid.
-
// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
@@ -738,7 +726,8 @@ func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
case _Gscanrunnable,
_Gscanwaiting,
_Gscanrunning,
- _Gscansyscall:
+ _Gscansyscall,
+ _Gscanpreempted:
if newval == oldval&^_Gscan {
success = atomic.Cas(&gp.atomicstatus, oldval, newval)
}
@@ -780,17 +769,6 @@ func casgstatus(gp *g, oldval, newval uint32) {
})
}
- if oldval == _Grunning && gp.gcscanvalid {
- // If oldvall == _Grunning, then the actual status must be
- // _Grunning or _Grunning|_Gscan; either way,
- // we own gp.gcscanvalid, so it's safe to read.
- // gp.gcscanvalid must not be true when we are running.
- systemstack(func() {
- print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
- throw("casgstatus")
- })
- }
-
// See https://golang.org/cl/21503 for justification of the yield delay.
const yieldDelay = 5 * 1000
var nextYield int64
@@ -801,14 +779,6 @@ func casgstatus(gp *g, oldval, newval uint32) {
if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
throw("casgstatus: waiting for Gwaiting but is Grunnable")
}
- // Help GC if needed.
- // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
- // gp.preemptscan = false
- // systemstack(func() {
- // gcphasework(gp)
- // })
- // }
- // But meanwhile just yield.
if i == 0 {
nextYield = nanotime() + yieldDelay
}
@@ -821,9 +791,6 @@ func casgstatus(gp *g, oldval, newval uint32) {
nextYield = nanotime() + yieldDelay/2
}
}
- if newval == _Grunning {
- gp.gcscanvalid = false
- }
}
// casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
@@ -844,109 +811,26 @@ func casgcopystack(gp *g) uint32 {
}
}
-// scang blocks until gp's stack has been scanned.
-// It might be scanned by scang or it might be scanned by the goroutine itself.
-// Either way, the stack scan has completed when scang returns.
-func scang(gp *g, gcw *gcWork) {
- // Invariant; we (the caller, markroot for a specific goroutine) own gp.gcscandone.
- // Nothing is racing with us now, but gcscandone might be set to true left over
- // from an earlier round of stack scanning (we scan twice per GC).
- // We use gcscandone to record whether the scan has been done during this round.
-
- gp.gcscandone = false
-
- // See https://golang.org/cl/21503 for justification of the yield delay.
- const yieldDelay = 10 * 1000
- var nextYield int64
-
- // Endeavor to get gcscandone set to true,
- // either by doing the stack scan ourselves or by coercing gp to scan itself.
- // gp.gcscandone can transition from false to true when we're not looking
- // (if we asked for preemption), so any time we lock the status using
- // castogscanstatus we have to double-check that the scan is still not done.
-loop:
- for i := 0; !gp.gcscandone; i++ {
- switch s := readgstatus(gp); s {
- default:
- dumpgstatus(gp)
- throw("stopg: invalid status")
-
- case _Gdead:
- // No stack.
- gp.gcscandone = true
- break loop
-
- case _Gcopystack:
- // Stack being switched. Go around again.
-
- case _Grunnable, _Gsyscall, _Gwaiting:
- // Claim goroutine by setting scan bit.
- // Racing with execution or readying of gp.
- // The scan bit keeps them from running
- // the goroutine until we're done.
- if castogscanstatus(gp, s, s|_Gscan) {
- if !gp.gcscandone {
- scanstack(gp, gcw)
- gp.gcscandone = true
- }
- restartg(gp)
- break loop
- }
-
- case _Gscanwaiting:
- // newstack is doing a scan for us right now. Wait.
-
- case _Grunning:
- // Goroutine running. Try to preempt execution so it can scan itself.
- // The preemption handler (in newstack) does the actual scan.
-
- // Optimization: if there is already a pending preemption request
- // (from the previous loop iteration), don't bother with the atomics.
- if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
- break
- }
-
- // Ask for preemption and self scan.
- if castogscanstatus(gp, _Grunning, _Gscanrunning) {
- if !gp.gcscandone {
- gp.preemptscan = true
- gp.preempt = true
- gp.stackguard0 = stackPreempt
- }
- casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
- }
- }
-
- if i == 0 {
- nextYield = nanotime() + yieldDelay
- }
- if nanotime() < nextYield {
- procyield(10)
- } else {
- osyield()
- nextYield = nanotime() + yieldDelay/2
- }
+// casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
+//
+// TODO(austin): This is the only status operation that both changes
+// the status and locks the _Gscan bit. Rethink this.
+func casGToPreemptScan(gp *g, old, new uint32) {
+ if old != _Grunning || new != _Gscan|_Gpreempted {
+ throw("bad g transition")
+ }
+ for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) {
}
-
- gp.preemptscan = false // cancel scan request if no longer needed
}
-// The GC requests that this routine be moved from a scanmumble state to a mumble state.
-func restartg(gp *g) {
- s := readgstatus(gp)
- switch s {
- default:
- dumpgstatus(gp)
- throw("restartg: unexpected status")
-
- case _Gdead:
- // ok
-
- case _Gscanrunnable,
- _Gscanwaiting,
- _Gscansyscall:
- casfrom_Gscanstatus(gp, s, s&^_Gscan)
+// casGFromPreempted attempts to transition gp from _Gpreempted to
+// _Gwaiting. If successful, the caller is responsible for
+// re-scheduling gp.
+func casGFromPreempted(gp *g, old, new uint32) bool {
+ if old != _Gpreempted || new != _Gwaiting {
+ throw("bad g transition")
}
+ return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting)
}
// stopTheWorld stops all P's from executing goroutines, interrupting
@@ -1306,6 +1190,11 @@ func mexit(osStack bool) {
// Free the gsignal stack.
if m.gsignal != nil {
stackfree(m.gsignal.stack)
+ // On some platforms, when calling into VDSO (e.g. nanotime)
+ // we store our g on the gsignal stack, if there is one.
+ // Now the stack is freed, unlink it from the m, so we
+ // won't write to it when calling VDSO code.
+ m.gsignal = nil
}
// Remove m from allm.
@@ -1675,8 +1564,6 @@ func oneNewExtraM() {
gp.syscallpc = gp.sched.pc
gp.syscallsp = gp.sched.sp
gp.stktopsp = gp.sched.sp
- gp.gcscanvalid = true
- gp.gcscandone = true
// malg returns status as _Gidle. Change to _Gdead before
// adding to allg where GC can see it. We use _Gdead to hide
// this from tracebacks and stack scans since it isn't a
@@ -2821,7 +2708,7 @@ func gosched_m(gp *g) {
// goschedguarded is a forbidden-states-avoided version of gosched_m
func goschedguarded_m(gp *g) {
- if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning {
+ if !canPreemptM(gp.m) {
gogo(&gp.sched) // never return
}
@@ -2838,6 +2725,32 @@ func gopreempt_m(gp *g) {
goschedImpl(gp)
}
+// preemptPark parks gp and puts it in _Gpreempted.
+//
+//go:systemstack
+func preemptPark(gp *g) {
+ if trace.enabled {
+ traceGoPark(traceEvGoBlock, 0)
+ }
+ status := readgstatus(gp)
+ if status&^_Gscan != _Grunning {
+ dumpgstatus(gp)
+ throw("bad g status")
+ }
+ gp.waitreason = waitReasonPreempted
+ // Transition from _Grunning to _Gscan|_Gpreempted. We can't
+ // be in _Grunning when we dropg because then we'd be running
+ // without an M, but the moment we're in _Gpreempted,
+ // something could claim this G before we've fully cleaned it
+ // up. Hence, we set the scan bit to lock down further
+ // transitions until we can dropg.
+ casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
+ dropg()
+ casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
+
+ schedule()
+}
+
// Finishes execution of the current goroutine.
func goexit1() {
if raceenabled {
@@ -2861,6 +2774,7 @@ func goexit0(gp *g) {
locked := gp.lockedm != 0
gp.lockedm = 0
_g_.m.lockedg = 0
+ gp.preemptStop = false
gp.paniconfault = false
gp._defer = nil // should be true already but just in case.
gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
@@ -2879,9 +2793,6 @@ func goexit0(gp *g) {
gp.gcAssistBytes = 0
}
- // Note that gp's stack scan is now "valid" because it has no
- // stack.
- gp.gcscanvalid = true
dropg()
if GOARCH == "wasm" { // no threads yet on wasm
@@ -3526,7 +3437,6 @@ func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerp
if isSystemGoroutine(newg, false) {
atomic.Xadd(&sched.ngsys, +1)
}
- newg.gcscanvalid = false
casgstatus(newg, _Gdead, _Grunnable)
if _p_.goidcache == _p_.goidcacheend {
@@ -4436,7 +4346,8 @@ func checkdead() {
}
s := readgstatus(gp)
switch s &^ _Gscan {
- case _Gwaiting:
+ case _Gwaiting,
+ _Gpreempted:
grunning++
case _Grunnable,
_Grunning,
diff --git a/src/runtime/proc_test.go b/src/runtime/proc_test.go
index 3a1bf91fa5..9b80ce31e5 100644
--- a/src/runtime/proc_test.go
+++ b/src/runtime/proc_test.go
@@ -992,7 +992,7 @@ func TestNetpollBreak(t *testing.T) {
}
// Make sure that netpoll is initialized.
- time.Sleep(1)
+ runtime.NetpollGenericInit()
start := time.Now()
c := make(chan bool, 2)
diff --git a/src/runtime/rt0_freebsd_arm64.s b/src/runtime/rt0_freebsd_arm64.s
new file mode 100644
index 0000000000..3a348c33e2
--- /dev/null
+++ b/src/runtime/rt0_freebsd_arm64.s
@@ -0,0 +1,106 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// On FreeBSD argc/argv are passed in R0, not RSP
+TEXT _rt0_arm64_freebsd(SB),NOSPLIT|NOFRAME,$0
+ ADD $8, R0, R1 // argv
+ MOVD 0(R0), R0 // argc
+ BL main(SB)
+
+// When building with -buildmode=c-shared, this symbol is called when the shared
+// library is loaded.
+TEXT _rt0_arm64_freebsd_lib(SB),NOSPLIT,$184
+ // Preserve callee-save registers.
+ MOVD R19, 24(RSP)
+ MOVD R20, 32(RSP)
+ MOVD R21, 40(RSP)
+ MOVD R22, 48(RSP)
+ MOVD R23, 56(RSP)
+ MOVD R24, 64(RSP)
+ MOVD R25, 72(RSP)
+ MOVD R26, 80(RSP)
+ MOVD R27, 88(RSP)
+ FMOVD F8, 96(RSP)
+ FMOVD F9, 104(RSP)
+ FMOVD F10, 112(RSP)
+ FMOVD F11, 120(RSP)
+ FMOVD F12, 128(RSP)
+ FMOVD F13, 136(RSP)
+ FMOVD F14, 144(RSP)
+ FMOVD F15, 152(RSP)
+ MOVD g, 160(RSP)
+
+	// Initialize g as nil in case it is used later, e.g. by sigaction in cgo_sigaction.go
+ MOVD ZR, g
+
+ MOVD R0, _rt0_arm64_freebsd_lib_argc<>(SB)
+ MOVD R1, _rt0_arm64_freebsd_lib_argv<>(SB)
+
+ // Synchronous initialization.
+ MOVD $runtime·libpreinit(SB), R4
+ BL (R4)
+
+ // Create a new thread to do the runtime initialization and return.
+ MOVD _cgo_sys_thread_create(SB), R4
+ CMP $0, R4
+ BEQ nocgo
+ MOVD $_rt0_arm64_freebsd_lib_go(SB), R0
+ MOVD $0, R1
+ SUB $16, RSP // reserve 16 bytes for sp-8 where fp may be saved.
+ BL (R4)
+ ADD $16, RSP
+ B restore
+
+nocgo:
+ MOVD $0x800000, R0 // stacksize = 8192KB
+ MOVD $_rt0_arm64_freebsd_lib_go(SB), R1
+ MOVD R0, 8(RSP)
+ MOVD R1, 16(RSP)
+ MOVD $runtime·newosproc0(SB),R4
+ BL (R4)
+
+restore:
+ // Restore callee-save registers.
+ MOVD 24(RSP), R19
+ MOVD 32(RSP), R20
+ MOVD 40(RSP), R21
+ MOVD 48(RSP), R22
+ MOVD 56(RSP), R23
+ MOVD 64(RSP), R24
+ MOVD 72(RSP), R25
+ MOVD 80(RSP), R26
+ MOVD 88(RSP), R27
+ FMOVD 96(RSP), F8
+ FMOVD 104(RSP), F9
+ FMOVD 112(RSP), F10
+ FMOVD 120(RSP), F11
+ FMOVD 128(RSP), F12
+ FMOVD 136(RSP), F13
+ FMOVD 144(RSP), F14
+ FMOVD 152(RSP), F15
+ MOVD 160(RSP), g
+ RET
+
+TEXT _rt0_arm64_freebsd_lib_go(SB),NOSPLIT,$0
+ MOVD _rt0_arm64_freebsd_lib_argc<>(SB), R0
+ MOVD _rt0_arm64_freebsd_lib_argv<>(SB), R1
+ MOVD $runtime·rt0_go(SB),R4
+ B (R4)
+
+DATA _rt0_arm64_freebsd_lib_argc<>(SB)/8, $0
+GLOBL _rt0_arm64_freebsd_lib_argc<>(SB),NOPTR, $8
+DATA _rt0_arm64_freebsd_lib_argv<>(SB)/8, $0
+GLOBL _rt0_arm64_freebsd_lib_argv<>(SB),NOPTR, $8
+
+TEXT main(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·rt0_go(SB), R2
+ BL (R2)
+exit:
+ MOVD $0, R0
+ MOVD $1, R8 // SYS_exit
+ SVC
+ B exit
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index c5023027be..c319196557 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -78,6 +78,13 @@ const (
// stack is owned by the goroutine that put it in _Gcopystack.
_Gcopystack // 8
+ // _Gpreempted means this goroutine stopped itself for a
+ // suspendG preemption. It is like _Gwaiting, but nothing is
+ // yet responsible for ready()ing it. Some suspendG must CAS
+ // the status to _Gwaiting to take responsibility for
+ // ready()ing this G.
+ _Gpreempted // 9
+
// _Gscan combined with one of the above states other than
// _Grunning indicates that GC is scanning the stack. The
// goroutine is not executing user code and the stack is owned
@@ -89,11 +96,12 @@ const (
//
// atomicstatus&~Gscan gives the state the goroutine will
// return to when the scan completes.
- _Gscan = 0x1000
- _Gscanrunnable = _Gscan + _Grunnable // 0x1001
- _Gscanrunning = _Gscan + _Grunning // 0x1002
- _Gscansyscall = _Gscan + _Gsyscall // 0x1003
- _Gscanwaiting = _Gscan + _Gwaiting // 0x1004
+ _Gscan = 0x1000
+ _Gscanrunnable = _Gscan + _Grunnable // 0x1001
+ _Gscanrunning = _Gscan + _Grunning // 0x1002
+ _Gscansyscall = _Gscan + _Gsyscall // 0x1003
+ _Gscanwaiting = _Gscan + _Gwaiting // 0x1004
+ _Gscanpreempted = _Gscan + _Gpreempted // 0x1009
)
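Since the scan bit simply composes with a base state, decoding a combined status is plain bit arithmetic; the values below are the ones from this const block:

    package main

    import "fmt"

    const (
    	_Gpreempted = 9
    	_Gscan      = 0x1000
    )

    func main() {
    	status := uint32(_Gscan + _Gpreempted) // _Gscanpreempted, 0x1009
    	fmt.Printf("base state: %d\n", status&^_Gscan)        // 9
    	fmt.Printf("being scanned: %v\n", status&_Gscan != 0) // true
    }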
const (
@@ -396,31 +404,39 @@ type g struct {
stackguard0 uintptr // offset known to liblink
stackguard1 uintptr // offset known to liblink
- _panic *_panic // innermost panic - offset known to liblink
- _defer *_defer // innermost defer
- m *m // current m; offset known to arm liblink
- sched gobuf
- syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
- syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
- stktopsp uintptr // expected sp at top of stack, to check in traceback
- param unsafe.Pointer // passed parameter on wakeup
- atomicstatus uint32
- stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
- goid int64
- schedlink guintptr
- waitsince int64 // approx time when the g become blocked
- waitreason waitReason // if status==Gwaiting
- preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
- paniconfault bool // panic (instead of crash) on unexpected fault address
- preemptscan bool // preempted g does scan for gc
- gcscandone bool // g has scanned stack; protected by _Gscan bit in status
- gcscanvalid bool // false at start of gc cycle, true if G has not run since last scan; TODO: remove?
- throwsplit bool // must not split stack
- raceignore int8 // ignore race detection events
- sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
- sysexitticks int64 // cputicks when syscall has returned (for tracing)
- traceseq uint64 // trace event sequencer
- tracelastp puintptr // last P emitted an event for this goroutine
+ _panic *_panic // innermost panic - offset known to liblink
+ _defer *_defer // innermost defer
+ m *m // current m; offset known to arm liblink
+ sched gobuf
+ syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
+ syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
+ stktopsp uintptr // expected sp at top of stack, to check in traceback
+ param unsafe.Pointer // passed parameter on wakeup
+ atomicstatus uint32
+ stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
+ goid int64
+ schedlink guintptr
+	waitsince int64 // approx time when the g became blocked
+ waitreason waitReason // if status==Gwaiting
+
+ preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
+ preemptStop bool // transition to _Gpreempted on preemption; otherwise, just deschedule
+ preemptShrink bool // shrink stack at synchronous safe point
+
+ paniconfault bool // panic (instead of crash) on unexpected fault address
+ gcscandone bool // g has scanned stack; protected by _Gscan bit in status
+ throwsplit bool // must not split stack
+ // activeStackChans indicates that there are unlocked channels
+ // pointing into this goroutine's stack. If true, stack
+ // copying needs to acquire channel locks to protect these
+ // areas of the stack.
+ activeStackChans bool
+
+ raceignore int8 // ignore race detection events
+ sysblocktraced bool // StartTrace has emitted EvGoInSyscall about this goroutine
+ sysexitticks int64 // cputicks when syscall has returned (for tracing)
+ traceseq uint64 // trace event sequencer
+ tracelastp puintptr // last P emitted an event for this goroutine
lockedm muintptr
sig uint32
writebuf []byte
@@ -808,10 +824,10 @@ type _defer struct {
// defers. We have only one defer record for the entire frame (which may
// currently have 0, 1, or more defers active).
openDefer bool
- sp uintptr // sp at time of defer
- pc uintptr // pc at time of defer
- fn *funcval
- _panic *_panic // panic that is running defer
+ sp uintptr // sp at time of defer
+ pc uintptr // pc at time of defer
+ fn *funcval // can be nil for open-coded defers
+ _panic *_panic // panic that is running defer
link *_defer
// If openDefer is true, the fields below record values about the stack
@@ -906,6 +922,7 @@ const (
waitReasonTraceReaderBlocked // "trace reader (blocked)"
waitReasonWaitForGCCycle // "wait for GC cycle"
waitReasonGCWorkerIdle // "GC worker (idle)"
+ waitReasonPreempted // "preempted"
)
var waitReasonStrings = [...]string{
@@ -934,6 +951,7 @@ var waitReasonStrings = [...]string{
waitReasonTraceReaderBlocked: "trace reader (blocked)",
waitReasonWaitForGCCycle: "wait for GC cycle",
waitReasonGCWorkerIdle: "GC worker (idle)",
+ waitReasonPreempted: "preempted",
}
func (w waitReason) String() string {
diff --git a/src/runtime/select.go b/src/runtime/select.go
index d2c5a03a1a..8033b6512f 100644
--- a/src/runtime/select.go
+++ b/src/runtime/select.go
@@ -75,6 +75,9 @@ func selunlock(scases []scase, lockorder []uint16) {
}
func selparkcommit(gp *g, _ unsafe.Pointer) bool {
+ // There are unlocked sudogs that point into gp's stack. Stack
+ // copying must lock the channels of those sudogs.
+ gp.activeStackChans = true
// This must not access gp's stack (see gopark). In
// particular, it must not access the *hselect. That's okay,
// because by the time this is called, gp.waiting has all
@@ -311,6 +314,7 @@ loop:
// wait for someone to wake us up
gp.param = nil
gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1)
+ gp.activeStackChans = false
sellock(scases, lockorder)
diff --git a/src/runtime/signal_arm64.go b/src/runtime/signal_arm64.go
index 7a3b1ccbb8..e1fe62d99d 100644
--- a/src/runtime/signal_arm64.go
+++ b/src/runtime/signal_arm64.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin linux netbsd openbsd
+// +build darwin freebsd linux netbsd openbsd
package runtime
diff --git a/src/runtime/signal_freebsd_arm64.go b/src/runtime/signal_freebsd_arm64.go
new file mode 100644
index 0000000000..159e965a7d
--- /dev/null
+++ b/src/runtime/signal_freebsd_arm64.go
@@ -0,0 +1,66 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+//go:nosplit
+//go:nowritebarrierrec
+func (c *sigctxt) regs() *mcontext { return &(*ucontext)(c.ctxt).uc_mcontext }
+
+func (c *sigctxt) r0() uint64 { return c.regs().mc_gpregs.gp_x[0] }
+func (c *sigctxt) r1() uint64 { return c.regs().mc_gpregs.gp_x[1] }
+func (c *sigctxt) r2() uint64 { return c.regs().mc_gpregs.gp_x[2] }
+func (c *sigctxt) r3() uint64 { return c.regs().mc_gpregs.gp_x[3] }
+func (c *sigctxt) r4() uint64 { return c.regs().mc_gpregs.gp_x[4] }
+func (c *sigctxt) r5() uint64 { return c.regs().mc_gpregs.gp_x[5] }
+func (c *sigctxt) r6() uint64 { return c.regs().mc_gpregs.gp_x[6] }
+func (c *sigctxt) r7() uint64 { return c.regs().mc_gpregs.gp_x[7] }
+func (c *sigctxt) r8() uint64 { return c.regs().mc_gpregs.gp_x[8] }
+func (c *sigctxt) r9() uint64 { return c.regs().mc_gpregs.gp_x[9] }
+func (c *sigctxt) r10() uint64 { return c.regs().mc_gpregs.gp_x[10] }
+func (c *sigctxt) r11() uint64 { return c.regs().mc_gpregs.gp_x[11] }
+func (c *sigctxt) r12() uint64 { return c.regs().mc_gpregs.gp_x[12] }
+func (c *sigctxt) r13() uint64 { return c.regs().mc_gpregs.gp_x[13] }
+func (c *sigctxt) r14() uint64 { return c.regs().mc_gpregs.gp_x[14] }
+func (c *sigctxt) r15() uint64 { return c.regs().mc_gpregs.gp_x[15] }
+func (c *sigctxt) r16() uint64 { return c.regs().mc_gpregs.gp_x[16] }
+func (c *sigctxt) r17() uint64 { return c.regs().mc_gpregs.gp_x[17] }
+func (c *sigctxt) r18() uint64 { return c.regs().mc_gpregs.gp_x[18] }
+func (c *sigctxt) r19() uint64 { return c.regs().mc_gpregs.gp_x[19] }
+func (c *sigctxt) r20() uint64 { return c.regs().mc_gpregs.gp_x[20] }
+func (c *sigctxt) r21() uint64 { return c.regs().mc_gpregs.gp_x[21] }
+func (c *sigctxt) r22() uint64 { return c.regs().mc_gpregs.gp_x[22] }
+func (c *sigctxt) r23() uint64 { return c.regs().mc_gpregs.gp_x[23] }
+func (c *sigctxt) r24() uint64 { return c.regs().mc_gpregs.gp_x[24] }
+func (c *sigctxt) r25() uint64 { return c.regs().mc_gpregs.gp_x[25] }
+func (c *sigctxt) r26() uint64 { return c.regs().mc_gpregs.gp_x[26] }
+func (c *sigctxt) r27() uint64 { return c.regs().mc_gpregs.gp_x[27] }
+func (c *sigctxt) r28() uint64 { return c.regs().mc_gpregs.gp_x[28] }
+func (c *sigctxt) r29() uint64 { return c.regs().mc_gpregs.gp_x[29] }
+func (c *sigctxt) lr() uint64 { return c.regs().mc_gpregs.gp_lr }
+func (c *sigctxt) sp() uint64 { return c.regs().mc_gpregs.gp_sp }
+
+//go:nosplit
+//go:nowritebarrierrec
+func (c *sigctxt) pc() uint64 { return c.regs().mc_gpregs.gp_elr }
+
+func (c *sigctxt) fault() uint64 { return c.info.si_addr }
+
+func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }
+
+func (c *sigctxt) set_pc(x uint64) { c.regs().mc_gpregs.gp_elr = x }
+func (c *sigctxt) set_sp(x uint64) { c.regs().mc_gpregs.gp_sp = x }
+func (c *sigctxt) set_lr(x uint64) { c.regs().mc_gpregs.gp_lr = x }
+func (c *sigctxt) set_r28(x uint64) { c.regs().mc_gpregs.gp_x[28] = x }
+
+func (c *sigctxt) set_sigcode(x uint64) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) { c.info.si_addr = x }
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index cea65282e0..e0757acbed 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -305,7 +305,7 @@ func sigFetchG(c *sigctxt) *g {
// work.
sp := getcallersp()
s := spanOf(sp)
- if s != nil && s.state == mSpanManual && s.base() < sp && sp < s.limit {
+ if s != nil && s.state.get() == mSpanManual && s.base() < sp && sp < s.limit {
gp := *(**g)(unsafe.Pointer(s.base()))
return gp
}
@@ -412,10 +412,11 @@ func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool {
// GOTRACEBACK=crash when a signal is received.
var crashing int32
-// testSigtrap is used by the runtime tests. If non-nil, it is called
-// on SIGTRAP. If it returns true, the normal behavior on SIGTRAP is
-// suppressed.
+// testSigtrap and testSigusr1 are used by the runtime tests. If
+// non-nil, they are called on SIGTRAP and SIGUSR1 respectively; if
+// the hook returns true, the normal behavior on that signal is suppressed.
var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
+var testSigusr1 func(gp *g) bool
// sighandler is invoked when a signal occurs. The global g will be
// set to a gsignal goroutine and we will be running on the alternate
@@ -441,6 +442,10 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
return
}
+ if sig == _SIGUSR1 && testSigusr1 != nil && testSigusr1(gp) {
+ return
+ }
+
flags := int32(_SigThrow)
if sig < uint32(len(sigtable)) {
flags = sigtable[sig].flags
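
testSigusr1 mirrors the existing testSigtrap hook: a test installs a callback, arranges for SIGUSR1 to be delivered, and returns true to swallow the signal before the default handling runs. A hedged sketch of a test-side installation (the counter and function names are illustrative):

    // Count SIGUSR1 deliveries during a test run.
    var sigusr1Count uint32

    func startCountingSigusr1() {
        testSigusr1 = func(gp *g) bool {
            atomic.Xadd(&sigusr1Count, 1)
            return true // suppress the normal SIGUSR1 behavior
        }
    }
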
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 93f9769899..b87aa0d61b 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -219,7 +219,7 @@ func stackpoolalloc(order uint8) gclinkptr {
// Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8) {
s := spanOfUnchecked(uintptr(x))
- if s.state != mSpanManual {
+ if s.state.get() != mSpanManual {
throw("freeing stack not in a stack span")
}
if s.manualFreeList.ptr() == nil {
@@ -467,7 +467,7 @@ func stackfree(stk stack) {
}
} else {
s := spanOfUnchecked(uintptr(v))
- if s.state != mSpanManual {
+ if s.state.get() != mSpanManual {
println(hex(s.base()), v)
throw("bad span state")
}
@@ -786,10 +786,6 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
}
// Lock channels to prevent concurrent send/receive.
- // It's important that we *only* do this for async
- // copystack; otherwise, gp may be in the middle of
- // putting itself on wait queues and this would
- // self-deadlock.
var lastc *hchan
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
if sg.c != lastc {
@@ -826,12 +822,7 @@ func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
-//
-// If sync is true, this is a self-triggered stack growth and, in
-// particular, no other G may be writing to gp's stack (e.g., via a
-// channel operation). If sync is false, copystack protects against
-// concurrent channel operations.
-func copystack(gp *g, newsize uintptr, sync bool) {
+func copystack(gp *g, newsize uintptr) {
if gp.syscallsp != 0 {
throw("stack growth not allowed in system call")
}
@@ -857,15 +848,16 @@ func copystack(gp *g, newsize uintptr, sync bool) {
// Adjust sudogs, synchronizing with channel ops if necessary.
ncopy := used
- if sync {
+ if !gp.activeStackChans {
adjustsudogs(gp, &adjinfo)
} else {
- // sudogs can point in to the stack. During concurrent
- // shrinking, these areas may be written to. Find the
- // highest such pointer so we can handle everything
- // there and below carefully. (This shouldn't be far
- // from the bottom of the stack, so there's little
- // cost in handling everything below it carefully.)
+ // sudogs may be pointing in to the stack and gp has
+ // released channel locks, so other goroutines could
+ // be writing to gp's stack. Find the highest such
+ // pointer so we can handle everything there and below
+ // carefully. (This shouldn't be far from the bottom
+ // of the stack, so there's little cost in handling
+ // everything below it carefully.)
adjinfo.sghi = findsghi(gp, old)
// Synchronize with channel ops and copy the part of
@@ -916,7 +908,7 @@ func round2(x int32) int32 {
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
-// If the GC is trying to stop this g then it will set preemptscan to true.
+// If the scheduler is trying to stop this g, then it will set preemptStop.
//
// This must be nowritebarrierrec because it can be called as part of
// stack growth from other nowritebarrierrec functions, but the
@@ -983,7 +975,7 @@ func newstack() {
// it needs a lock held by the goroutine), that small preemption turns
// into a real deadlock.
if preempt {
- if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
+ if !canPreemptM(thisg.m) {
// Let the goroutine keep running for now.
// gp->preempt is set, so it will be preempted next time.
gp.stackguard0 = gp.stack.lo + _StackGuard
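
canPreemptM bundles the conditions that the deleted line spelled out inline. A sketch of its likely definition in the new preempt.go, reassembling the same four checks:

    // Sketch: an M may be preempted only when it holds no locks, is not
    // in the allocator, has preemption enabled, and its P is running.
    func canPreemptM(mp *m) bool {
        return mp.locks == 0 && mp.mallocing == 0 &&
            mp.preemptoff == "" && mp.p.ptr().status == _Prunning
    }
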
@@ -1017,34 +1009,19 @@ func newstack() {
if thisg.m.p == 0 && thisg.m.locks == 0 {
throw("runtime: g is running but p is not")
}
- // Synchronize with scang.
- casgstatus(gp, _Grunning, _Gwaiting)
- if gp.preemptscan {
- for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
- // Likely to be racing with the GC as
- // it sees a _Gwaiting and does the
- // stack scan. If so, gcworkdone will
- // be set and gcphasework will simply
- // return.
- }
- if !gp.gcscandone {
- // gcw is safe because we're on the
- // system stack.
- gcw := &gp.m.p.ptr().gcw
- scanstack(gp, gcw)
- gp.gcscandone = true
- }
- gp.preemptscan = false
- gp.preempt = false
- casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
- // This clears gcscanvalid.
- casgstatus(gp, _Gwaiting, _Grunning)
- gp.stackguard0 = gp.stack.lo + _StackGuard
- gogo(&gp.sched) // never return
+
+ if gp.preemptShrink {
+ // We're at a synchronous safe point now, so
+ // do the pending stack shrink.
+ gp.preemptShrink = false
+ shrinkstack(gp)
+ }
+
+ if gp.preemptStop {
+ preemptPark(gp) // never returns
}
// Act like goroutine called runtime.Gosched.
- casgstatus(gp, _Gwaiting, _Grunning)
gopreempt_m(gp) // never return
}
@@ -1062,7 +1039,7 @@ func newstack() {
// The concurrent GC will not scan the stack while we are doing the copy since
// the gp is in a Gcopystack status.
- copystack(gp, newsize, true)
+ copystack(gp, newsize)
if stackDebug >= 1 {
print("stack grow done\n")
}
@@ -1087,16 +1064,36 @@ func gostartcallfn(gobuf *gobuf, fv *funcval) {
gostartcall(gobuf, fn, unsafe.Pointer(fv))
}
+// isShrinkStackSafe returns whether it's safe to attempt to shrink
+// gp's stack. Shrinking the stack is only safe when we have precise
+// pointer maps for all frames on the stack.
+func isShrinkStackSafe(gp *g) bool {
+ // We can't copy the stack if we're in a syscall.
+ // The syscall might have pointers into the stack and
+ // often we don't have precise pointer maps for the innermost
+ // frames.
+ return gp.syscallsp == 0
+}
+
// Maybe shrink the stack being used by gp.
-// Called at garbage collection time.
-// gp must be stopped, but the world need not be.
+//
+// gp must be stopped and we must own its stack. It may be in
+// _Grunning, but only if this is our own user G.
func shrinkstack(gp *g) {
- gstatus := readgstatus(gp)
if gp.stack.lo == 0 {
throw("missing stack in shrinkstack")
}
- if gstatus&_Gscan == 0 {
- throw("bad status in shrinkstack")
+ if s := readgstatus(gp); s&_Gscan == 0 {
+ // We don't own the stack via _Gscan. We could still
+ // own it if this is our own user G and we're on the
+ // system stack.
+ if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
+ // We don't own the stack.
+ throw("bad status in shrinkstack")
+ }
+ }
+ if !isShrinkStackSafe(gp) {
+ throw("shrinkstack at bad time")
}
// Check for self-shrinks while in a libcall. These may have
// pointers into the stack disguised as uintptrs, but these
@@ -1132,17 +1129,11 @@ func shrinkstack(gp *g) {
return
}
- // We can't copy the stack if we're in a syscall.
- // The syscall might have pointers into the stack.
- if gp.syscallsp != 0 {
- return
- }
-
if stackDebug > 0 {
print("shrinking stack ", oldsize, "->", newsize, "\n")
}
- copystack(gp, newsize, false)
+ copystack(gp, newsize)
}
// freeStackSpans frees unused stack spans at the end of GC.
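
With the sync flag gone, copystack recovers the same information from the goroutine itself: gp.activeStackChans reports whether gp has unlocked channels that may still point into its stack. The sudog adjustment above therefore reduces to a single decision, roughly:

    // Condensed from the copystack hunk above, for orientation.
    ncopy := used
    if !gp.activeStackChans {
        // Nothing else can be writing to this stack; fix up
        // sudog pointers directly.
        adjustsudogs(gp, &adjinfo)
    } else {
        // Concurrent channel ops may write to the stack: find the
        // highest sudog pointer and synchronize with the channels
        // for everything at or below it.
        adjinfo.sghi = findsghi(gp, old)
        ncopy -= syncadjustsudogs(gp, used, &adjinfo)
    }
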
diff --git a/src/runtime/sys_darwin.go b/src/runtime/sys_darwin.go
index 46825d5937..31304ce737 100644
--- a/src/runtime/sys_darwin.go
+++ b/src/runtime/sys_darwin.go
@@ -162,6 +162,14 @@ func pthread_self() (t pthread) {
}
func pthread_self_trampoline()
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_kill(t pthread, sig uint32) {
+ libcCall(unsafe.Pointer(funcPC(pthread_kill_trampoline)), unsafe.Pointer(&t))
+ return
+}
+func pthread_kill_trampoline()
+
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
args := struct {
addr unsafe.Pointer
@@ -415,6 +423,8 @@ func setNonblock(fd int32) {
//go:cgo_import_dynamic libc_pthread_attr_getstacksize pthread_attr_getstacksize "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_attr_setdetachstate pthread_attr_setdetachstate "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_create pthread_create "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_self pthread_self "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_pthread_kill pthread_kill "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_raise raise "/usr/lib/libSystem.B.dylib"
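
pthread_self and pthread_kill are the Darwin primitives for aiming a signal at one thread rather than the whole process, which the preemption work needs in order to interrupt a specific M. A sketch of the intended combination, assuming mp.procid caches the pthread handle (the concrete signalM lives in os_darwin.go, outside this diff):

    // Sketch: deliver sig to the OS thread backing mp.
    func signalM(mp *m, sig int) {
        pthread_kill(pthread(mp.procid), uint32(sig))
    }
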
diff --git a/src/runtime/sys_darwin_386.s b/src/runtime/sys_darwin_386.s
index bea804b8dd..15b7cfb213 100644
--- a/src/runtime/sys_darwin_386.s
+++ b/src/runtime/sys_darwin_386.s
@@ -653,6 +653,31 @@ TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0
POPL BP
RET
+TEXT runtime·pthread_self_trampoline(SB),NOSPLIT,$0
+ PUSHL BP
+ MOVL SP, BP
+ NOP SP // hide SP from vet
+ CALL libc_pthread_self(SB)
+ MOVL 8(SP), CX
+ MOVL AX, 0(CX) // return value
+ MOVL BP, SP
+ POPL BP
+ RET
+
+TEXT runtime·pthread_kill_trampoline(SB),NOSPLIT,$0
+ PUSHL BP
+ MOVL SP, BP
+ SUBL $8, SP
+ MOVL 16(SP), CX
+ MOVL 0(CX), AX // arg 1 thread
+ MOVL AX, 0(SP)
+ MOVL 4(CX), AX // arg 2 sig
+ MOVL AX, 4(SP)
+ CALL libc_pthread_kill(SB)
+ MOVL BP, SP
+ POPL BP
+ RET
+
// syscall calls a function in libc on behalf of the syscall package.
// syscall takes a pointer to a struct like:
// struct {
diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s
index ea8cf1abb1..a45ea42e5d 100644
--- a/src/runtime/sys_darwin_amd64.s
+++ b/src/runtime/sys_darwin_amd64.s
@@ -566,6 +566,24 @@ TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
+TEXT runtime·pthread_self_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ DI, BX // BX is callee-save
+ CALL libc_pthread_self(SB)
+ MOVQ AX, 0(BX) // return value
+ POPQ BP
+ RET
+
+TEXT runtime·pthread_kill_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 sig
+ MOVQ 0(DI), DI // arg 1 thread
+ CALL libc_pthread_kill(SB)
+ POPQ BP
+ RET
+
// syscall calls a function in libc on behalf of the syscall package.
// syscall takes a pointer to a struct like:
// struct {
diff --git a/src/runtime/sys_darwin_arm.s b/src/runtime/sys_darwin_arm.s
index 84b0b0f5f4..c08a29e7e0 100644
--- a/src/runtime/sys_darwin_arm.s
+++ b/src/runtime/sys_darwin_arm.s
@@ -182,14 +182,8 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-16
TEXT runtime·sigtramp(SB),NOSPLIT,$0
// Reserve space for callee-save registers and arguments.
- SUB $40, R13
-
- MOVW R4, 16(R13)
- MOVW R5, 20(R13)
- MOVW R6, 24(R13)
- MOVW R7, 28(R13)
- MOVW R8, 32(R13)
- MOVW R11, 36(R13)
+ MOVM.DB.W [R4-R11], (R13)
+ SUB $16, R13
// Save arguments.
MOVW R0, 4(R13) // sig
@@ -238,14 +232,8 @@ nog:
MOVW R5, R13
// Restore callee-save registers.
- MOVW 16(R13), R4
- MOVW 20(R13), R5
- MOVW 24(R13), R6
- MOVW 28(R13), R7
- MOVW 32(R13), R8
- MOVW 36(R13), R11
-
- ADD $40, R13
+ ADD $16, R13
+ MOVM.IA.W (R13), [R4-R11]
RET
@@ -405,6 +393,18 @@ TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0
BL libc_pthread_cond_signal(SB)
RET
+TEXT runtime·pthread_self_trampoline(SB),NOSPLIT,$0
+ MOVW R0, R4 // R4 is callee-save
+ BL libc_pthread_self(SB)
+ MOVW R0, 0(R4) // return value
+ RET
+
+TEXT runtime·pthread_kill_trampoline(SB),NOSPLIT,$0
+ MOVW 4(R0), R1 // arg 2 sig
+ MOVW 0(R0), R0 // arg 1 thread
+ BL libc_pthread_kill(SB)
+ RET
+
// syscall calls a function in libc on behalf of the syscall package.
// syscall takes a pointer to a struct like:
// struct {
diff --git a/src/runtime/sys_darwin_arm64.s b/src/runtime/sys_darwin_arm64.s
index 8d39a0727f..585d4f2c64 100644
--- a/src/runtime/sys_darwin_arm64.s
+++ b/src/runtime/sys_darwin_arm64.s
@@ -471,6 +471,18 @@ TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0
BL libc_pthread_cond_signal(SB)
RET
+TEXT runtime·pthread_self_trampoline(SB),NOSPLIT,$0
+ MOVD R0, R19 // R19 is callee-save
+ BL libc_pthread_self(SB)
+ MOVD R0, 0(R19) // return value
+ RET
+
+TEXT runtime·pthread_kill_trampoline(SB),NOSPLIT,$0
+ MOVD 8(R0), R1 // arg 2 sig
+ MOVD 0(R0), R0 // arg 1 thread
+ BL libc_pthread_kill(SB)
+ RET
+
// syscall calls a function in libc on behalf of the syscall package.
// syscall takes a pointer to a struct like:
// struct {
diff --git a/src/runtime/sys_dragonfly_amd64.s b/src/runtime/sys_dragonfly_amd64.s
index 68962d9e30..580633af55 100644
--- a/src/runtime/sys_dragonfly_amd64.s
+++ b/src/runtime/sys_dragonfly_amd64.s
@@ -134,12 +134,16 @@ TEXT runtime·write1(SB),NOSPLIT,$-8
MOVL AX, ret+24(FP)
RET
-TEXT runtime·raise(SB),NOSPLIT,$16
+TEXT runtime·lwp_gettid(SB),NOSPLIT,$0-4
MOVL $496, AX // lwp_gettid
SYSCALL
- MOVQ $-1, DI // arg 1 - pid
- MOVQ AX, SI // arg 2 - tid
- MOVL sig+0(FP), DX // arg 3 - signum
+ MOVL AX, ret+0(FP)
+ RET
+
+TEXT runtime·lwp_kill(SB),NOSPLIT,$0-16
+ MOVL pid+0(FP), DI // arg 1 - pid
+ MOVL tid+4(FP), SI // arg 2 - tid
+ MOVQ sig+8(FP), DX // arg 3 - signum
MOVL $497, AX // lwp_kill
SYSCALL
RET
diff --git a/src/runtime/sys_freebsd_386.s b/src/runtime/sys_freebsd_386.s
index 48f64b9f8b..c346e719e1 100644
--- a/src/runtime/sys_freebsd_386.s
+++ b/src/runtime/sys_freebsd_386.s
@@ -131,17 +131,16 @@ TEXT runtime·write1(SB),NOSPLIT,$-4
MOVL AX, ret+12(FP)
RET
-TEXT runtime·raise(SB),NOSPLIT,$16
- // thr_self(&8(SP))
- LEAL 8(SP), AX
+TEXT runtime·thr_self(SB),NOSPLIT,$8-4
+ // thr_self(&0(FP))
+ LEAL ret+0(FP), AX
MOVL AX, 4(SP)
MOVL $432, AX
INT $0x80
- // thr_kill(self, SIGPIPE)
- MOVL 8(SP), AX
- MOVL AX, 4(SP)
- MOVL sig+0(FP), AX
- MOVL AX, 8(SP)
+ RET
+
+TEXT runtime·thr_kill(SB),NOSPLIT,$-4
+ // thr_kill(tid, sig)
MOVL $433, AX
INT $0x80
RET
diff --git a/src/runtime/sys_freebsd_amd64.s b/src/runtime/sys_freebsd_amd64.s
index d24ab1f643..010b2ec4d4 100644
--- a/src/runtime/sys_freebsd_amd64.s
+++ b/src/runtime/sys_freebsd_amd64.s
@@ -132,14 +132,17 @@ TEXT runtime·write1(SB),NOSPLIT,$-8
MOVL AX, ret+24(FP)
RET
-TEXT runtime·raise(SB),NOSPLIT,$16
- // thr_self(&8(SP))
- LEAQ 8(SP), DI // arg 1 &8(SP)
+TEXT runtime·thr_self(SB),NOSPLIT,$0-8
+ // thr_self(&0(FP))
+ LEAQ ret+0(FP), DI // arg 1
MOVL $432, AX
SYSCALL
- // thr_kill(self, SIGPIPE)
- MOVQ 8(SP), DI // arg 1 id
- MOVL sig+0(FP), SI // arg 2
+ RET
+
+TEXT runtime·thr_kill(SB),NOSPLIT,$0-16
+ // thr_kill(tid, sig)
+ MOVQ tid+0(FP), DI // arg 1 id
+ MOVQ sig+8(FP), SI // arg 2 sig
MOVL $433, AX
SYSCALL
RET
diff --git a/src/runtime/sys_freebsd_arm.s b/src/runtime/sys_freebsd_arm.s
index 8da36dff17..1e12f9cfcb 100644
--- a/src/runtime/sys_freebsd_arm.s
+++ b/src/runtime/sys_freebsd_arm.s
@@ -165,14 +165,17 @@ TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0
MOVW R0, ret+4(FP)
RET
-TEXT runtime·raise(SB),NOSPLIT,$8
- // thr_self(&4(R13))
- MOVW $4(R13), R0 // arg 1 &4(R13)
+TEXT runtime·thr_self(SB),NOSPLIT,$0-4
+ // thr_self(&0(FP))
+ MOVW $ret+0(FP), R0 // arg 1
MOVW $SYS_thr_self, R7
SWI $0
- // thr_kill(self, SIGPIPE)
- MOVW 4(R13), R0 // arg 1 id
- MOVW sig+0(FP), R1 // arg 2 - signal
+ RET
+
+TEXT runtime·thr_kill(SB),NOSPLIT,$0-8
+ // thr_kill(tid, sig)
+ MOVW tid+0(FP), R0 // arg 1 id
+ MOVW sig+4(FP), R1 // arg 2 signal
MOVW $SYS_thr_kill, R7
SWI $0
RET
@@ -243,7 +246,11 @@ TEXT runtime·asmSigaction(SB),NOSPLIT|NOFRAME,$0
MOVW R0, ret+12(FP)
RET
-TEXT runtime·sigtramp(SB),NOSPLIT,$12
+TEXT runtime·sigtramp(SB),NOSPLIT,$0
+ // Reserve space for callee-save registers and arguments.
+ MOVM.DB.W [R4-R11], (R13)
+ SUB $16, R13
+
// this might be called in external code context,
// where g is not set.
// first save R0, because runtime·load_g will clobber it
@@ -255,6 +262,11 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$12
MOVW R1, 8(R13)
MOVW R2, 12(R13)
BL runtime·sigtrampgo(SB)
+
+ // Restore callee-save registers.
+ ADD $16, R13
+ MOVM.IA.W (R13), [R4-R11]
+
RET
TEXT runtime·mmap(SB),NOSPLIT,$16
diff --git a/src/runtime/sys_freebsd_arm64.s b/src/runtime/sys_freebsd_arm64.s
new file mode 100644
index 0000000000..e0ef2f679d
--- /dev/null
+++ b/src/runtime/sys_freebsd_arm64.s
@@ -0,0 +1,543 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//
+// System calls and other sys.stuff for arm64, FreeBSD
+// /usr/src/sys/kern/syscalls.master for syscall numbers.
+//
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "textflag.h"
+
+#define CLOCK_REALTIME 0
+#define CLOCK_MONOTONIC 4
+#define FD_CLOEXEC 1
+#define F_SETFD 2
+#define F_GETFL 3
+#define F_SETFL 4
+#define O_NONBLOCK 4
+
+#define SYS_exit 1
+#define SYS_read 3
+#define SYS_write 4
+#define SYS_open 5
+#define SYS_close 6
+#define SYS_getpid 20
+#define SYS_kill 37
+#define SYS_sigaltstack 53
+#define SYS_munmap 73
+#define SYS_madvise 75
+#define SYS_setitimer 83
+#define SYS_fcntl 92
+#define SYS___sysctl 202
+#define SYS_clock_gettime 232
+#define SYS_nanosleep 240
+#define SYS_sched_yield 331
+#define SYS_sigprocmask 340
+#define SYS_kqueue 362
+#define SYS_kevent 363
+#define SYS_sigaction 416
+#define SYS_thr_exit 431
+#define SYS_thr_self 432
+#define SYS_thr_kill 433
+#define SYS__umtx_op 454
+#define SYS_thr_new 455
+#define SYS_mmap 477
+#define SYS_cpuset_getaffinity 487
+#define SYS_pipe2 542
+
+TEXT emptyfunc<>(SB),0,$0-0
+ RET
+
+// func sys_umtx_op(addr *uint32, mode int32, val uint32, uaddr1 uintptr, ut *umtx_time) int32
+TEXT runtime·sys_umtx_op(SB),NOSPLIT,$0
+ MOVD addr+0(FP), R0
+ MOVW mode+8(FP), R1
+ MOVW val+12(FP), R2
+ MOVD uaddr1+16(FP), R3
+ MOVD ut+24(FP), R4
+ MOVD $SYS__umtx_op, R8
+ SVC
+ MOVW R0, ret+32(FP)
+ RET
+
+// func thr_new(param *thrparam, size int32) int32
+TEXT runtime·thr_new(SB),NOSPLIT,$0
+ MOVD param+0(FP), R0
+ MOVW size+8(FP), R1
+ MOVD $SYS_thr_new, R8
+ SVC
+ MOVW R0, ret+16(FP)
+ RET
+
+// func thr_start()
+TEXT runtime·thr_start(SB),NOSPLIT,$0
+ // set up g
+ MOVD m_g0(R0), g
+ MOVD R0, g_m(g)
+ BL emptyfunc<>(SB) // fault if stack check is wrong
+ BL runtime·mstart(SB)
+
+ MOVD $2, R8 // crash (not reached)
+ MOVD R8, (R8)
+ RET
+
+// func exit(code int32)
+TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
+ MOVW code+0(FP), R0
+ MOVD $SYS_exit, R8
+ SVC
+ MOVD $0, R0
+ MOVD R0, (R0)
+
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
+ MOVD wait+0(FP), R0
+ // We're done using the stack.
+ MOVW $0, R1
+ STLRW R1, (R0)
+ MOVW $0, R0
+ MOVD $SYS_thr_exit, R8
+ SVC
+ JMP 0(PC)
+
+// func open(name *byte, mode, perm int32) int32
+TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20
+ MOVD name+0(FP), R0
+ MOVW mode+8(FP), R1
+ MOVW perm+12(FP), R2
+ MOVD $SYS_open, R8
+ SVC
+ BCC ok
+ MOVW $-1, R0
+ok:
+ MOVW R0, ret+16(FP)
+ RET
+
+// func closefd(fd int32) int32
+TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0-12
+ MOVW fd+0(FP), R0
+ MOVD $SYS_close, R8
+ SVC
+ BCC ok
+ MOVW $-1, R0
+ok:
+ MOVW R0, ret+8(FP)
+ RET
+
+// func pipe() (r, w int32, errno int32)
+TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
+ ADD $8, RSP, R0
+ MOVW $0, R1
+ MOVD $SYS_pipe2, R8
+ SVC
+ BCC ok
+ NEG R0, R0
+ok:
+ MOVW R0, errno+8(FP)
+ RET
+
+// func pipe2(flags int32) (r, w int32, errno int32)
+TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
+ ADD $16, RSP, R0
+ MOVW flags+0(FP), R1
+ MOVD $SYS_pipe2, R8
+ SVC
+ BCC ok
+ NEG R0, R0
+ok:
+ MOVW R0, errno+16(FP)
+ RET
+
+// func write1(fd uintptr, p unsafe.Pointer, n int32) int32
+TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28
+ MOVD fd+0(FP), R0
+ MOVD p+8(FP), R1
+ MOVW n+16(FP), R2
+ MOVD $SYS_write, R8
+ SVC
+ BCC ok
+ NEG R0, R0 // caller expects negative errno
+ok:
+ MOVW R0, ret+24(FP)
+ RET
+
+// func read(fd int32, p unsafe.Pointer, n int32) int32
+TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
+ MOVW fd+0(FP), R0
+ MOVD p+8(FP), R1
+ MOVW n+16(FP), R2
+ MOVD $SYS_read, R8
+ SVC
+ BCC ok
+ NEG R0, R0 // caller expects negative errno
+ok:
+ MOVW R0, ret+24(FP)
+ RET
+
+// func usleep(usec uint32)
+TEXT runtime·usleep(SB),NOSPLIT,$24-4
+ MOVWU usec+0(FP), R3
+ MOVD R3, R5
+ MOVW $1000000, R4
+ UDIV R4, R3
+ MOVD R3, 8(RSP)
+ MUL R3, R4
+ SUB R4, R5
+ MOVW $1000, R4
+ MUL R4, R5
+ MOVD R5, 16(RSP)
+
+ // nanosleep(&ts, 0)
+ ADD $8, RSP, R0
+ MOVD $0, R1
+ MOVD $SYS_nanosleep, R8
+ SVC
+ RET
+
+// func thr_self() thread
+TEXT runtime·thr_self(SB),NOSPLIT,$8-8
+ MOVD $ptr-8(SP), R0 // arg 1 - &ptr
+ MOVD $SYS_thr_self, R8
+ SVC
+ MOVD ptr-8(SP), R0
+ MOVD R0, ret+0(FP)
+ RET
+
+// func thr_kill(t thread, sig int)
+TEXT runtime·thr_kill(SB),NOSPLIT,$0-16
+ MOVD tid+0(FP), R0 // arg 1 - thread id
+ MOVD sig+8(FP), R1 // arg 2 sig
+ MOVD $SYS_thr_kill, R8
+ SVC
+ RET
+
+// func raiseproc(sig uint32)
+TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0
+ MOVD $SYS_getpid, R8
+ SVC
+ MOVW sig+0(FP), R1
+ MOVD $SYS_kill, R8
+ SVC
+ RET
+
+// func setitimer(mode int32, new, old *itimerval)
+TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24
+ MOVW mode+0(FP), R0
+ MOVD new+8(FP), R1
+ MOVD old+16(FP), R2
+ MOVD $SYS_setitimer, R8
+ SVC
+ RET
+
+// func fallback_walltime() (sec int64, nsec int32)
+TEXT runtime·fallback_walltime(SB),NOSPLIT,$24-12
+ MOVW $CLOCK_REALTIME, R0
+ MOVD $8(RSP), R1
+ MOVD $SYS_clock_gettime, R8
+ SVC
+ MOVD 8(RSP), R0 // sec
+ MOVW 16(RSP), R1 // nsec
+ MOVD R0, sec+0(FP)
+ MOVW R1, nsec+8(FP)
+ RET
+
+// func fallback_nanotime() int64
+TEXT runtime·fallback_nanotime(SB),NOSPLIT,$24-8
+ MOVD $CLOCK_MONOTONIC, R0
+ MOVD $8(RSP), R1
+ MOVD $SYS_clock_gettime, R8
+ SVC
+ MOVD 8(RSP), R0 // sec
+ MOVW 16(RSP), R2 // nsec
+
+ // sec is in R0, nsec in R2
+ // return sec*1000000000 + nsec, in R0
+ MOVD $1000000000, R3
+ MUL R3, R0
+ ADD R2, R0
+
+ MOVD R0, ret+0(FP)
+ RET
+
+// func asmSigaction(sig uintptr, new, old *sigactiont) int32
+TEXT runtime·asmSigaction(SB),NOSPLIT|NOFRAME,$0
+ MOVD sig+0(FP), R0 // arg 1 sig
+ MOVD new+8(FP), R1 // arg 2 act
+ MOVD old+16(FP), R2 // arg 3 oact
+ MOVD $SYS_sigaction, R8
+ SVC
+ BCC ok
+ MOVW $-1, R0
+ok:
+ MOVW R0, ret+24(FP)
+ RET
+
+// func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
+TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
+ MOVW sig+8(FP), R0
+ MOVD info+16(FP), R1
+ MOVD ctx+24(FP), R2
+ MOVD fn+0(FP), R11
+ BL (R11)
+ RET
+
+// func sigtramp()
+TEXT runtime·sigtramp(SB),NOSPLIT,$192
+ // Save callee-save registers in the case of signal forwarding.
+ // Please refer to https://golang.org/issue/31827 .
+ MOVD R19, 8*4(RSP)
+ MOVD R20, 8*5(RSP)
+ MOVD R21, 8*6(RSP)
+ MOVD R22, 8*7(RSP)
+ MOVD R23, 8*8(RSP)
+ MOVD R24, 8*9(RSP)
+ MOVD R25, 8*10(RSP)
+ MOVD R26, 8*11(RSP)
+ MOVD R27, 8*12(RSP)
+ MOVD g, 8*13(RSP)
+ MOVD R29, 8*14(RSP)
+ FMOVD F8, 8*15(RSP)
+ FMOVD F9, 8*16(RSP)
+ FMOVD F10, 8*17(RSP)
+ FMOVD F11, 8*18(RSP)
+ FMOVD F12, 8*19(RSP)
+ FMOVD F13, 8*20(RSP)
+ FMOVD F14, 8*21(RSP)
+ FMOVD F15, 8*22(RSP)
+
+ // this might be called in external code context,
+ // where g is not set.
+ // first save R0, because runtime·load_g will clobber it
+ MOVW R0, 8(RSP)
+ MOVBU runtime·iscgo(SB), R0
+ CMP $0, R0
+ BEQ 2(PC)
+ BL runtime·load_g(SB)
+
+ MOVD R1, 16(RSP)
+ MOVD R2, 24(RSP)
+ MOVD $runtime·sigtrampgo(SB), R0
+ BL (R0)
+
+ // Restore callee-save registers.
+ MOVD 8*4(RSP), R19
+ MOVD 8*5(RSP), R20
+ MOVD 8*6(RSP), R21
+ MOVD 8*7(RSP), R22
+ MOVD 8*8(RSP), R23
+ MOVD 8*9(RSP), R24
+ MOVD 8*10(RSP), R25
+ MOVD 8*11(RSP), R26
+ MOVD 8*12(RSP), R27
+ MOVD 8*13(RSP), g
+ MOVD 8*14(RSP), R29
+ FMOVD 8*15(RSP), F8
+ FMOVD 8*16(RSP), F9
+ FMOVD 8*17(RSP), F10
+ FMOVD 8*18(RSP), F11
+ FMOVD 8*19(RSP), F12
+ FMOVD 8*20(RSP), F13
+ FMOVD 8*21(RSP), F14
+ FMOVD 8*22(RSP), F15
+
+ RET
+
+// func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int)
+TEXT runtime·mmap(SB),NOSPLIT|NOFRAME,$0
+ MOVD addr+0(FP), R0
+ MOVD n+8(FP), R1
+ MOVW prot+16(FP), R2
+ MOVW flags+20(FP), R3
+ MOVW fd+24(FP), R4
+ MOVW off+28(FP), R5
+ MOVD $SYS_mmap, R8
+ SVC
+ BCS fail
+ MOVD R0, p+32(FP)
+ MOVD $0, err+40(FP)
+ RET
+fail:
+ MOVD $0, p+32(FP)
+ MOVD R0, err+40(FP)
+ RET
+
+// func munmap(addr uintptr, n uintptr) (err error)
+TEXT runtime·munmap(SB),NOSPLIT|NOFRAME,$0
+ MOVD addr+0(FP), R0
+ MOVD n+8(FP), R1
+ MOVD $SYS_munmap, R8
+ SVC
+ BCS fail
+ RET
+fail:
+ MOVD $0, R0
+ MOVD R0, (R0) // crash
+
+// func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32
+TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0
+ MOVD addr+0(FP), R0
+ MOVD n+8(FP), R1
+ MOVW flags+16(FP), R2
+ MOVD $SYS_madvise, R8
+ SVC
+ BCC ok
+ MOVW $-1, R0
+ok:
+ MOVW R0, ret+24(FP)
+ RET
+
+// func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
+TEXT runtime·sysctl(SB),NOSPLIT,$0
+ MOVD mib+0(FP), R0
+ MOVD miblen+8(FP), R1
+ MOVD out+16(FP), R2
+ MOVD size+24(FP), R3
+ MOVD dst+32(FP), R4
+ MOVD ndst+40(FP), R5
+ MOVD $SYS___sysctl, R8
+ SVC
+ BCC ok
+ NEG R0, R0
+ok:
+ MOVW R0, ret+48(FP)
+ RET
+
+// func sigaltstack(new, old *stackt)
+TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0
+ MOVD new+0(FP), R0
+ MOVD old+8(FP), R1
+ MOVD $SYS_sigaltstack, R8
+ SVC
+ BCS fail
+ RET
+fail:
+ MOVD $0, R0
+ MOVD R0, (R0) // crash
+
+// func osyield()
+TEXT runtime·osyield(SB),NOSPLIT|NOFRAME,$0
+ MOVD $SYS_sched_yield, R8
+ SVC
+ RET
+
+// func sigprocmask(how int32, new, old *sigset)
+TEXT runtime·sigprocmask(SB),NOSPLIT|NOFRAME,$0-24
+ MOVW how+0(FP), R0
+ MOVD new+8(FP), R1
+ MOVD old+16(FP), R2
+ MOVD $SYS_sigprocmask, R8
+ SVC
+ BCS fail
+ RET
+fail:
+ MOVD $0, R0
+ MOVD R0, (R0) // crash
+
+// func cpuset_getaffinity(level int, which int, id int64, size int, mask *byte) int32
+TEXT runtime·cpuset_getaffinity(SB),NOSPLIT|NOFRAME,$0-44
+ MOVD level+0(FP), R0
+ MOVD which+8(FP), R1
+ MOVD id+16(FP), R2
+ MOVD size+24(FP), R3
+ MOVD mask+32(FP), R4
+ MOVD $SYS_cpuset_getaffinity, R8
+ SVC
+ BCC ok
+ MOVW $-1, R0
+ok:
+ MOVW R0, ret+40(FP)
+ RET
+
+// func kqueue() int32
+TEXT runtime·kqueue(SB),NOSPLIT|NOFRAME,$0
+ MOVD $SYS_kqueue, R8
+ SVC
+ BCC ok
+ MOVW $-1, R0
+ok:
+ MOVW R0, ret+0(FP)
+ RET
+
+// func kevent(kq int, ch unsafe.Pointer, nch int, ev unsafe.Pointer, nev int, ts *Timespec) (n int, err error)
+TEXT runtime·kevent(SB),NOSPLIT,$0
+ MOVW kq+0(FP), R0
+ MOVD ch+8(FP), R1
+ MOVW nch+16(FP), R2
+ MOVD ev+24(FP), R3
+ MOVW nev+32(FP), R4
+ MOVD ts+40(FP), R5
+ MOVD $SYS_kevent, R8
+ SVC
+ BCC ok
+ NEG R0, R0
+ok:
+ MOVW R0, ret+48(FP)
+ RET
+
+// func closeonexec(fd int32)
+TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
+ MOVW fd+0(FP), R0
+ MOVD $F_SETFD, R1
+ MOVD $FD_CLOEXEC, R2
+ MOVD $SYS_fcntl, R8
+ SVC
+ RET
+
+// func runtime·setNonblock(fd int32)
+TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
+ MOVW fd+0(FP), R0
+ MOVD $F_GETFL, R1
+ MOVD $0, R2
+ MOVD $SYS_fcntl, R8
+ SVC
+ ORR $O_NONBLOCK, R0, R2
+ MOVW fd+0(FP), R0
+ MOVW $F_SETFL, R1
+ MOVD $SYS_fcntl, R8
+ SVC
+ RET
+
+// func getCntxct(physical bool) uint32
+TEXT runtime·getCntxct(SB),NOSPLIT,$0
+ MOVB physical+0(FP), R0
+ CMP $0, R0
+ BEQ 3(PC)
+
+ // get CNTPCT (Physical Count Register) into x0
+ // mrs x0, cntpct_el0 = d53be020
+ WORD $0xd53be020 // SIGILL
+ B 2(PC)
+
+ // get CNTVCT (Virtual Count Register) into x0
+ // mrs x0, cntvct_el0 = d53be040
+ WORD $0xd53be040
+
+ MOVW R0, ret+8(FP)
+ RET
+
+// func getisar0() uint64
+TEXT runtime·getisar0(SB),NOSPLIT,$0
+ // get Instruction Set Attributes 0 into x0
+ // mrs x0, ID_AA64ISAR0_EL1 = d5380600
+ WORD $0xd5380600
+ MOVD R0, ret+0(FP)
+ RET
+
+// func getisar1() uint64
+TEXT runtime·getisar1(SB),NOSPLIT,$0
+ // get Instruction Set Attributes 1 into x0
+ // mrs x0, ID_AA64ISAR1_EL1 = d5380620
+ WORD $0xd5380620
+ MOVD R0, ret+0(FP)
+ RET
+
+// func getpfr0() uint64
+TEXT runtime·getpfr0(SB),NOSPLIT,$0
+ // get Processor Feature Register 0 into x0
+ // mrs x0, ID_AA64PFR0_EL1 = d5380400
+ WORD $0xd5380400
+ MOVD R0, ret+0(FP)
+ RET
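
Across the BSD ports in this change, raise is rebuilt the same way: one syscall to fetch the current thread id and one to signal a thread by id, so the id can equally come from another M. On FreeBSD the Go side presumably recomposes them along these lines (a sketch; the real definition is in os_freebsd.go):

    // Sketch of raise in terms of the split syscalls above.
    //go:nosplit
    func raise(sig uint32) {
        thr_kill(thr_self(), int(sig))
    }
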
diff --git a/src/runtime/sys_linux_386.s b/src/runtime/sys_linux_386.s
index 4b440b13cb..373d9d3bc2 100644
--- a/src/runtime/sys_linux_386.s
+++ b/src/runtime/sys_linux_386.s
@@ -188,6 +188,20 @@ TEXT runtime·raiseproc(SB),NOSPLIT,$12
INVOKE_SYSCALL
RET
+TEXT ·getpid(SB),NOSPLIT,$0-4
+ MOVL $SYS_getpid, AX
+ INVOKE_SYSCALL
+ MOVL AX, ret+0(FP)
+ RET
+
+TEXT ·tgkill(SB),NOSPLIT,$0
+ MOVL $SYS_tgkill, AX
+ MOVL tgid+0(FP), BX
+ MOVL tid+4(FP), CX
+ MOVL sig+8(FP), DX
+ INVOKE_SYSCALL
+ RET
+
TEXT runtime·setitimer(SB),NOSPLIT,$0-12
MOVL $SYS_setittimer, AX
MOVL mode+0(FP), BX
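
On Linux the per-thread signal is tgkill(2), which also takes the thread-group (process) id, hence the paired getpid stub. The Go-side declarations and their combination look roughly like this (a sketch; the concrete code is in os_linux.go, with mp.procid holding the kernel thread id):

    // Declarations backed by the assembly stubs added above.
    func getpid() int
    func tgkill(tgid, tid, sig int)

    // Sketch: signal the OS thread backing mp.
    func signalM(mp *m, sig int) {
        tgkill(getpid(), int(mp.procid), sig)
    }
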
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index 0728d1766e..d16060f6fa 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -171,6 +171,20 @@ TEXT runtime·raiseproc(SB),NOSPLIT,$0
SYSCALL
RET
+TEXT ·getpid(SB),NOSPLIT,$0-8
+ MOVL $SYS_getpid, AX
+ SYSCALL
+ MOVQ AX, ret+0(FP)
+ RET
+
+TEXT ·tgkill(SB),NOSPLIT,$0
+ MOVQ tgid+0(FP), DI
+ MOVQ tid+8(FP), SI
+ MOVQ sig+16(FP), DX
+ MOVL $SYS_tgkill, AX
+ SYSCALL
+ RET
+
TEXT runtime·setitimer(SB),NOSPLIT,$0-24
MOVL mode+0(FP), DI
MOVQ new+8(FP), SI
diff --git a/src/runtime/sys_linux_arm.s b/src/runtime/sys_linux_arm.s
index 9a9e1c92c7..9ef8c9258b 100644
--- a/src/runtime/sys_linux_arm.s
+++ b/src/runtime/sys_linux_arm.s
@@ -172,6 +172,20 @@ TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0
SWI $0
RET
+TEXT ·getpid(SB),NOSPLIT,$0-4
+ MOVW $SYS_getpid, R7
+ SWI $0
+ MOVW R0, ret+0(FP)
+ RET
+
+TEXT ·tgkill(SB),NOSPLIT,$0-12
+ MOVW tgid+0(FP), R0
+ MOVW tid+4(FP), R1
+ MOVW sig+8(FP), R2
+ MOVW $SYS_tgkill, R7
+ SWI $0
+ RET
+
TEXT runtime·mmap(SB),NOSPLIT,$0
MOVW addr+0(FP), R0
MOVW n+4(FP), R1
@@ -479,7 +493,11 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-16
MOVW R4, R13
RET
-TEXT runtime·sigtramp(SB),NOSPLIT,$12
+TEXT runtime·sigtramp(SB),NOSPLIT,$0
+ // Reserve space for callee-save registers and arguments.
+ MOVM.DB.W [R4-R11], (R13)
+ SUB $16, R13
+
// this might be called in external code context,
// where g is not set.
// first save R0, because runtime·load_g will clobber it
@@ -492,6 +510,11 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$12
MOVW R2, 12(R13)
MOVW $runtime·sigtrampgo(SB), R11
BL (R11)
+
+ // Restore callee-save registers.
+ ADD $16, R13
+ MOVM.IA.W (R13), [R4-R11]
+
RET
TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
diff --git a/src/runtime/sys_linux_arm64.s b/src/runtime/sys_linux_arm64.s
index a77be98739..e0d681ebf1 100644
--- a/src/runtime/sys_linux_arm64.s
+++ b/src/runtime/sys_linux_arm64.s
@@ -175,6 +175,20 @@ TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0
SVC
RET
+TEXT ·getpid(SB),NOSPLIT|NOFRAME,$0-8
+ MOVD $SYS_getpid, R8
+ SVC
+ MOVD R0, ret+0(FP)
+ RET
+
+TEXT ·tgkill(SB),NOSPLIT,$0-24
+ MOVD tgid+0(FP), R0
+ MOVD tid+8(FP), R1
+ MOVD sig+16(FP), R2
+ MOVD $SYS_tgkill, R8
+ SVC
+ RET
+
TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24
MOVW mode+0(FP), R0
MOVD new+8(FP), R1
diff --git a/src/runtime/sys_linux_mips64x.s b/src/runtime/sys_linux_mips64x.s
index 49459b0cec..e4d02a3953 100644
--- a/src/runtime/sys_linux_mips64x.s
+++ b/src/runtime/sys_linux_mips64x.s
@@ -177,6 +177,20 @@ TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0
SYSCALL
RET
+TEXT ·getpid(SB),NOSPLIT|NOFRAME,$0-8
+ MOVV $SYS_getpid, R2
+ SYSCALL
+ MOVV R2, ret+0(FP)
+ RET
+
+TEXT ·tgkill(SB),NOSPLIT|NOFRAME,$0-24
+ MOVV tgid+0(FP), R4
+ MOVV tid+8(FP), R5
+ MOVV sig+16(FP), R6
+ MOVV $SYS_tgkill, R2
+ SYSCALL
+ RET
+
TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24
MOVW mode+0(FP), R4
MOVV new+8(FP), R5
diff --git a/src/runtime/sys_linux_mipsx.s b/src/runtime/sys_linux_mipsx.s
index 3c405c264e..15893a7a28 100644
--- a/src/runtime/sys_linux_mipsx.s
+++ b/src/runtime/sys_linux_mipsx.s
@@ -183,6 +183,20 @@ TEXT runtime·raiseproc(SB),NOSPLIT,$0
SYSCALL
RET
+TEXT ·getpid(SB),NOSPLIT,$0-4
+ MOVW $SYS_getpid, R2
+ SYSCALL
+ MOVW R2, ret+0(FP)
+ RET
+
+TEXT ·tgkill(SB),NOSPLIT,$0-12
+ MOVW tgid+0(FP), R4
+ MOVW tid+4(FP), R5
+ MOVW sig+8(FP), R6
+ MOVW $SYS_tgkill, R2
+ SYSCALL
+ RET
+
TEXT runtime·setitimer(SB),NOSPLIT,$0-12
MOVW mode+0(FP), R4
MOVW new+4(FP), R5
diff --git a/src/runtime/sys_linux_ppc64x.s b/src/runtime/sys_linux_ppc64x.s
index 203ce089c1..de14418338 100644
--- a/src/runtime/sys_linux_ppc64x.s
+++ b/src/runtime/sys_linux_ppc64x.s
@@ -156,6 +156,18 @@ TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0
SYSCALL $SYS_kill
RET
+TEXT ·getpid(SB),NOSPLIT|NOFRAME,$0-8
+ SYSCALL $SYS_getpid
+ MOVD R3, ret+0(FP)
+ RET
+
+TEXT ·tgkill(SB),NOSPLIT|NOFRAME,$0-24
+ MOVD tgid+0(FP), R3
+ MOVD tid+8(FP), R4
+ MOVD sig+16(FP), R5
+ SYSCALL $SYS_tgkill
+ RET
+
TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24
MOVW mode+0(FP), R3
MOVD new+8(FP), R4
diff --git a/src/runtime/sys_linux_s390x.s b/src/runtime/sys_linux_s390x.s
index df01271f7b..c15a1d5364 100644
--- a/src/runtime/sys_linux_s390x.s
+++ b/src/runtime/sys_linux_s390x.s
@@ -163,6 +163,20 @@ TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0
SYSCALL
RET
+TEXT ·getpid(SB),NOSPLIT|NOFRAME,$0-8
+ MOVW $SYS_getpid, R1
+ SYSCALL
+ MOVD R2, ret+0(FP)
+ RET
+
+TEXT ·tgkill(SB),NOSPLIT|NOFRAME,$0-24
+ MOVD tgid+0(FP), R2
+ MOVD tid+8(FP), R3
+ MOVD sig+16(FP), R4
+ MOVW $SYS_tgkill, R1
+ SYSCALL
+ RET
+
TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24
MOVW mode+0(FP), R2
MOVD new+8(FP), R3
diff --git a/src/runtime/sys_netbsd_386.s b/src/runtime/sys_netbsd_386.s
index 7a542da526..d0c470c457 100644
--- a/src/runtime/sys_netbsd_386.s
+++ b/src/runtime/sys_netbsd_386.s
@@ -140,12 +140,11 @@ TEXT runtime·usleep(SB),NOSPLIT,$24
INT $0x80
RET
-TEXT runtime·raise(SB),NOSPLIT,$12
- MOVL $SYS__lwp_self, AX
- INT $0x80
+TEXT runtime·lwp_kill(SB),NOSPLIT,$12-8
MOVL $0, 0(SP)
+ MOVL tid+0(FP), AX
MOVL AX, 4(SP) // arg 1 - target
- MOVL sig+0(FP), AX
+ MOVL sig+4(FP), AX
MOVL AX, 8(SP) // arg 2 - signo
MOVL $SYS__lwp_kill, AX
INT $0x80
diff --git a/src/runtime/sys_netbsd_amd64.s b/src/runtime/sys_netbsd_amd64.s
index 4d1d36f01b..dc9bd127d2 100644
--- a/src/runtime/sys_netbsd_amd64.s
+++ b/src/runtime/sys_netbsd_amd64.s
@@ -209,11 +209,9 @@ TEXT runtime·usleep(SB),NOSPLIT,$16
SYSCALL
RET
-TEXT runtime·raise(SB),NOSPLIT,$16
- MOVL $SYS__lwp_self, AX
- SYSCALL
- MOVQ AX, DI // arg 1 - target
- MOVL sig+0(FP), SI // arg 2 - signo
+TEXT runtime·lwp_kill(SB),NOSPLIT,$0-16
+ MOVL tid+0(FP), DI // arg 1 - target
+ MOVQ sig+8(FP), SI // arg 2 - signo
MOVL $SYS__lwp_kill, AX
SYSCALL
RET
diff --git a/src/runtime/sys_netbsd_arm.s b/src/runtime/sys_netbsd_arm.s
index c8ee262d59..678dea57c6 100644
--- a/src/runtime/sys_netbsd_arm.s
+++ b/src/runtime/sys_netbsd_arm.s
@@ -193,9 +193,9 @@ TEXT runtime·usleep(SB),NOSPLIT,$16
SWI $SYS___nanosleep50
RET
-TEXT runtime·raise(SB),NOSPLIT,$16
- SWI $SYS__lwp_self // the returned R0 is arg 1
- MOVW sig+0(FP), R1 // arg 2 - signal
+TEXT runtime·lwp_kill(SB),NOSPLIT,$0-8
+ MOVW tid+0(FP), R0 // arg 1 - tid
+ MOVW sig+4(FP), R1 // arg 2 - signal
SWI $SYS__lwp_kill
RET
@@ -300,7 +300,11 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-16
MOVW R4, R13
RET
-TEXT runtime·sigtramp(SB),NOSPLIT,$12
+TEXT runtime·sigtramp(SB),NOSPLIT,$0
+ // Reserve space for callee-save registers and arguments.
+ MOVM.DB.W [R4-R11], (R13)
+ SUB $16, R13
+
// this might be called in external code context,
// where g is not set.
// first save R0, because runtime·load_g will clobber it
@@ -312,6 +316,11 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$12
MOVW R1, 8(R13)
MOVW R2, 12(R13)
BL runtime·sigtrampgo(SB)
+
+ // Restore callee-save registers.
+ ADD $16, R13
+ MOVM.IA.W (R13), [R4-R11]
+
RET
TEXT runtime·mmap(SB),NOSPLIT,$12
diff --git a/src/runtime/sys_netbsd_arm64.s b/src/runtime/sys_netbsd_arm64.s
index ccc34142aa..e70be0fa74 100644
--- a/src/runtime/sys_netbsd_arm64.s
+++ b/src/runtime/sys_netbsd_arm64.s
@@ -205,10 +205,9 @@ TEXT runtime·usleep(SB),NOSPLIT,$24-4
SVC $SYS___nanosleep50
RET
-TEXT runtime·raise(SB),NOSPLIT,$16
- SVC $SYS__lwp_self
- // arg 1 - target (lwp_self)
- MOVW sig+0(FP), R1 // arg 2 - signo
+TEXT runtime·lwp_kill(SB),NOSPLIT,$0-16
+ MOVW tid+0(FP), R0 // arg 1 - target
+ MOVD sig+8(FP), R1 // arg 2 - signo
SVC $SYS__lwp_kill
RET
diff --git a/src/runtime/sys_openbsd_386.s b/src/runtime/sys_openbsd_386.s
index 9805a43802..24fbfd6266 100644
--- a/src/runtime/sys_openbsd_386.s
+++ b/src/runtime/sys_openbsd_386.s
@@ -97,12 +97,17 @@ TEXT runtime·usleep(SB),NOSPLIT,$24
INT $0x80
RET
-TEXT runtime·raise(SB),NOSPLIT,$16
+TEXT runtime·getthrid(SB),NOSPLIT,$0-4
MOVL $299, AX // sys_getthrid
INT $0x80
+ MOVL AX, ret+0(FP)
+ RET
+
+TEXT runtime·thrkill(SB),NOSPLIT,$16-8
MOVL $0, 0(SP)
+ MOVL tid+0(FP), AX
MOVL AX, 4(SP) // arg 1 - tid
- MOVL sig+0(FP), AX
+ MOVL sig+4(FP), AX
MOVL AX, 8(SP) // arg 2 - signum
MOVL $0, 12(SP) // arg 3 - tcb
MOVL $119, AX // sys_thrkill
diff --git a/src/runtime/sys_openbsd_amd64.s b/src/runtime/sys_openbsd_amd64.s
index 66526bff0d..37d70ab9aa 100644
--- a/src/runtime/sys_openbsd_amd64.s
+++ b/src/runtime/sys_openbsd_amd64.s
@@ -171,11 +171,15 @@ TEXT runtime·usleep(SB),NOSPLIT,$16
SYSCALL
RET
-TEXT runtime·raise(SB),NOSPLIT,$16
+TEXT runtime·getthrid(SB),NOSPLIT,$0-4
MOVL $299, AX // sys_getthrid
SYSCALL
- MOVQ AX, DI // arg 1 - tid
- MOVL sig+0(FP), SI // arg 2 - signum
+ MOVL AX, ret+0(FP)
+ RET
+
+TEXT runtime·thrkill(SB),NOSPLIT,$0-16
+ MOVL tid+0(FP), DI // arg 1 - tid
+ MOVQ sig+8(FP), SI // arg 2 - signum
MOVQ $0, DX // arg 3 - tcb
MOVL $119, AX // sys_thrkill
SYSCALL
diff --git a/src/runtime/sys_openbsd_arm.s b/src/runtime/sys_openbsd_arm.s
index 92ab3270be..11f6e00100 100644
--- a/src/runtime/sys_openbsd_arm.s
+++ b/src/runtime/sys_openbsd_arm.s
@@ -102,11 +102,15 @@ TEXT runtime·usleep(SB),NOSPLIT,$16
SWI $0
RET
-TEXT runtime·raise(SB),NOSPLIT,$12
+TEXT runtime·getthrid(SB),NOSPLIT,$0-4
MOVW $299, R12 // sys_getthrid
SWI $0
- // arg 1 - tid, already in R0
- MOVW sig+0(FP), R1 // arg 2 - signum
+ MOVW R0, ret+0(FP)
+ RET
+
+TEXT runtime·thrkill(SB),NOSPLIT,$0-8
+ MOVW tid+0(FP), R0 // arg 1 - tid
+ MOVW sig+4(FP), R1 // arg 2 - signum
MOVW $0, R2 // arg 3 - tcb
MOVW $119, R12 // sys_thrkill
SWI $0
@@ -243,7 +247,11 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-16
MOVW R4, R13
RET
-TEXT runtime·sigtramp(SB),NOSPLIT,$12
+TEXT runtime·sigtramp(SB),NOSPLIT,$0
+ // Reserve space for callee-save registers and arguments.
+ MOVM.DB.W [R4-R11], (R13)
+ SUB $16, R13
+
// If called from an external code context, g will not be set.
// Save R0, since runtime·load_g will clobber it.
MOVW R0, 4(R13) // signum
@@ -254,6 +262,11 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$12
MOVW R1, 8(R13)
MOVW R2, 12(R13)
BL runtime·sigtrampgo(SB)
+
+ // Restore callee-save registers.
+ ADD $16, R13
+ MOVM.IA.W (R13), [R4-R11]
+
RET
// int32 tfork(void *param, uintptr psize, M *mp, G *gp, void (*fn)(void));
diff --git a/src/runtime/sys_openbsd_arm64.s b/src/runtime/sys_openbsd_arm64.s
index c8bf2d345e..8e1a5bc542 100644
--- a/src/runtime/sys_openbsd_arm64.s
+++ b/src/runtime/sys_openbsd_arm64.s
@@ -114,11 +114,15 @@ TEXT runtime·usleep(SB),NOSPLIT,$24-4
SVC
RET
-TEXT runtime·raise(SB),NOSPLIT,$0
+TEXT runtime·getthrid(SB),NOSPLIT,$0-4
MOVD $299, R8 // sys_getthrid
SVC
- // arg 1 - tid, already in R0
- MOVW sig+0(FP), R1 // arg 2 - signum
+ MOVW R0, ret+0(FP)
+ RET
+
+TEXT runtime·thrkill(SB),NOSPLIT,$0-16
+ MOVW tid+0(FP), R0 // arg 1 - tid
+ MOVD sig+8(FP), R1 // arg 2 - signum
MOVW $0, R2 // arg 3 - tcb
MOVD $119, R8 // sys_thrkill
SVC
diff --git a/src/runtime/time.go b/src/runtime/time.go
index fea5d6871c..6c1170bbc0 100644
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -14,7 +14,7 @@ import (
)
// Temporary scaffolding while the new timer code is added.
-const oldTimers = true
+const oldTimers = false
// Package time knows the layout of this structure.
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
@@ -989,10 +989,12 @@ func addAdjustedTimers(pp *p, moved []*timer) {
case timerDeleted:
// Timer has been deleted since we adjusted it.
// This timer is already out of the heap.
- if !atomic.Cas(&t.status, s, timerRemoved) {
- badTimer()
+ if atomic.Cas(&t.status, s, timerRemoving) {
+ if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
+ badTimer()
+ }
+ break loop
}
- break loop
case timerModifiedEarlier, timerModifiedLater:
// Timer has been modified again since
// we adjusted it.
@@ -1007,8 +1009,8 @@ func addAdjustedTimers(pp *p, moved []*timer) {
if s == timerModifiedEarlier {
atomic.Xadd(&pp.adjustTimers, -1)
}
+ break loop
}
- break loop
case timerNoStatus, timerRunning, timerRemoving, timerRemoved, timerMoving:
badTimer()
case timerModifying:
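
The timerDeleted fix routes removal through the intermediate timerRemoving state, so no other P can act on the timer while it is being pulled out of the heap; a failed second CAS means the state machine was violated, which badTimer treats as fatal rather than retryable. The pattern in isolation:

    // Two-phase status transition used throughout the new timer code.
    if atomic.Cas(&t.status, timerDeleted, timerRemoving) {
        // Exclusive ownership: remove t from the heap here.
        if !atomic.Cas(&t.status, timerRemoving, timerRemoved) {
            badTimer() // nothing else may touch a timerRemoving timer
        }
    }
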
diff --git a/src/runtime/tls_arm64.h b/src/runtime/tls_arm64.h
index 27f517c155..f60f4f6d5b 100644
--- a/src/runtime/tls_arm64.h
+++ b/src/runtime/tls_arm64.h
@@ -20,6 +20,11 @@
#define MRS_TPIDR_R0 WORD $0xd53bd060 // MRS TPIDRRO_EL0, R0
#endif
+#ifdef GOOS_freebsd
+#define TPIDR TPIDR_EL0
+#define MRS_TPIDR_R0 WORD $0xd53bd040 // MRS TPIDR_EL0, R0
+#endif
+
#ifdef GOOS_netbsd
#define TPIDR TPIDRRO_EL0
#define MRS_TPIDR_R0 WORD $0xd53bd040 // MRS TPIDRRO_EL0, R0
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 0e4b75a7e6..9be7d739d1 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -860,6 +860,7 @@ var gStatusStrings = [...]string{
_Gwaiting: "waiting",
_Gdead: "dead",
_Gcopystack: "copystack",
+ _Gpreempted: "preempted",
}
func goroutineheader(gp *g) {
diff --git a/src/runtime/type.go b/src/runtime/type.go
index af1fa2e1ca..52b6cb30b4 100644
--- a/src/runtime/type.go
+++ b/src/runtime/type.go
@@ -292,7 +292,7 @@ func (t *_type) textOff(off textOff) unsafe.Pointer {
for i := range md.textsectmap {
sectaddr := md.textsectmap[i].vaddr
sectlen := md.textsectmap[i].length
- if uintptr(off) >= sectaddr && uintptr(off) <= sectaddr+sectlen {
+ if uintptr(off) >= sectaddr && uintptr(off) < sectaddr+sectlen {
res = md.textsectmap[i].baseaddr + uintptr(off) - uintptr(md.textsectmap[i].vaddr)
break
}
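
The comparison change makes each text section a half-open interval [vaddr, vaddr+length), so an offset equal to vaddr+length falls through to the following section instead of matching the end of this one. The fixed membership test, in isolation:

    // Half-open section membership: with sections [0x1000,0x2000) and
    // [0x2000,0x3000), offset 0x2000 belongs to the second section;
    // the old "<=" comparison wrongly matched the first.
    func inSection(off, vaddr, length uintptr) bool {
        return off >= vaddr && off < vaddr+length
    }
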
diff --git a/src/runtime/vdso_freebsd_arm64.go b/src/runtime/vdso_freebsd_arm64.go
new file mode 100644
index 0000000000..7d9f62d5f9
--- /dev/null
+++ b/src/runtime/vdso_freebsd_arm64.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _VDSO_TH_ALGO_ARM_GENTIM = 1
+)
+
+func getCntxct(physical bool) uint32
+
+//go:nosplit
+func (th *vdsoTimehands) getTimecounter() (uint32, bool) {
+ switch th.algo {
+ case _VDSO_TH_ALGO_ARM_GENTIM:
+ return getCntxct(false), true
+ default:
+ return 0, false
+ }
+}
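
getTimecounter selects the hardware counter for FreeBSD's vDSO timekeeping: when the kernel's timehands advertise the ARM generic timer, the runtime reads CNTVCT_EL0 through the getCntxct stub added above and avoids a clock_gettime syscall. A hedged sketch of a caller (the real bintime computation in vdso_freebsd.go also checks generation counters):

    // Hypothetical consumer, illustration only: take the fast path when
    // the vDSO algorithm is recognized, else signal the caller to use
    // the fallback_nanotime syscall path.
    func tryVDSOCounter(th *vdsoTimehands) (uint32, bool) {
        if tc, ok := th.getTimecounter(); ok {
            return tc, true // raw generic-timer count
        }
        return 0, false // fall back to the syscall
    }
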