aboutsummaryrefslogtreecommitdiff
path: root/src/runtime
diff options
context:
space:
mode:
authorKatie Hockman <katie@golang.org>2021-02-19 09:18:36 -0500
committerKatie Hockman <katie@golang.org>2021-02-19 09:18:45 -0500
commit73a8c63503b70bf1f0d69cdbcac35cefa8fcef6e (patch)
treede84ce321aca939328501653aab419c13b19089e /src/runtime
parent563a6cb17fdfbd20067cfe56cd38608ae3824df9 (diff)
parent7764ee5614df2228e03326487af7670c7c5d268a (diff)
downloadgo-73a8c63503b70bf1f0d69cdbcac35cefa8fcef6e.tar.xz
[dev.fuzz] all: merge master (7764ee5) into dev.fuzz
Conflicts: - api/next.txt Merge List: + 2021-02-19 7764ee5614 runtime: fix invalid nil g check for for mips64x + 2021-02-19 87f425da14 cmd/go/internal/mvs: split Reqs into narrower per-function interfaces + 2021-02-19 4da0188c6c cmd/go/internal/modget: split resolveCandidates into two methods + 2021-02-19 5f2e24efb3 cmd/internal/diff: skip over Cygwin warning in diff output + 2021-02-19 ee7038f6a5 net: disable Windows netsh tests when netsh won't run + 2021-02-19 40765ffa95 os/exec: disable failing LookPathTest on windows/arm64 + 2021-02-19 b445d6ea34 runtime/pprof: expect tests to pass on macOS + 2021-02-19 b110a43628 runtime: delete gosave (dead code) + 2021-02-19 474d5f4f4d math: remove most 387 implementations + 2021-02-19 c7c6c113be runtime: convert windows/arm64 assembly + 2021-02-19 3527caa7d6 runtime: initial windows/arm64 implementation files + 2021-02-19 427bd7599d runtime: generate windows/arm64 callback asm + 2021-02-19 f6c4b4bf96 syscall: add windows/arm64 support + 2021-02-19 ac024a0c7b cmd/vendor: get golang.org/x/sys@beda7e5e158 + 2021-02-19 a3b97e7628 test: disable nilptr on windows/arm64 + 2021-02-19 985d087782 cmd/link: add windows/arm64 support + 2021-02-19 95a44d2409 cmd/internal/objfile: recognize Windows ARM64 executables + 2021-02-19 0ca0551f02 debug/pe: recognize arm64 executables + 2021-02-19 0c633125f2 cmd/dist: add windows/arm64 support + 2021-02-19 bb6efb9609 build: set GOPATH consistently in run.bash, run.bat, run.rc + 2021-02-19 a1222b7535 cmd/link: add debug print in deadcode + 2021-02-19 1c659f2525 cmd/link: clean up windows PE generation + 2021-02-19 b6379f190b syscall: clean up windows a bit + 2021-02-19 09e059afb1 runtime: enable framepointer on all arm64 + 2021-02-19 b19e7b518e runtime: clean up windows a bit + 2021-02-19 5421c37a1d runtime: fix windows/arm externalthreadhandler + 2021-02-19 91cc484ea9 runtime: fix time on windows/arm under WINE + 2021-02-19 38672d3dcf runtime: crash earlier on windows for runtime.abort + 
2021-02-19 a1e9148e3d runtime: print hex numbers with hex prefixes in traceback debug + 2021-02-19 75e273fc2c runtime: fix windows/arm CONTEXT_CONTROL + 2021-02-19 76ab626bfc runtime: factor common code out of defs_windows_*.go + 2021-02-19 ece954d8b8 runtime: find g in Windows profiler using SP + 2021-02-19 a54f7fc0fd runtime: do not treat asmcgocall as a topofstack on g0 + 2021-02-19 776ee4079a runtime: do not treat morestack as a topofstack + 2021-02-19 5ecd9e34df runtime: do not treat mcall as a topofstack + 2021-02-19 54da3ab385 runtime: use TOPFRAME to identify top-of-frame functions + 2021-02-19 fbe74dbf42 runtime: use FuncInfo SPWRITE flag to identify untraceable profile samples + 2021-02-19 4dd77bdc91 cmd/asm, cmd/link, runtime: introduce FuncInfo flag bits + 2021-02-19 aa0388f2ed runtime: remove unnecessary writes to gp.sched.g + 2021-02-19 6fe8981620 cmd/internal/obj/riscv: fix JMP name<>(SB) + 2021-02-19 01f05d8ff1 runtime: unify asmcgocall and systemstack traceback setup + 2021-02-19 229695a283 runtime: clean up funcID assignment + 2021-02-19 c80da0a33a runtime: handle nil gp in cpuprof + 2021-02-19 a78879ac67 runtime: move sys.DefaultGoroot to runtime.defaultGOROOT + 2021-02-19 8ac23a1f15 runtime: document, clean up internal/sys + 2021-02-19 678568a5cf runtime: delete windows setlasterror (unused) + 2021-02-19 0d94f989d1 runtime: clean up system calls during cgo callback init + 2021-02-19 e7ee3c1fa8 os: report Windows exit status in hex + 2021-02-18 eb982727e3 cmd/go/internal/mvs: fix Downgrade to match Algorithm 4 + 2021-02-18 3b7277d365 cmd/go: add a script test for artifacts resulting from 'go get -u' + 2021-02-18 f3c2208e2c cmd/go: add script tests for potential upgrades due to downgrades + 2021-02-18 a5c8a15f64 cmd/go/internal/mvs: clarify and annotate test cases + 2021-02-18 a76efea1fe cmd/go/internal/mvs: don't emit duplicates from Req + 2021-02-18 609d82b289 cmd/dist: set GOARM=7 for windows/arm + 2021-02-18 f0be3cc547 runtime: unbreak 
linux/riscv64 following regabi merge + 2021-02-18 07ef313525 runtime/cgo: add cast in C code to avoid C compiler warning + 2021-02-17 2f0da6d9e2 go/types: revert "no 'declared but not used' errors for invalid var decls" + 2021-02-17 70c37ee7d0 cmd/compile/internal/test: gofmt abiutils_test.go + 2021-02-16 84825599dc all: merge branch dev.regabi (d3cd4830ad) into master + 2021-02-16 d3cd4830ad [dev.regabi] test: run abi/regabipragma test with -c=1 + 2021-02-16 03cea563d1 [dev.regabi] all: merge master (5faf941) into dev.regabi + 2021-02-16 b8fb049c7a [dev.regabi] cmd/go: copy internal/abi in TestNewReleaseRebuildsStalePackagesInGOPATH + 2021-02-16 5faf941df0 internal/goversion: update Version to 1.17 + 2021-02-16 ed55da46ab [dev.regabi] go/types: overlapping embedded interfaces requires go1.14 + 2021-02-16 7696c94334 [dev.regabi] go/types: type alias decl requires go1.9 + 2021-02-16 c2358a1ae7 [dev.regabi] runtime: stub out spillArgs and unspillArgs + 2021-02-16 8cfbf34dd9 internal/abi: set register count constants to zero for regabi experiment + 2021-02-16 6f3da9d2f6 README: pull gopher image from website + 2021-02-16 d28aae26b0 [dev.regabi] cmd/link: recognize internal/abi as runtime package + 2021-02-16 098504c73f cmd/link: generate trampoline for inter-dependent packages + 2021-02-16 1004a7cb31 runtime/metrics: update documentation to current interface + 2021-02-16 6530f2617f doc/go1.16: remove draft notice + 2021-02-16 353e111455 doc/go1.16: fix mismatched id attribute + 2021-02-16 e0215315f5 [dev.regabi] reflect: support for register ABI on amd64 for reflect.(Value).Call + 2021-02-16 f0d23c9dbb internal/poll: netpollcheckerr before sendfile + 2021-02-16 0cb3415154 doc: remove all docs not tied to distribution + 2021-02-16 626ef08127 doc: remove install.html and install-source.html + 2021-02-16 30641e36aa internal/poll: if copy_file_range returns 0, assume it failed + 2021-02-15 33d72fd412 doc/faq: update generics entry to reflect accepted proposal + 2021-02-15 
852ce7c212 cmd/go: provide a more helpful suggestion for "go vet -?" + 2021-02-13 66c27093d0 cmd/link: fix typo in link_test.go + 2021-02-13 b81efb7ec4 [dev.regabi] go/types: add support for language version checking + 2021-02-13 a7e9b4b948 [dev.regabi] go/types: untyped shift counts must fit into uint + 2021-02-13 060fa49bd2 [dev.regabi] go/types: refuse excessively long constants + 2021-02-12 baa6c75dce [dev.regabi] internal/abi: add new internal/abi package for ABI constants + 2021-02-12 d1fd9a8863 [dev.regabi] all: merge master (ff0e93e) into dev.regabi + 2021-02-12 ff0e93ea31 doc/go1.16: note that package path elements beginning with '.' are disallowed + 2021-02-11 249da7ec02 CONTRIBUTORS: update for the Go 1.16 release + 2021-02-11 864d4f1c6b cmd/go: multiple small 'go help' fixes + 2021-02-11 26ceae85a8 spec: More precise wording in section on function calls. + 2021-02-11 930c2c9a68 cmd/go: reject embedded files that can't be packed into modules + 2021-02-11 e5b08e6d5c io/fs: allow backslash in ValidPath, reject in os.DirFS.Open + 2021-02-10 ed8079096f cmd/compile: mark concrete call of reflect.(*rtype).Method as REFLECTMETHOD + 2021-02-10 59703d53e2 [dev.regabi] cmd/link: stop using ABI aliases if wrapper is enabled + 2021-02-09 e9c9683597 cmd/go: suppress errors from 'go get -d' for packages that only conditionally exist + 2021-02-09 168d6a49a5 [dev.regabi] go/types: use 512 bits as max. 
integer precision + 2021-02-09 0a62067708 [dev.regabi] go/types: adjust importer to match compiler importer + 2021-02-09 1c58fcf7ed [dev.regabi] go/types: handle untyped constant arithmetic overflow + 2021-02-09 493363ccff [dev.regabi] go/types: must not import a package called "init" + 2021-02-09 e0ac989cf3 archive/tar: detect out of bounds accesses in PAX records resulting from padded lengths + 2021-02-09 c48d1503ba [dev.regabi] go/types: report unused packages in source order + 2021-02-09 813958f13c [dev.regabi] go/types: factor out sorting of methods + 2021-02-09 11d15c171b [dev.regabi] go/types: convert untyped arguments to delete + 2021-02-09 c9d6f45fec runtime/metrics: fix a couple of documentation typpos + 2021-02-09 cea4e21b52 io/fs: backslash is always a glob meta character + 2021-02-08 dc725bfb3c doc/go1.16: mention new vet check for asn1.Unmarshal + 2021-02-08 618e3c15bd [dev.regabi] go/types: consistently report nil type as "untyped nil" + 2021-02-08 50449de66a [dev.regabi] all: merge master (1901853) into dev.regabi + 2021-02-08 7b0dfb177f [dev.regabi] runtime: use g register in some assembly functions on AMD64 + 2021-02-08 2e60c00f56 [dev.regabi] cmd/internal/obj/x86: use g register in stack bounds check + 2021-02-08 22f9e1ccbc [dev.regabi] runtime: initialize special registers before sigpanic + 2021-02-08 5d7dc53888 [dev.regabi] cmd/compile, runtime: reserve R14 as g registers on AMD64 + 2021-02-08 1901853098 runtime/metrics: fix panic in readingAllMetric example + 2021-02-08 ed3e4afa12 syscall/plan9: remove spooky fd action at a distance + 2021-02-08 a21de9ec73 [dev.regabi] cmd/link: resolve symbol ABI in shared linkage + 2021-02-05 724d0720b3 doc/go1.16: add missed heading tag in vet section + 2021-02-05 b54cd94d47 embed, io/fs: clarify that leading and trailing slashes are disallowed + 2021-02-05 4516afebed testing/fstest: avoid symlink-induced failures in tester + 2021-02-05 8fa84772ba [dev.regabi] runtime: delete gosave function + 2021-02-05 
946351d5a2 [dev.regabi] runtime: zero X15 in racecall + 2021-02-05 397a46a10a [dev.regabi] cmd/asm: define g register on AMD64 + 2021-02-05 e79c2fd428 [dev.regabi] runtime: mark racecallbackthunk as ABIInternal + 2021-02-05 7cc6de59f2 [dev.regabi] runtime: don't mark rt0_go ABIInternal + 2021-02-05 63de211014 [dev.regabi] runtime/cgo: call setg_gcc in crosscall_amd64 + 2021-02-04 120b819f45 [dev.regabi] go/types: report error for invalid main function signature + 2021-02-04 52d5cb2822 [dev.regabi] cmd/internal/obj: access Attribute atomically + 2021-02-04 bc451b5770 [dev.regabi] go/types: port check_test.go ergonomics from dev.typeparams + 2021-02-04 afd67f3334 [dev.regabi] go/types: no "declared but not used" errors for invalid var decls + 2021-02-04 8869086d8f runtime: fix typo in histogram.go + 2021-02-03 401d7e5a24 [dev.regabi] cmd/compile: reserve X15 as zero register on AMD64 + 2021-02-03 bfc7418e6d [dev.regabi] runtime, syscall, etc.: mark Darwin syscall wrappers as ABIInternal + 2021-02-03 e491c6eea9 math/big: fix comment in divRecursiveStep + 2021-02-02 23b0c1f76e [dev.regabi] all: merge master (fca94ab) into dev.regabi + 2021-02-02 fca94ab3ab spec: improve the example in Type assertions section + 2021-02-02 98f8454a73 cmd/link: don't decode type symbol in shared library in deadcode + 2021-02-02 1426a571b7 cmd/link: fix off-by-1 error in findShlibSection + 2021-02-01 32e789f4fb test: fix incorrectly laid out instructions in issue11656.go + 2021-02-01 ca6999e27c [dev.regabi] test: add a test for inlining closures + 2021-02-01 0b6cfea634 doc/go1.16: document that on OpenBSD syscalls are now made through libc + 2021-02-01 26e29aa15a cmd/link: disable TestPIESize if CGO isn't enabled + 2021-02-01 6ac91e460c doc/go1.16: minor markup fixes + 2021-01-29 44361140c0 embed: update docs for proposal tweaks + 2021-01-29 68058edc39 runtime: document pointer write atomicity for memclrNoHeapPointers + 2021-01-28 c8bd8010ff syscall: generate readlen/writelen for openbsd 
libc + 2021-01-28 41bb49b878 cmd/go: revert TestScript/build_trimpath to use ioutil.ReadFile + 2021-01-28 725a642c2d runtime: correct syscall10/syscall10X on openbsd/amd64 + 2021-01-28 4b068cafb5 doc/go1.16: document go/build/constraint package + 2021-01-28 376518d77f runtime,syscall: convert syscall on openbsd/arm64 to libc + 2021-01-27 aca22bddf2 [dev.regabi] cmd/compile: remove nested functions from expands_calls.go + 2021-01-27 667e08ba8c [dev.regabi] cmd/go: Use GOMAXPROCS to limit default build, compile parallelism + 2021-01-27 00f2ff5c94 api/go1.16: add go/build/constraint APIs + 2021-01-27 35334caf18 crypto/x509: remove leftover CertificateRequest field + 2021-01-27 a5a5e2c968 runtime: make sure to remove open-coded defer entries in all cases after a recover + 2021-01-27 8cfa01943a runtime: block console ctrlhandler when the signal is handled + 2021-01-27 ff9e8364c6 cmd/go: skip issue33139 when the 'cc' script command is unavailable + 2021-01-27 cd176b3615 runtime: switch runtime to libc for openbsd/arm64 + 2021-01-27 6c8fbfbdcf runtime: convert openbsd/arm64 locking to libc + 2021-01-27 5cdf0da1bf syscall: clean up mkasm related changes + 2021-01-27 210f70e298 doc/go1.16: fix closing brace in .Export format + 2021-01-27 0f797f168d math: fix typo in sqrt.go code comment + 2021-01-26 9b636feafe [dev.regabi] cmd/compile: missing last patch set for cl286013 + 2021-01-26 f7dad5eae4 [dev.regabi] cmd/compile: remove leftover code form late call lowering work + 2021-01-26 8634a234df runtime,syscall: convert syscall on openbsd/amd64 to libc + 2021-01-26 1d5e14632e os: further document limitations around naked file descriptors + 2021-01-25 5e4a0cdde3 [dev.regabi] all: merge master (bf0f7c9) into dev.regabi + 2021-01-26 cf263e9f77 os: correct names in CreateTemp and MkdirTemp doc comments + 2021-01-26 ce8b318624 net/http/fcgi: remove locking added to prevent a test-only race + 2021-01-25 bf0f7c9d78 doc/go1.16: mention os.DirFS in os section + 2021-01-25 deaf29a8a8 
cmd/compile: fix order-of-assignment issue w/ defers + 2021-01-25 ad2ca26a52 doc/go1.16: mention os.DirEntry and types moved from os to io/fs + 2021-01-25 a51921fa5b doc/go1.16: mention new testing/iotest functions + 2021-01-25 e6b6d107f7 doc/go1.16: mention deprecation of io/ioutil + 2021-01-25 7eaaf28cae [dev.regabi] cmd/compile: disallow taking address of SSA'd values + 2021-01-25 96a276363b doc/go1.16: mention go/build changes + 2021-01-25 3d85c69a0b html/template: revert "avoid race when escaping updates template" + 2021-01-25 54514c6b28 cmd/go: fix TestScript/cgo_path, cgo_path_space when CC set + 2021-01-25 6f5e79f470 [dev.regabi] cmd/compile/internal: specify memory layout + 2021-01-25 cabffc199d [dev.regabi] cmd/compile/internal: add internal ABI specification + 2021-01-25 6de8443f3b doc/asm: add a section on go_asm.h, clean up go_tls.h section + 2021-01-25 6a4739ccc5 [dev.regabi] cmd/compile: enable rational constant arithmetic + 2021-01-25 be9612a832 [dev.regabi] os: disable TestDirFS until #42637 is fixed + 2021-01-25 8ee3d39838 [dev.regabi] cmd/go: workaround -race issue on ppc64le + 2021-01-25 54b251f542 lib/time, time/tzdata: update tzdata to 2021a + 2021-01-25 5a76c3d548 [dev.regabi] cmd/compile: modify abiutils for recently updated ABI + 2021-01-25 ff82cc971a os: force consistent mtime before running fstest on directory on Windows + 2021-01-25 044f937a73 doc/go1.16: fix WalkDir and Walk links + 2021-01-25 063c72f06d [dev.regabi] cmd/compile: backport changes from dev.typeparams (9456804) + 2021-01-23 b634f5d97a doc/go1.16: add crypto/x509 memory optimization + 2021-01-23 9897655c61 doc/go1.16: reword ambiguously parsable sentence + 2021-01-23 cd99385ff4 cmd/internal/obj/arm64: fix VMOVQ instruction encoding error + 2021-01-23 d05d6fab32 [dev.regabi] cmd/compile: replace ir.Name map with ir.NameSet for SSA 2 + 2021-01-23 66ee8b158f runtime: restore cgo_import_dynamic for libc.so on openbsd + 2021-01-23 48badc5fa8 [dev.regabi] cmd/compile: fix escape 
analysis problem with closures + 2021-01-23 51e1819a8d [dev.regabi] cmd/compile: scan body of closure in tooHairy to check for disallowed nodes + 2021-01-22 25c39e4fb5 io/ioutil: fix example test for WriteFile to allow it to run in the playground + 2021-01-22 eb21b31e48 runtime: define dummy msanmove + 2021-01-22 3a778ff50f runtime: check for g0 stack last in signal handler + 2021-01-22 a2cef9b544 cmd/go: don't lookup the path for CC when invoking cgo + 2021-01-22 7e0a81d280 [dev.regabi] all: merge master (dab3e5a) into dev.regabi + 2021-01-22 dab3e5affe runtime: switch runtime to libc for openbsd/amd64 + 2021-01-22 a1b53d85da cmd/go: add documentation for test and xtest fields output by go list + 2021-01-22 b268b60774 runtime: remove pthread_kill/pthread_self for openbsd + 2021-01-22 ec4051763d runtime: fix typo in mgcscavenge.go + 2021-01-22 7ece3a7b17 net/http: fix flaky TestDisableKeepAliveUpgrade + 2021-01-22 50cba0506f time: clarify Timer.Reset behavior on AfterFunc Timers + 2021-01-22 cf10e69f17 doc/go1.16: mention net/http.Transport.GetProxyConnectHeader + 2021-01-22 ec1b945265 doc/go1.16: mention path/filepath.WalkDir + 2021-01-22 11def3d40b doc/go1.16: mention syscall.AllThreadsSyscall + 2021-01-21 07b0235609 doc/go1.16: add notes about package-specific fs.FS changes + 2021-01-21 e2b4f1fea5 doc/go1.16: minor formatting fix + 2021-01-21 9f43a9e07b doc/go1.16: mention new debug/elf constants + 2021-01-21 3c2f11ba5b cmd/go: overwrite program name with full path + 2021-01-21 953d1feca9 all: introduce and use internal/execabs + 2021-01-21 b186e4d70d cmd/go: add test case for cgo CC setting + 2021-01-21 5a8a2265fb cmd/cgo: report exec errors a bit more clearly + 2021-01-21 46e2e2e9d9 cmd/go: pass resolved CC, GCCGO to cgo + 2021-01-21 3d40895e36 runtime: switch openbsd/arm64 to pthreads + 2021-01-21 d95ca91380 crypto/elliptic: fix P-224 field reduction + 2021-01-21 d7e71c01ad [dev.regabi] cmd/compile: replace ir.Name map with ir.NameSet for dwarf + 2021-01-21 
5248f59a22 [dev.regabi] cmd/compile: replace ir.Name map with ir.NameSet for SSA + 2021-01-21 970d8b6cb2 [dev.regabi] cmd/compile: replace ir.Name map with ir.NameSet in inlining + 2021-01-21 68a4664475 [dev.regabi] cmd/compile: remove tempAssigns in walkCall1 + 2021-01-21 fd9a391cdd [dev.regabi] cmd/compile: remove CallExpr.Rargs + 2021-01-21 19a6db6b63 [dev.regabi] cmd/compile: make sure mkcall* passed non-nil init + 2021-01-21 9f036844db [dev.regabi] cmd/compile: use ir.DoChildren directly in inlining + 2021-01-21 213c3905e9 [dev.regabi] cmd/compile: use node walked flag to prevent double walk for walkSelect + 2021-01-20 1760d736f6 [dev.regabi] cmd/compile: exporting, importing, and inlining functions with OCLOSURE + 2021-01-20 ecf4ebf100 cmd/internal/moddeps: check content of all modules in GOROOT + 2021-01-20 92cb157cf3 [dev.regabi] cmd/compile: late expansion of return values + 2021-01-20 d2d155d1ae runtime: don't adjust timer pp field in timerWaiting status + 2021-01-20 803d18fc6c cmd/go: set Incomplete field on go list output if no files match embed + 2021-01-20 6e243ce71d cmd/go: have go mod vendor copy embedded files in subdirs + 2021-01-20 be28e5abc5 cmd/go: fix mod_get_fallback test + 2021-01-20 928bda4f4a runtime: convert openbsd/amd64 locking to libc + 2021-01-19 824f2d635c cmd/go: allow go fmt to complete when embedded file is missing + 2021-01-19 0575e35e50 cmd/compile: require 'go 1.16' go.mod line for //go:embed + 2021-01-19 9423d50d53 [dev.regabi] cmd/compile: use '%q' for printing rune values less than 128 + 2021-01-19 ccb2e90688 cmd/link: exit before Asmb2 if error + 2021-01-19 ca5774a5a5 embed: treat uninitialized FS as empty + 2021-01-19 d047c91a6c cmd/link,runtime: switch openbsd/amd64 to pthreads + 2021-01-19 61debffd97 runtime: factor out usesLibcall + 2021-01-19 9fed39d281 runtime: factor out mStackIsSystemAllocated + 2021-01-19 a2f825c542 [dev.regabi] cmd/compile: directly create go.map and go.track symbols + 2021-01-19 4a4212c0e5 
[dev.regabi] cmd/compile: refactor Linksym creation + 2021-01-19 4f5c603c0f [dev.regabi] cmd/compile: cleanup callTargetLSym + 2021-01-18 dbab079835 runtime: free Windows event handles after last lock is dropped + 2021-01-18 5a8fbb0d2d os: do not close syscall.Stdin in TestReadStdin + 2021-01-18 422f38fb6c [dev.regabi] cmd/compile: move stack objects to liveness + 2021-01-18 6113db0bb4 [dev.regabi] cmd/compile: convert OPANIC argument to interface{} during typecheck + 2021-01-18 4c835f9169 [dev.regabi] cmd/compile: use LinksymOffsetExpr in TypePtr/ItabAddr + 2021-01-18 0ffa1ead6e [dev.regabi] cmd/compile: use *obj.LSym instead of *ir.Name for staticdata functions + 2021-01-17 7e0fa38aad [dev.regabi] cmd/compile: remove unneeded packages from ir.Pkgs + 2021-01-17 99a5db11ac [dev.regabi] cmd/compile: use LinksymOffsetExpr in walkConvInterface + 2021-01-17 87845d14f9 [dev.regabi] cmd/compile: add ir.TailCallStmt + 2021-01-17 e3027c6828 [dev.regabi] cmd/compile: fix linux-amd64-noopt builder + 2021-01-17 59ff93fe64 [dev.regabi] cmd/compile: rename NameOffsetExpr to LinksymOffsetExpr + 2021-01-17 82b9cae700 [dev.regabi] cmd/compile: change ir.NameOffsetExpr to use *obj.LSym instead of *Name + 2021-01-17 88956fc4b1 [dev.regabi] cmd/compile: stop analyze NameOffsetExpr.Name_ in escape analysis + 2021-01-17 7ce2a8383d [dev.regabi] cmd/compile: simplify stack temp initialization + 2021-01-17 ba0e8a92fa [dev.regabi] cmd/compile: refactor temp construction in walk + 2021-01-17 78e5aabcdb [dev.regabi] cmd/compile: replace Node.HasCall with walk.mayCall + 2021-01-16 6de9423445 [dev.regabi] cmd/compile: cleanup OAS2FUNC ordering + 2021-01-16 a956a0e909 [dev.regabi] cmd/compile, runtime: fix up comments/error messages from recent renames + 2021-01-16 ab3b67abfd [dev.regabi] cmd/compile: remove ONEWOBJ + 2021-01-16 c9b1445ac8 [dev.regabi] cmd/compile: remove TypeAssertExpr {Src,Dst}Type fields + 2021-01-15 682a1d2176 runtime: detect errors in DuplicateHandle + 2021-01-15 
9f83418b83 cmd/link: remove GOROOT write in TestBuildForTvOS + 2021-01-15 ec9470162f cmd/compile: allow embed into any string or byte slice type + 2021-01-15 54198b04db cmd/compile: disallow embed of var inside func + 2021-01-15 b386c735e7 cmd/go: fix go generate docs + 2021-01-15 bb5075a525 syscall: remove RtlGenRandom and move it into internal/syscall + 2021-01-15 1deae0b597 os: invoke processKiller synchronously in testKillProcess + 2021-01-15 03a875137f [dev.regabi] cmd/compile: unexport reflectdata.WriteType + 2021-01-15 14537e6e54 [dev.regabi] cmd/compile: move stkobj symbol generation to SSA + 2021-01-15 ab523fc510 [dev.regabi] cmd/compile: don't promote Byval CaptureVars if Addrtaken + 2021-01-15 ff196c3e84 crypto/x509: update iOS bundled roots to version 55188.40.9 + 2021-01-15 b7a698c73f [dev.regabi] test: disable test on windows because expected contains path separators. + 2021-01-15 4be7af23f9 [dev.regabi] cmd/compile: fix ICE during ir.Dump + 2021-01-14 e125ccd10e cmd/go: in 'go mod edit', validate versions given to -retract and -exclude + 2021-01-14 eb330020dc cmd/dist, cmd/go: pass -arch for C compilation on Darwin + 2021-01-14 84e8a06f62 cmd/cgo: remove unnecessary space in cgo export header + 2021-01-14 0c86b999c3 cmd/test2json: document passing -test.paniconexit0 + 2021-01-14 9135795891 cmd/go/internal/load: report positions for embed errors + 2021-01-14 35b9c66601 [dev.regabi] cmd/compile,cmd/link: additional code review suggestions for CL 270863 + 2021-01-14 d9b79e53bb cmd/compile: fix wrong complement for arm64 floating-point comparisons + 2021-01-14 c73232d08f cmd/go/internal/load: refactor setErrorPos to PackageError.setPos + 2021-01-14 6aa28d3e06 go/build: report positions for go:embed directives + 2021-01-14 9734fd482d [dev.regabi] cmd/compile: use node walked flag to prevent double walk for walkSwitch + 2021-01-14 f97983249a [dev.regabi] cmd/compile: move more PAUTOHEAP to SSA construction + 2021-01-14 4476300425 [dev.regabi] cmd/compile: 
use byte for CallExpr.Use + 2021-01-14 5a5ab24689 [dev.regabi] cmd/compile: do not rely on CallExpr.Rargs for detect already walked calls + 2021-01-14 983ac4b086 [dev.regabi] cmd/compile: fix ICE when initializing blank vars + 2021-01-13 7eb31d999c cmd/go: add hints to more missing sum error messages + 2021-01-13 d6d4673728 [dev.regabi] cmd/compile: fix GOEXPERIMENT=regabi builder + 2021-01-13 c41b999ad4 [dev.regabi] cmd/compile: refactor abiutils from "gc" into new "abi" + 2021-01-13 861707a8c8 [dev.regabi] cmd/compile: added limited //go:registerparams pragma for new ABI dev + 2021-01-13 c1370e918f [dev.regabi] cmd/compile: add code to support register ABI spills around morestack calls + 2021-01-13 2abd24f3b7 [dev.regabi] test: make run.go error messages slightly more informative + 2021-01-13 9a19481acb [dev.regabi] cmd/compile: make ordering for InvertFlags more stable + 2021-01-12 d9acf6f3a3 [dev.regabi] cmd/compile: remove Func.ClosureType + 2021-01-12 41352fd401 [dev.regabi] cmd/compile: transform closures during walk + 2021-01-12 d6ad88b4db [dev.regabi] cmd/compile: compile functions before closures + 2021-01-12 432f9ffb11 [dev.regabi] cmd/compile: unindent compileFunctions + 2021-01-12 cc90e7a51e [dev.regabi] cmd/compile: always use the compile queue + 2021-01-12 cd5b74d2df [dev.regabi] cmd/compile: call NeedFuncSym in InitLSym + 2021-01-12 ba76567bc2 cmd/go/internal/modload: delete unused *mvsReqs.next method + 2021-01-12 665def2c11 encoding/asn1: document unmarshaling behavior for IMPLICIT string fields + 2021-01-12 95acd8121b [dev.regabi] cmd/compile: remove Name.Typegen + 2021-01-12 12ee55ba7b [dev.regabi] cmd/compile: stop using Vargen for import/export + 2021-01-12 b4d2a0445b [dev.regabi] cmd/compile: refactor closure var setup/teardown + 2021-01-12 f57f484053 [dev.regabi] cmd/compile: decouple escape analysis from Name.Vargen + 2021-01-11 81ea89adf3 cmd/go: fix non-script staleness checks interacting badly with GOFLAGS + 2021-01-11 759309029f doc: 
update editors.html for Go 1.16 + 2021-01-11 c3b4c7093a cmd/internal/objfile: don't require runtime.symtab symbol for XCOFF + 2021-01-10 7fd84c6e46 [dev.regabi] cmd/compile: remove OCLOSUREREAD + 2021-01-10 c9c26d7ffb [dev.regabi] cmd/compile: use ClosureVars for method value wrappers + 2021-01-10 950cf4d46c [dev.regabi] cmd/compile: bind closure vars during SSA constructions + 2021-01-10 8b2efa990b [dev.regabi] cmd/compile: deref PAUTOHEAPs during SSA construction + 2021-01-08 59bfc18e34 cmd/go: add hint to read 'go help vcs' to GOVCS errors + 2021-01-08 6ee9b118a2 [dev.regabi] cmd/compile: remove fmt_test code; it has outlived its usefulness + 2021-01-08 cd6f3a54e4 cmd/go: revise 'go help' documentation for modules + 2021-01-08 6192b98751 cmd/go: make hints in error messages more consistent + 2021-01-08 25886cf4bd cmd/go: preserve sums for indirect deps fetched by 'go mod download' + 2021-01-08 6250833911 runtime/metrics: mark histogram metrics as cumulative + 2021-01-08 8f6a9acbb3 runtime/metrics: remove unused StopTheWorld Description field + 2021-01-08 6598c65646 cmd/compile: fix exponential-time init-cycle reporting + 2021-01-08 fefad1dc85 test: fix timeout code for invoking compiler + 2021-01-08 6728118e0a cmd/go: pass signals forward during "go tool" + 2021-01-08 e65c543f3c go/build/constraint: add parser for build tag constraint expressions + 2021-01-08 0c5afc4fb7 testing/fstest,os: clarify racy behavior of TestFS + 2021-01-08 32afcc9436 runtime/metrics: change unit on *-by-size metrics to match bucket unit + 2021-01-08 c6513bca5a io/fs: minor corrections to Glob doc + 2021-01-08 b241938e04 [dev.regabi] cmd/compile: fix some methods error text + 2021-01-08 304f769ffc cmd/compile: don't short-circuit copies whose source is volatile + 2021-01-08 ae97717133 runtime,runtime/metrics: use explicit histogram boundaries + 2021-01-08 a9ccd2d795 go/build: skip string literal while findEmbed + 2021-01-08 d92f8add32 archive/tar: fix typo in comment + 2021-01-08 
cab1202183 cmd/link: accept extra blocks in TestFallocate + 2021-01-08 ee4d32249b io/fs: minor corrections to Glob release date + 2021-01-08 54bd1ccce2 cmd: update to latest golang.org/x/tools + 2021-01-07 9ec21a8f34 Revert "reflect: support multiple keys in struct tags" + 2021-01-07 091414b5b7 io/fs: correct WalkDirFunc documentation + 2021-01-07 9b55088d6b doc/go1.16: add release note for disallowing non-ASCII import paths + 2021-01-07 fa90aaca7d cmd/compile: fix late expand_calls leaf type for OpStructSelect/OpArraySelect + 2021-01-07 7cee66d4cb cmd/go: add documentation for Embed fields in go list output + 2021-01-07 e60cffa4ca html/template: attach functions to namespace + 2021-01-07 6da2d3b7d7 cmd/link: fix typo in asm.go + 2021-01-07 df81a15819 runtime: check mips64 VDSO clock_gettime return code + 2021-01-06 4787e906cf crypto/x509: rollback new CertificateRequest fields + 2021-01-06 c9658bee93 cmd/go: make module suggestion more friendly + 2021-01-06 4c668b25c6 runtime/metrics: fix panic message for Float64Histogram + 2021-01-06 d2131704a6 net/http/httputil: fix deadlock in DumpRequestOut + 2021-01-05 3e1e13ce6d cmd/go: set cfg.BuildMod to "readonly" by default with no module root + 2021-01-05 0b0d004983 cmd/go: pass embedcfg to gccgo if supported + 2021-01-05 cb05a0aa6a [dev.regabi] cmd/compile: remove toolstash scaffolding + 2021-01-05 9821838832 [dev.regabi] cmd/compile: remove CaptureVars + 2021-01-05 fd43831f44 [dev.regabi] cmd/compile: reimplement capture analysis + 2021-01-05 fb69c67cad [dev.regabi] test: enable finalizer tests on !amd64 + 2021-01-05 1b85e7c057 cmd/go: don't scan gccgo standard library packages for imports + 2021-01-05 81f4f0e912 [dev.regabi] cmd/compile: remove race-y check in Name.Canonical + 2021-01-05 6b37b15d95 runtime: don't take allglock in tracebackothers + 2021-01-05 4a9d9adea4 [dev.regabi] cmd/compile: remove initname function + 2021-01-05 77365c5ed7 [dev.regabi] cmd/compile: add Name.Canonical and move Byval + 2021-01-05 
e09783cbc0 [dev.regabi] cmd/compile: make ir.StaticValue safer + 2021-01-05 9aa950c407 [dev.regabi] cmd/compile: make ir.OuterValue safer + 2021-01-05 eb626409d1 [dev.regabi] cmd/compile: simplify CaptureVars + 2021-01-05 c28ca67a96 [dev.regabi] cmd/compile: fix ir.Dump for []*CaseClause, etc + 2021-01-04 9eef49cfa6 math/rand: fix typo in comment + 2021-01-04 b01fb2af9e testing/fstest: fix typo in error message + 2021-01-04 f24e40c14a [dev.regabi] cmd/compile: remove Name.Class_ accessors + 2021-01-04 d89705e087 [dev.regabi] cmd/compile: fix re-export of parameters + 2021-01-04 290b4154b7 [dev.regabi] cmd/compile: fix ICE due to large uint64 constants + 2021-01-04 a30fd52884 [dev.regabi] cmd/compile: use ir.NewNameAt in SubstArgTypes + 2021-01-03 8fc44cf0fa [dev.regabi] cmd/compile: remove a couple CloneName calls + 2021-01-03 907a4bfdc7 [dev.regabi] cmd/compile: fix map assignment order + 2021-01-03 f2e6dab048 [dev.regabi] cmd/compile: remove walkReturn "common case" path + 2021-01-03 d36a6bf44d [dev.regabi] cmd/compile: improve walkReturn common case + 2021-01-03 a317067d65 [dev.regabi] cmd/compile: improve ascompatee + 2021-01-03 5d80a590a2 [dev.regabi] cmd/compile: simplify walkReturn + 2021-01-03 bb1b6c95c2 [dev.regabi] cmd/compile: remove Node.{,Set}Walkdef + 2021-01-03 57c426c9a5 [dev.regabi] cmd/compile: tighten typecheckdef to *ir.Name + 2021-01-03 b1747756e3 [dev.regabi] cmd/compile: reorganize escape analysis somewhat + 2021-01-02 f2538033c0 [dev.regabi] cmd/compile: remove Nodes.Set [generated] + 2021-01-02 2f2d4b4e68 [dev.regabi] cmd/compile: remove {Ptr,Set}Init from Node interface + 2021-01-01 3dd5867605 doc: 2021 is the Year of the Gopher + 2021-01-01 1544a03198 [dev.regabi] cmd/compile: refactor redundant type conversion [generated] + 2021-01-01 7958a23ea3 [dev.regabi] cmd/compile: use *ir.Name where possible in inl.go + 2021-01-01 bfa97ba48f [dev.regabi] test: add another closure test case + 2021-01-01 67ad695416 [dev.regabi] cmd/compile: split 
escape analysis state + 2021-01-01 fad9a8b528 [dev.regabi] cmd/compile: simplify inlining of closures + 2021-01-01 7d55669847 [dev.regabi] cmd/compile: simplify dwarfgen.declPos + 2021-01-01 9ed1577779 [dev.regabi] cmd/compile: remove Func.ClosureEnter + 2021-01-01 ece345aa69 [dev.regabi] cmd/compile: expand documentation for Func.Closure{Vars,Enter} + 2021-01-01 6ddbc75efd [dev.regabi] cmd/compile: earlier deadcode removal + 2021-01-01 68e6fa4f68 [dev.regabi] cmd/compile: fix package-initialization order + 2021-01-01 3a4474cdfd [dev.regabi] cmd/compile: some more manual shuffling + 2021-01-01 0f1d2129c4 [dev.regabi] cmd/compile: reshuffle type-checking code [generated] + 2021-01-01 b8fd3440cd [dev.regabi] cmd/compile: report unused variables during typecheck + 2021-01-01 fd22df9905 [dev.regabi] cmd/compile: remove idempotent Name() calls [generated] + 2020-12-31 dfbcff80c6 [dev.regabi] cmd/compile: make copyExpr return *ir.Name directly + 2020-12-31 77fd81a3e6 [dev.regabi] cmd/compile: use names for keep alive variables in function call + 2020-12-31 8fe1197654 [dev.regabi] cmd/compile: remove Name.orig + 2020-12-31 477b049060 [dev.regabi] cmd/compile: fix printing of method expressions + 2020-12-31 95ce805d14 io/fs: remove darwin/arm64 special condition + 2020-12-30 20d0991b86 lib/time, time/tzdata: update tzdata to 2020f + 2020-12-30 ed301733bb misc/cgo/testcarchive: remove special flags for Darwin/ARM + 2020-12-30 0ae2e032f2 misc/cgo/test: enable TestCrossPackageTests on darwin/arm64 + 2020-12-30 178c667db2 [dev.regabi] cmd/compile: fix OSLICEARR comments + 2020-12-30 f0d99def5b [dev.regabi] cmd/compile: add newline to ir.Dump + 2020-12-30 451693af71 [dev.regabi] cmd/compile: simplify typecheckdef + 2020-12-30 0c1a899a6c [dev.regabi] cmd/compile: fix defined-pointer method call check + 2020-12-30 f9b67f76a5 [dev.regabi] cmd/compile: change ir.DoChildren to use bool result type + 2020-12-30 499851bac8 [dev.regabi] cmd/compile: generalize ir/mknode.go + 2020-12-30 
82ab3d1448 [dev.regabi] cmd/compile: use *ir.Name for Decl.X + 2020-12-30 9958b7ed3e [dev.regabi] cmd/compile: unexport ir.FmtNode + 2020-12-29 780b4de16b misc/ios: fix wording for command line instructions + 2020-12-29 b4a71c95d2 doc/go1.16: reference misc/ios/README for how to build iOS programs + 2020-12-29 f83e0f6616 misc/ios: add to README how to build ios executables + 2020-12-29 f5816624cd [dev.regabi] cmd/compile: change AddrExpr.Alloc to AddrExpr.Prealloc + 2020-12-29 850aa7c60c [dev.regabi] cmd/compile: use *ir.Name instead of ir.Node for CaseClause.Var + 2020-12-29 37babc97bb [dev.regabi] cmd/compile: allow visitor visits *ir.Name + 2020-12-29 5cf3c87fa6 [dev.regabi] cmd/compile: generate case/comm clause functions in mknode.go + 2020-12-29 b3e1ec97fd [dev.regabi] cmd/compile: move new addrtaken bit back to the old name + 2020-12-29 0620c674dd [dev.regabi] cmd/compile: remove original addrtaken bit + 2020-12-29 0523d525ae [dev.regabi] cmd/compile: separate out address taken computation from typechecker + 2020-12-29 9ea272e5ec [dev.regabi] cmd/compile: simplify ir.Func somewhat + 2020-12-29 e40cb4d4ae [dev.regabi] cmd/compile: remove more unused code + 2020-12-29 6f30c95048 [dev.regabi] cmd/compile: remove unneeded indirection + 2020-12-29 171fc6f223 [dev.regabi] cmd/compile: remove workarounds for go/constant issues + 2020-12-29 33801cdc62 [dev.regabi] cmd/compile: use Ntype where possible + 2020-12-29 82ad3083f8 [dev.regabi] cmd/compile: remove typ from AssignOpStmt + 2020-12-29 e34c44a7c4 [dev.regabi] cmd/compile: refactoring typecheck arith + 2020-12-29 a5ec920160 [dev.regabi] cmd/compile: more Linksym cleanup + 2020-12-29 ec59b197d5 [dev.regabi] cmd/compile: rewrite to use linksym helpers [generated] + 2020-12-29 25c613c02d [dev.regabi] cmd/compile: add Linksym helpers + 2020-12-29 289da2b33e [dev.regabi] cmd/compile: move Node.Opt to Name + 2020-12-29 6acbae4fcc [dev.regabi] cmd/compile: address some ir TODOs + 2020-12-29 4629f6a51d [dev.regabi] 
cmd/compile: merge {Selector,CallPart,Method}Expr + 2020-12-29 e563715b30 [dev.regabi] cmd/compile: remove Sym.Importdef + 2020-12-29 3f370b75fb [dev.regabi] cmd/compile: cleanup //go:generate directives + 2020-12-28 4fd9455882 io/fs: fix typo in comment + 2020-12-28 07569dac4e [dev.regabi] all: merge master (1d78139) into dev.regabi + 2020-12-28 76136be027 [dev.regabi] cmd/compile: check for recursive import in ImportBody + 2020-12-28 fda7ec3a3f [dev.regabi] cmd/compile: remove Name.IsDDD, etc + 2020-12-28 098a6490b9 [dev.regabi] cmd/compile: remove Declare in makepartialcall + 2020-12-28 137f0d2e06 [dev.regabi] cmd/compile: remove unnecessary Name.Sym call + 2020-12-28 3383b5c74a [dev.regabi] cmd/compile: flatten dependency graph [generated] + 2020-12-28 f8afb8216a [dev.regabi] cmd/compile: rename CommStmt and CaseStmt [generated] + 2020-12-28 5f3bd59a0d [dev.regabi] cmd/compile: remove some unneeded code in package ir + 2020-12-28 3bdafb0d82 [dev.regabi] cmd/compile: remove CommStmt.List + 2020-12-28 2ecf52b841 [dev.regabi] cmd/compile: separate CommStmt from CaseStmt + 2020-12-28 ed9772e130 [dev.regabi] cmd/compile: add explicit file name in types generation + 2020-12-28 a59d26603f [dev.regabi] cmd/compile: use []*CaseStmt in {Select,Switch}Stmt + 2020-12-28 fbc4458c06 [dev.regabi] cmd/compile: simplify some tree traversal code + 2020-12-28 6c67677541 [dev.regabi] cmd/compile: simplify FuncName and PkgFuncName + 2020-12-28 676d794b81 [dev.regabi] cmd/compile: remove refersToCommonName + 2020-12-28 c98548e110 [dev.regabi] cmd/compile: merge ascompatee, ascompatee1, and reorder3 + 2020-12-28 4c215c4fa9 [dev.regabi] cmd/compile: simplify and optimize reorder3 + 2020-12-28 e6c973198d [dev.regabi] cmd/compile: stop mangling SelectorExpr.Sel for ODOTMETH + 2020-12-28 135ce1c485 [dev.regabi] cmd/compile: desugar OMETHEXPR into ONAME during walk + 2020-12-28 0f732f8c91 [dev.regabi] cmd/compile: minor walkExpr cleanups + 2020-12-28 0de8eafd98 [dev.regabi] cmd/compile: 
remove SelectorExpr.Offset field + 2020-12-28 a4f335f420 [dev.regabi] cmd/compile: always use a Field for ODOTPTR expressions + 2020-12-26 1d78139128 runtime/cgo: fix Android build with NDK 22 + 2020-12-25 2018b68a65 net/mail: don't use MDT in test + 2020-12-25 e4f293d853 [dev.regabi] cmd/compile: fix OCALLMETH desugaring + 2020-12-25 1d9a1f67d5 [dev.regabi] cmd/compile: don't emit reflect data for method types + 2020-12-25 396b6c2e7c [dev.regabi] cmd/compile: cleanup assignment typechecking + 2020-12-25 e24d2f3d05 [dev.regabi] cmd/compile: remove typ from RangeStmt + 2020-12-25 2785c691c2 [dev.regabi] cmd/compile: cleanup devirtualization docs + 2020-12-25 4b1d0fe66f [dev.regabi] cmd/compile: new devirtualization pkg [generated] + 2020-12-24 082cc8b7d9 [dev.regabi] cmd/compile: change ir.IsAssignable -> ir.IsAddressable + 2020-12-24 27b248b307 [dev.regabi] cmd/compile: separate range stmt Vars to Key, Value nodes + 2020-12-23 40818038bf [dev.regabi] cmd/compile: change CaseStmt.Vars to Var + 2020-12-23 b116404444 runtime: shift timeHistogram buckets and allow negative durations + 2020-12-23 8db7e2fecd runtime: fix allocs-by-size and frees-by-size buckets + 2020-12-23 fb96f07e1a runtime: fix nStackRoots comment about stack roots + 2020-12-23 d1502b3c72 lib/time, time/tzdata: update tzdata to 2020e + 2020-12-23 30c99cbb7a cmd/go: add the Retract field to 'go help mod edit' definition of the GoMod struct + 2020-12-23 49d0b239cb doc: fix a typo in contribute.html + 2020-12-23 9eeed291bc [dev.regabi] cmd/compile: eliminate usage of ir.Node in liveness + 2020-12-23 d1d64e4cea [dev.regabi] cmd/compile: split SliceExpr.List into separate fields + 2020-12-23 98a73030b0 cmd/go: in 'go get', promote named implicit dependencies to explicit + 2020-12-23 d19018e8f1 [dev.regabi] cmd/compile: split SliceHeaderExpr.LenCap into separate fields + 2020-12-23 53f082b0ee [dev.regabi] cmd/compile: cleanup export code further + 2020-12-23 31267f82e1 [dev.regabi] cmd/compile: simplify 
function/interface/struct typechecking + 2020-12-23 addade2cce [dev.regabi] cmd/compile: prefer types constructors over typecheck + 2020-12-23 18ebfb49e9 [dev.regabi] cmd/compile: cleanup noder + 2020-12-23 87a592b356 [dev.regabi] cmd/compile: cleanup import/export code + 2020-12-23 5898025026 [dev.regabi] cmd/compile: update mkbuiltin.go to use new type constructors + 2020-12-23 63c96c2ee7 [dev.regabi] cmd/compile: update mkbuiltin.go and re-enable TestBuiltin + 2020-12-23 37f138df6b [dev.regabi] cmd/compile: split out package test [generated] + 2020-12-23 3d8a3cb06b [dev.regabi] cmd/compile: split out package pkginit [generated] + 2020-12-23 3f04d964ab [dev.regabi] cmd/compile: split up walkexpr1, walkstmt [generated] + 2020-12-23 e4895ab4c0 [dev.regabi] cmd/compile: split out package walk [generated] + 2020-12-23 01fd2d05c8 [dev.regabi] cmd/compile: split out package dwarfgen [generated] + 2020-12-23 6c34d2f420 [dev.regabi] cmd/compile: split out package ssagen [generated] + 2020-12-23 de65151e50 [dev.regabi] cmd/compile: split out package reflectdata [generated] + 2020-12-23 4dfb5d91a8 [dev.regabi] cmd/compile: split out package staticdata [generated] + 2020-12-23 fbc82f03b1 [dev.regabi] cmd/compile: split out package noder [generated] + 2020-12-23 de454eef5f [dev.regabi] cmd/compile: split out package escape [generated] + 2020-12-23 071ab0a14c [dev.regabi] cmd/compile: split out package liveness [generated] + 2020-12-23 0ced54062e [dev.regabi] cmd/compile: split out package objw [generated] + 2020-12-23 575fd6ff0a [dev.regabi] cmd/compile: split out package inline [generated] + 2020-12-23 0256ba99a8 [dev.regabi] cmd/compile: split up typecheck1 [generated] + 2020-12-23 b9693d7627 [dev.regabi] cmd/compile: split out package typecheck [generated] + 2020-12-23 dac0de3748 [dev.regabi] cmd/compile: move type size calculations into package types [generated] + 2020-12-23 527a1895d6 [dev.regabi] cmd/compile: move helpers into package ir [generated] + 2020-12-23 
65c4c6dfb2 [dev.regabi] cmd/compile: group known symbols, packages, names [generated] + 2020-12-23 9ee309255a [dev.regabi] cmd/compile: move helpers into package types [generated] + 2020-12-23 ead4957892 [dev.regabi] cmd/compile: move helpers into package base [generated] + 2020-12-23 440308ffd7 [dev.regabi] cmd/compile: simplify Nodes usage [generated] + 2020-12-23 f9d373720e [dev.regabi] cmd/compile: remove Left, Right etc methods [generated] + 2020-12-23 14d667341f [dev.regabi] cmd/compile: remove Node.Left etc [generated] + 2020-12-23 6f27d29be0 [dev.regabi] cmd/compile: remove ir.Nod [generated] + 2020-12-23 fd6ba1c8a2 os/signal: fix a deadlock with syscall.AllThreadsSyscall() use + 2020-12-23 69cf39089f [dev.regabi] cmd/compile: do not die in early base.FlushErrors + 2020-12-23 6d03cde88a [dev.regabi] cmd/dist: automatically bootstrap cmd subdirs + 2020-12-23 b0b0d98283 runtime: linux iscgo support for not blocking nptl signals + 2020-12-23 d1d1099c91 [dev.regabi] cmd/compile: fixes for big rewrite + 2020-12-22 223331fc0c cmd/go/internal/modload: add hint for missing implicit dependency + 2020-12-22 ec741b0447 [dev.regabi] all: merge master (c9fb4eb) into dev.regabi + 2020-12-22 acc32ea124 [dev.regabi] codereview.cfg: add config for dev.regabi + 2020-12-22 c9fb4eb0a2 cmd/link: handle grouped resource sections + 2020-12-22 c40934b33d [dev.regabi] cmd/compile: adjust one case in walkexpr + 2020-12-22 280e7fd1ee [dev.regabi] cmd/compile: only access Func method on concrete types + 2020-12-22 51ba53f5c2 [dev.regabi] cmd/compile: separate misc for gc split + 2020-12-22 572f168ed2 [dev.regabi] cmd/compile: separate various from Main + 2020-12-22 3b12c6dc08 [dev.regabi] cmd/compile: separate typecheck more cleanly + 2020-12-22 7c8f5356ab [dev.regabi] cmd/compile: separate dowidth better + 2020-12-22 c06a354bcc test: trigger SIGSEGV instead of SIGTRAP in issue11656.go + 2020-12-22 0aa9b4709a cmd/pack: r command create output file if not exist + 2020-12-22 cb28c96be8 
[dev.regabi] cmd/compile,cmd/link: initial support for ABI wrappers + 2020-12-22 c8610e4700 [dev.regabi] cmd/compile: add ir.BasicLit to represent literals + 2020-12-22 3512cde10a [dev.regabi] cmd/compile: stop reusing Ntype for OSLICELIT length + 2020-12-22 2755361e6a [dev.regabi] cmd/compile: change noder.declNames to returns ir.Names + 2020-12-22 301af2cb71 [dev.regabi] runtime/race: adjust test pattern match for ABI wrapper + 2020-12-22 4d27c4c223 runtime: correct error handling in several FreeBSD syscall wrappers + 2020-12-22 9b6147120a cmd/pack: treat compiler's -linkobj output as "compiler object" + 2020-12-22 306b2451c8 [dev.regabi] runtime: fix ABI targets in runtime.panic{Index,Slice} shims + 2020-12-21 bc7e4d9257 syscall: don't generate ptrace on iOS + 2020-12-21 94cfeca0a5 [dev.regabi] cmd/compile: stop using ONONAME with Name + 2020-12-21 cb4898a77d [dev.regabi] cmd/compile: simplify declaration importing + 2020-12-21 06915ac14d [dev.regabi] cmd/compile: move itabname call out of implements + 2020-12-21 6cff874c47 runtime/metrics: add Read examples + 2020-12-21 4e8f681eff Merge "[dev.regabi] all: merge master into dev.regabi" into dev.regabi + 2020-12-21 1a523c8ab0 [dev.regabi] cmd/compile: separate nowritebarrierrec from main + 2020-12-21 e999c17022 [dev.regabi] cmd/compile: separate ssa from other phases + 2020-12-21 4836e28ac0 [dev.regabi] cmd/compile: separate noder more cleanly + 2020-12-21 85ce6ecfe3 [dev.regabi] cmd/compile: separate exportsym more cleanly + 2020-12-21 1a3b036b83 [dev.regabi] cmd/compile: collect global compilation state + 2020-12-21 2153a99914 [dev.regabi] cmd/compile: setup to move Addrconst, Patch into cmd/internal/obj + 2020-12-21 0bb0baf683 [dev.regabi] cmd/compile: cleanup for concrete types - more + 2020-12-21 ca8e17164e [dev.regabi] all: merge master into dev.regabi + 2020-12-21 8438a5779b runtime: use _exit on darwin + 2020-12-21 cb95819cf6 runtime: detect netbsd netpoll overrun in sysmon + 2020-12-21 53c984d976 
runtime: skip wakep call in wakeNetPoller on Plan 9 + 2020-12-21 9abbe27710 test: skip issue11656.go on mips/mips64/ppc64 + 2020-12-20 89b44b4e2b cmd/compile: recognize reassignments involving receives + 2020-12-19 55b58018f4 test: for issue11656 try to execute trap, not call it + 2020-12-18 626cc7c02d test: permit "exponent too large" error + 2020-12-18 139cd0e12f go/build: make TestDependencies work again + 2020-12-18 2de7866470 os: remove dependency on strings package + 2020-12-18 c45313bf45 [dev.regabi] cmd/compile: remove prealloc map + 2020-12-18 ffb0cb7044 [dev.regabi] cmd/compile: remove uses of Name.Offset, Name.copy + 2020-12-18 c76be2a24e [dev.regabi] cmd/compile: add ONAMEOFFSET, delete to-be-deleted fields + 2020-12-18 4e8f1e139f [dev.regabi] cmd/compile: cleanup for concrete types - sinit + 2020-12-18 27aba22651 [dev.regabi] cmd/compile: cleanup for concrete types - walk + 2020-12-18 0b9cb63b8d [dev.regabi] cmd/compile: rename ir.Find to ir.Any and update uses + 2020-12-18 ae652a4ac9 os/signal: fix flaky tests for NotifyContext. 
+ 2020-12-18 740851baca cmd/link: avoid use of -T when linking with lld + 2020-12-18 f1778c28a9 test: recognize and use gc build tag + 2020-12-17 8fcf318123 api/go1.16: remove crypto/tls APIs that are moved to Go 1.17 + 2020-12-17 520f3b72db crypto/tls: revert "add HandshakeContext method to Conn" + 2020-12-17 2ff33f5e44 api: promote next to go1.16 + 2020-12-17 aeedc9f804 [dev.regabi] cmd/compile: remove OSELRECV + 2020-12-17 0328c3b660 [dev.regabi] cmd/compile: use OSELRECV2 for all <-c variants + 2020-12-17 88e1415d08 [dev.regabi] cmd/compile: add type assertion in regabi test + 2020-12-17 9c384e881e [dev.regabi] cmd/compile: cleanup for concrete types - mop-up + 2020-12-17 be64c8bece [dev.regabi] cmd/compile: cleanup for concrete types - noder + 2020-12-17 5024396563 [dev.regabi] cmd/compile: cleanup for concrete types - subr + 2020-12-17 dd67b13d07 [dev.regabi] cmd/compile: cleanup for concrete types - range, select, swt + 2020-12-17 42fec2ded4 [dev.regabi] cmd/compile: cleanup for concrete types - const + 2020-12-17 389ae3d5ba [dev.regabi] cmd/compile: cleanup for concrete types - inl + 2020-12-17 5fe64298a4 [dev.regabi] cmd/compile: cleanup for concrete types - import/export + 2020-12-17 aa55d4e54b [dev.regabi] cmd/compile: cleanup for concrete types - escape + 2020-12-17 846740c17f [dev.regabi] cmd/compile: cleanup for concrete types - ssa + 2020-12-17 bf9bbbd6ed [dev.regabi] cmd/compile: cleanup for concrete types - order + 2020-12-17 4ac6a6317b [dev.regabi] cmd/compile: cleanup for concrete types - typecheck + 2020-12-17 f6efa3d4a4 [dev.regabi] cmd/compile: simplify ir.Find, replace ir.Inspect with ir.Visit + 2020-12-17 f6d2834f8f [dev.regabi] cmd/compile: limit Implicit method to nodes where it is defined + 2020-12-17 7fde0d2b50 [dev.regabi] cmd/compile: remove use of Initorder, Offset Node fields for initorder + 2020-12-17 114af2a044 [dev.regabi] cmd/compile: change Nodes to be a slice + 2020-12-17 4dfc7333f4 [dev.regabi] cmd/compile: update ir/fmt for 
concrete types + 2020-12-17 a997543292 [dev.regabi] cmd/compile: fix potential closure waste in Order + 2020-12-17 578fbbe3aa [dev.regabi] cmd/compile: rewrite some generic ir.Nod calls + 2020-12-17 5ae70b85c6 [dev.regabi] cmd/compile: cleanup preparing for concrete types, 2 + 2020-12-17 fa06894b36 [dev.regabi] cmd/compile: cleanup preparing for concrete types + 2020-12-17 5a4db102b2 html/template: avoid race when escaping updates template + 2020-12-16 b0f01e17f8 go/types: report error for invalid (but empty) expr switch + 2020-12-16 5abda2618b cmd/link: handle large relocation addend on darwin/arm64 + 2020-12-16 a318d56c1e cmd/link: pass arch-specific flags to external linker when testing supported flag + 2020-12-16 f4e7a6b905 cmd/internal/goobj: fix buglet in object file reader + 2020-12-16 75e16f5127 doc/go1.16: add link to reflect.StructTag + 2020-12-16 08b5091d03 net: close connection in localServer teardown + 2020-12-16 8981092d71 cmd/link: ignore SEH marking on PE objects + 2020-12-15 731bb54038 test: update for gofrontend error message changes + 2020-12-15 129bb1917b doc/go1.15: mention 1.15.3 cgo restriction on empty structs + 2020-12-15 685a322fe4 test: match gofrontend error messages + 2020-12-15 3d6467824c test: only require issue11674 errors with gc compiler + 2020-12-15 7cdc84a15b test: remove bug429 (duplicates runtime.TestSimpleDeadlock) + 2020-12-15 412dc2f4d3 test: adjust issue11371 to fit in required precision + 2020-12-15 8e2d74b705 test: only check for issue11362 error with gc + 2020-12-15 f8ac237032 test: import file name for issue19028 + 2020-12-15 a508840c67 doc/go1.16: fix path, path/filepath release notes + 2020-12-15 5046cb8a6e doc/go1.16: fix formatting in net, net/http and net/http/httputil sections + 2020-12-15 3298300ddf text/template: error on range over send channel + 2020-12-15 4c2d66f642 [dev.regabi] cmd/compile: use ir.Ident for imported identifiers + 2020-12-15 305d93ef84 [dev.regabi] cmd/compile: type check externdcl earlier + 
2020-12-15 9f16620f46 [dev.regabi] cmd/compile: fix latent Sym.SetPkgDef issue + 2020-12-15 5a25a3fd1d test: recognize gofrontend error messages + 2020-12-14 fea898a4b0 [dev.regabi] cmd/compile: intercept the making of OADDR nodes + 2020-12-14 663cd862ed cmd/link: do not mark resource section as writable + 2020-12-14 48dfa2b2dc cmd/link: deal with ADDR32NB relocations the same way as ADDR32 on arm + 2020-12-14 033390d9ad cmd/link: recognize arm header of PE objects + 2020-12-14 48906a6d57 net/http/pprof: don't treat os.Args as format string in Cmdline handler + 2020-12-14 6e3cc5c56f go/types: report invalid ... in conversions + 2020-12-14 278b9a8a4a io/fs: fix package reference in FS godoc + 2020-12-14 617383377f [dev.regabi] cmd/compile: reorg generated array hash loop + 2020-12-14 d06794da4a doc/go1.16: add missing <code> tag + 2020-12-14 dea6d94a44 math/big: add test for recursive division panic + 2020-12-14 2f5b1a3974 test: make a race detector test robust to timing variations + 2020-12-14 c81343ce3a net/http: attempt deadlock fix in TestDisableKeepAliveUpgrade + 2020-12-14 828746ec57 debug/dwarf: don't try to parse addr/rnglists header + 2020-12-14 be10af7c4e test: match gofrontend error messages + 2020-12-14 89f38323fa [dev.regabi] cmd/compile: add register ABI analysis utilities + 2020-12-14 ce61ccca8f test: match gofrontend error messages + 2020-12-14 a58be734ea cmd/compile: fix incorrect shift count type with s390x rules + 2020-12-14 8ce37e4110 [dev.regabi] cmd/compile: fix noopt builder + 2020-12-14 7e17b46c58 [dev.regabi] cmd/compile/internal/types: add IsScalar query method + 2020-12-14 2b76429eb0 [dev.regabi] cmd/compile: refactor type initialization code into helper + 2020-12-14 9c5241e520 [dev.regabi] cmd/compile: remove unnecessary String methods + 2020-12-14 267975dc47 Merge branch 'master' into dev.regabi + 2020-12-14 64d8846aae cmd/go: print hint when 'go install' run without version outside module + 2020-12-14 451b6b38fd cmd/go: refactor error 
reporting in internal/load + 2020-12-09 63bc23b545 [dev.regabi] cmd/compile: first start towards using Ident + 2020-12-09 eae8fd519b [dev.regabi] cmd/compile: iexport debug crumbs for toolstash + 2020-12-09 837b35cc55 [dev.regabi] cmd/compile: adjust IR representations + 2020-12-09 0c49440664 [dev.regabi] cmd/compile: arrange for walkstmt, walkexpr, to return from switch cases + 2020-12-09 4090af83c5 [dev.regabi] cmd/compile: use reflection in ir.Dump + 2020-12-09 e2d278bfeb [dev.regabi] cmd/compile: two small fixes + 2020-12-08 dbf2fc8cff [dev.regabi] cmd/compile: replace many uses of ir.Node with *ir.Name + 2020-12-08 bb31c75343 [dev.regabi] cmd/compile: ir.Node is no longer an ssa.Aux + 2020-12-08 6db970e20a [dev.regabi] cmd/compile: rewrite Aux uses of ir.Node to *ir.Name [generated] + 2020-12-08 1c8943a6ad [dev.regabi] cmd/compile: introduce FwdRefAux for wrapping ir.Node as ssa.Aux + 2020-12-08 dcec658f6c [dev.regabi] cmd/compile: change LocalSlot.N to *ir.Name + 2020-12-08 1a98ab0e2d [dev.regabi] cmd/compile: add ssa.Aux tag interface for Value.Aux + 2020-12-07 63722da46b [dev.regabi] cmd/compile: fix comment + 2020-12-07 6d783e7440 [dev.regabi] cmd/compile: export all Node fields [generated] + 2020-12-07 2de0af3b1b [dev.regabi] cmd/compile: prepare mknode for rename of Func.body + 2020-12-07 724374f859 [dev.regabi] cmd/compile: rewrite stale format strings + 2020-12-07 61889ba680 [dev.regabi] cmd/compile: simplify fmtmap + 2020-12-07 6ea2b8c54c [dev.regabi] cmd/compile: clean up and document formatting + 2020-12-07 bb4a37bd93 [dev.regabi] cmd/compile: move Type, Sym printing to package types [generated] + 2020-12-07 70155cca81 [dev.regabi] cmd/compile: untangle FmtFlag, FmtMode + 2020-12-07 3904a62829 [dev.regabi] cmd/compile: remove mode.Sprintf etc in printer + 2020-12-07 fb17dfa43d [dev.regabi] cmd/compile: narrow interface between ir and types + 2020-12-07 3b25f3c150 [dev.regabi] cmd/compile: simplify Op, Node, Nodes printing + 2020-12-07 8ce2605c5b 
[dev.regabi] cmd/compile: untangle ir.Dump printing + 2020-12-07 158c9dd131 [dev.regabi] cmd/compile: reorganize ir/fmt.go + 2020-12-07 a79742f39a [dev.regabi] cmd/compile: remove "short" node header mode + 2020-12-07 ef5964dd6b [dev.regabi] cmd/compile: arrange for typecheck1 to end in switch + 2020-12-07 dcc640e839 [dev.regabi] test: add exhaustive test of evaluated but not used + 2020-12-07 2cec6c4a8c [dev.regabi] cmd/compile: generate Node methods using program + 2020-12-07 d90b199e9c [dev.regabi] cmd/compile: silence errors about missing blank methods + 2020-12-06 e885df2731 [dev.regabi] cmd/compile: change iexport to avoid map[ir.Node] + 2020-12-06 2d4c95565a [dev.regabi] cmd/compile: change nowritebarrierrec to use map[*ir.Func] + 2020-12-06 1b5eed8982 [dev.regabi] cmd/compile: replace NodeQueue with NameQueue + 2020-12-06 6c5967e528 [dev.regabi] cmd/compile: change NodeSet to NameSet + 2020-12-04 46b6e70e3b [dev.regabi] cmd/compile: replace ir.Node with *ir.Name in Order + 2020-12-04 b75f51c645 [dev.regabi] cmd/compile: replace ir.Node with *ir.Name in Liveness + 2020-12-04 133b03e1c3 [dev.regabi] cmd/compile: rewrite code to use DeclaredBy + 2020-12-04 d9cb84c84b [dev.regabi] cmd/compile: add SameSource, Uses, and DeclaredBy helpers + 2020-12-04 5dbd2e8e44 [dev.regabi] cmd/compile: remove DeepCopyNode interface + 2020-12-04 9ab3d854ad [dev.regabi] cmd/compile: avoid general traversal in deadcode + 2020-12-04 bb5aa2b664 [dev.regabi] cmd/compile: implement editChildren for nodes + 2020-12-04 4725c3ffd1 [dev.regabi] cmd/compile: implement doChildren for nodes + 2020-12-04 18f2df7e81 [dev.regabi] cmd/compile: implement copy for nodes + 2020-12-04 d855b30fe4 [dev.regabi] cmd/compile: use ir.EditChildren for inline rewriting + 2020-12-04 b9df26d7a8 [dev.regabi] cmd/compile: use ir.Find for "search" traversals + 2020-12-04 0d1b44c645 [dev.regabi] cmd/compile: introduce IR visitors + 2020-12-04 7fcf5b994c [dev.regabi] cmd/compile: replace inlcopy with ir.DeepCopy 
+ 2020-12-04 989a3f5041 [dev.regabi] cmd/compile: adjustments to Copy and DeepCopy + 2020-12-04 99ecfcae31 [dev.regabi] cmd/compile: swap inlining order of if then vs else blocks + 2020-12-04 84cb51d7d7 [dev.regabi] cmd/compile: eliminate more SetOrig + 2020-12-03 351bc2f38c [dev.regabi] cmd/compile: store types.Field on {Selector,CallPart}Expr + 2020-12-03 a2058bac21 [dev.regabi] cmd/compile: add ConstExpr + 2020-12-03 beb5e05404 [dev.regabi] cmd/compile: refactoring prep for ConstExpr + 2020-12-03 7e81135be7 [dev.regabi] cmd/compile: rename addinit(n, init) to initExpr(init, n) + 2020-12-03 6e30fc10fc [dev.regabi] all: merge master (d0c0dc682c1f) into dev.regabi + 2020-12-03 59b8916d48 [dev.regabi] cmd/compile: handle OCONVNOP better in ssa + 2020-12-03 00e5727790 [dev.regabi] cmd/compile: remove okAs + 2020-12-03 5a3b6796cd [dev.regabi] cmd/compile: remove extra typ field in Name struct + 2020-12-02 64bc656aed [dev.regabi] cmd/compile: use explicit block statements for init + 2020-12-02 ecc8d15bc5 [dev.regabi] cmd/compile: delete OEMPTY + 2020-12-02 ec5f349b22 [dev.regabi] cmd/compile: merge OBLOCK and OEMPTY + 2020-12-02 c769d393de [dev.regabi] cmd/compile: add ir.NewDeclNameAt + 2020-12-02 c10b0ad628 [dev.regabi] cmd/compile: add Pkg parameter to type constructors + 2020-12-02 42e46f4ae0 [dev.regabi] cmd/compile: comment out //go:linkname warning + 2020-12-02 77a71e0057 [dev.regabi] cmd/compile: add Interface, Signature, and Struct constructors + 2020-12-02 15085f8974 [dev.regabi] cmd/compile: tweak hash bucket type descriptor + 2020-12-01 1408d26ccc [dev.regabi] cmd/compile: cleanup some leftover cruft + 2020-12-01 5ffa275f3c [dev.regabi] cmd/compile: first pass at abstracting Type + 2020-12-01 6ca23a45fe [dev.regabi] cmd/compile: only save ONAMEs on Curfn.Dcl + 2020-12-01 a17c5e2fce [dev.regabi] cmd/compile: add NewBasic and cleanup universe + 2020-12-01 f37aa5e4e2 [dev.regabi] cmd/compile: add NewNamed + 2020-12-01 63a6f08b39 [dev.regabi] cmd/compile: move 
setUnderlying to package types + 2020-12-01 f2311462ab [dev.regabi] cmd/compile: cleanup type-checking of defined types + 2020-12-01 2d6ff998ed [dev.regabi] cmd/compile: process //go:linknames after declarations + 2020-12-01 ecff7628ea [dev.regabi] cmd/compile: unexport Node.RawCopy + 2020-12-01 4da41fb3f8 [dev.regabi] cmd/compile: use ir.Copy instead of direct use of RawCopy + 2020-12-01 dadfc80bc1 [dev.regabi] cmd/compile: improve findTypeLoop + 2020-12-01 45f3b646d4 [dev.regabi] cmd/compile: add OSTMTEXPR Op + 2020-12-01 9a5a11adfa [dev.regabi] cmd/compile: add custom expression Node implementations + 2020-12-01 0f9f27287b [dev.regabi] cmd/compile: remove types.InitSyms + 2020-11-30 41ad4dec99 [dev.regabi] cmd/compile: fix -h + 2020-11-30 ffa68716a0 [dev.regabi] cmd/compile: add custom statement Node implementations + 2020-11-30 2bc814cd18 [dev.regabi] cmd/compile: clean up ONEW node + 2020-11-30 b7f67b75d2 [dev.regabi] cmd/compile: clean up in preparation for expression Nodes + 2020-11-30 5fc192af56 [dev.regabi] cmd/compile: clean up Order.copyExpr TODO + 2020-11-30 7c9b6b1ca2 [dev.regabi] cmd/compile: clean up in preparation for statement Nodes + 2020-11-30 c6de5d8d1f [dev.regabi] cmd/compile: simplify export data representation of nil + 2020-11-30 ae1a337809 [dev.regabi] cmd/compile: remove ODCLFIELD and ODDD ops + 2020-11-30 4e7685ef1a [dev.regabi] cmd/compile: add custom type syntax Node implementations + 2020-11-30 d40869fced [dev.regabi] cmd/compile: move gc.treecopy to ir.DeepCopy + 2020-11-30 f0001e8867 [dev.regabi] cmd/compile: add OTSLICE Op + 2020-11-30 1b84aabb01 [dev.regabi] cmd/compile: move typenod, typenodl to ir.TypeNode, ir.TypeNodeAt [generated] + 2020-11-30 e5c6463e20 [dev.regabi] cmd/compile: add ir.CallPartExpr + 2020-11-30 4eaef981b5 [dev.regabi] cmd/compile: add ir.Closure, ir.ClosureRead + 2020-11-30 e84b27bec5 [dev.regabi] cmd/compile: clean up Name and Func uses + 2020-11-30 c4bd0b7474 [dev.regabi] cmd/compile: make ir.Func the 
ODCLFUNC Node implementation + 2020-11-30 65ae15ac5d [dev.regabi] cmd/compile: move func code from node.go to func.go + 2020-11-30 862f638a89 [dev.regabi] cmd/compile: make ir.Name the ONAME Node implementation + 2020-11-30 f6106d195d [dev.regabi] cmd/compile: add ir.PkgName + 2020-11-30 420809ab08 [dev.regabi] cmd/compile: move name code from node.go to name.go + 2020-11-30 be3d8b40b5 [dev.regabi] cmd/compile: ir.BranchStmt, add ir.EmptyStmt, ir.LabelStmt + 2020-11-30 b09dbc6913 [dev.regabi] cmd/compile: remove SetOp(OEMPTY) calls + 2020-11-30 171787efcd [dev.regabi] cmd/compile: remove Orig, SetOrig from Node interface + 2020-11-30 79a3d5ce15 [dev.regabi] cmd/compile: setup for new Node implementations + 2020-11-30 0c65a2f317 [dev.regabi] cmd/compile: drop Node.HasOpt method + 2020-11-30 65f4ec2fae [dev.regabi] cmd/compile: cleanup label handling + 2020-11-25 88e33f6ecb [dev.regabi] cmd/compile: fix latent import/export issue with break/continue + 2020-11-25 40f5bc4d55 [dev.regabi] merge master 4481ad6eb6 into dev.regabi + 2020-11-25 41f3af9d04 [dev.regabi] cmd/compile: replace *Node type with an interface Node [generated] + 2020-11-25 4d0d9c2c5c [dev.regabi] cmd/compile: introduce ir.INode interface for *ir.Node + 2020-11-25 c26aead50c [dev.regabi] cmd/compile: convert types.Node (a pointer) to types.IRNode (an interface) + 2020-11-25 acb4d1cef1 [dev.regabi] cmd/compile: use Node getters and setters [generated] + 2020-11-25 41ab6689ed [dev.regabi] cmd/compile: rewrite a few ++/--/+=/-= to prep for getters/setters [generated] + 2020-11-25 048debb224 [dev.regabi] cmd/compile: remove gc ↔ ssa cycle hacks + 2020-11-25 84e2bd611f [dev.regabi] cmd/compile: introduce cmd/compile/internal/ir [generated] + 2020-11-25 331b8b4797 [dev.regabi] cmd/compile: move okforconst into its own declaration + 2020-11-25 26b66fd60b [dev.regabi] cmd/compile: introduce cmd/compile/internal/base [generated] + 2020-11-25 eb3086e5a8 [dev.regabi] cmd/compile: finish cleanup of Debug parsing 
+ 2020-11-25 3c240f5d17 [dev.regabi] cmd/compile: clean up debug flag (-d) handling [generated] + 2020-11-25 756661c82a [dev.regabi] cmd/compile: finish cleanup of Flag initialization + 2020-11-25 259fd8adbb [dev.regabi] cmd/compile: fix reporting of overflow + 2020-11-25 18573aea3c [dev.regabi] cmd/compile: clean up flag handling [generated] + 2020-11-25 6e583d65ab [dev.regabi] cmd/compile: simplify fmt handling of Nodes + 2020-11-25 d166ef6876 [dev.regabi] cmd/compile: add Node field getters and setters + 2020-11-25 9262909764 [dev.regabi] cmd/compile: rewrite problematic use of Node fields + 2020-11-25 9e0e43d84d [dev.regabi] cmd/compile: remove uses of dummy + 2020-11-25 4a6b4fd139 [dev.regabi] add FatalfAt and fix Fatalf docs + 2020-11-25 484449c641 [dev.regabi] cmd/compile: remove file mistakenly added by CL 272248 + 2020-11-25 7d72951229 [dev.regabi] cmd/compile: replace Val with go/constant.Value + 2020-11-24 6826287c6b [dev.regabi] cmd/compile: replace evconst with non-mutating version + 2020-11-24 c22bc745c3 [dev.regabi] cmd/compile: delete n.List after collapsing OADDSTR to OLITERAL + 2020-11-24 ee6132a698 [dev.regabi] cmd/compile: introduce OMETHEXPR instead of overloading ONAME + 2020-11-24 4f9d54e41d [dev.regabi] cmd/compile: add OMETHEXPR + 2020-11-24 fd11a32c92 [dev.regabi] cmd/compile: clean up Node.Func + 2020-11-24 8e2106327c [dev.regabi] cmd/compile: clean up tests to know less about Node + 2020-11-24 742c05e3bc [dev.regabi] cmd/compile: prep refactoring for switching to go/constant + 2020-11-24 015423a15b [dev.regabi] strconv: add to bootstrap packages + 2020-11-24 c767d73227 [dev.regabi] cmd/compile: remove CTRUNE + 2020-11-24 6dae48fb0b [dev.regabi] cmd/compile: refactor type/value assertions + 2020-11-24 88a9e2f9ad [dev.regabi] cmd/compile: replace CTNIL with ONIL + 2020-11-24 4af2decf30 [dev.regabi] cmd/compile: add (unused) ONIL constant + 2020-11-24 668e3a598f [dev.regabi] cmd/compile: cleanup type switch typechecking + 2020-11-24 
96f3fb7244 [dev.regabi] go/constant: avoid heap allocations in match + 2020-11-24 1abb12fc97 [dev.regabi] go/constant: optimize BitLen + 2020-11-24 228b732ad9 [dev.regabi] cmd/compile: prepare for package ir + 2020-11-24 e37597f7f0 [dev.regabi] cmd/compile: rename a few 'base' identifiers + 2020-11-24 357c576878 [dev.regabi] cmd/compile: clean up error API + 2020-11-24 5fd949e4bd [dev.regabi] cmd/compile: initialize importMap lazily + 2020-11-24 7b144ed4f7 [dev.regabi] cmd/compile: rewrite concurrentFlagOk to be clearer + 2020-11-24 c754f25241 [dev.regabi] cmd/compile/internal/types: remove Func.Nname + 2020-11-24 c50c7a8c06 [dev.regabi] cmd/compile/internal/gc: refactor to use stop using Func.Nname + 2020-11-24 d5928847de [dev.regabi] cmd/compile/internal/gc: prep for Func.Nname removal refactoring + 2020-11-24 b30c7a8044 [dev.regabi] cmd/compile/internal/gc: add MethodName for getting referenced method + 2020-11-24 e1047302bd [dev.regabi] cmd/compile/internal/types: add pos/sym/typ params to NewField Change-Id: I9c6085171cb95684cc2c71879b915fa650c31dab
Diffstat (limited to 'src/runtime')
-rw-r--r--src/runtime/asm.s5
-rw-r--r--src/runtime/asm_386.s92
-rw-r--r--src/runtime/asm_amd64.s282
-rw-r--r--src/runtime/asm_arm.s117
-rw-r--r--src/runtime/asm_arm64.s107
-rw-r--r--src/runtime/asm_mips64x.s88
-rw-r--r--src/runtime/asm_mipsx.s81
-rw-r--r--src/runtime/asm_ppc64x.s84
-rw-r--r--src/runtime/asm_riscv64.s84
-rw-r--r--src/runtime/asm_s390x.s82
-rw-r--r--src/runtime/asm_wasm.s38
-rw-r--r--src/runtime/cgo/gcc_amd64.S7
-rw-r--r--src/runtime/cgo/gcc_darwin_amd64.c11
-rw-r--r--src/runtime/cgo/gcc_dragonfly_amd64.c7
-rw-r--r--src/runtime/cgo/gcc_freebsd_amd64.c7
-rw-r--r--src/runtime/cgo/gcc_linux_386.c2
-rw-r--r--src/runtime/cgo/gcc_linux_amd64.c9
-rw-r--r--src/runtime/cgo/gcc_linux_arm.c2
-rw-r--r--src/runtime/cgo/gcc_linux_arm64.c2
-rw-r--r--src/runtime/cgo/gcc_netbsd_amd64.c7
-rw-r--r--src/runtime/cgo/gcc_openbsd_amd64.c7
-rw-r--r--src/runtime/cgo/gcc_solaris_amd64.c7
-rw-r--r--src/runtime/cgo/gcc_windows_amd64.c10
-rw-r--r--src/runtime/cgo/libcgo.h2
-rw-r--r--src/runtime/cgo/linux_syscall.c2
-rw-r--r--src/runtime/cgocall.go9
-rw-r--r--src/runtime/cpuprof.go11
-rw-r--r--src/runtime/crash_test.go12
-rw-r--r--src/runtime/defer_test.go28
-rw-r--r--src/runtime/defs_freebsd_386.go9
-rw-r--r--src/runtime/defs_freebsd_amd64.go9
-rw-r--r--src/runtime/defs_freebsd_arm.go9
-rw-r--r--src/runtime/defs_freebsd_arm64.go9
-rw-r--r--src/runtime/defs_openbsd.go14
-rw-r--r--src/runtime/defs_openbsd_amd64.go14
-rw-r--r--src/runtime/defs_openbsd_arm64.go14
-rw-r--r--src/runtime/defs_windows.go128
-rw-r--r--src/runtime/defs_windows_386.go84
-rw-r--r--src/runtime/defs_windows_amd64.go87
-rw-r--r--src/runtime/defs_windows_arm.go85
-rw-r--r--src/runtime/defs_windows_arm64.go83
-rw-r--r--src/runtime/duff_amd64.s128
-rw-r--r--src/runtime/export_test.go6
-rw-r--r--src/runtime/extern.go4
-rw-r--r--src/runtime/histogram.go62
-rw-r--r--src/runtime/histogram_test.go22
-rw-r--r--src/runtime/internal/sys/arch.go34
-rw-r--r--src/runtime/internal/sys/arch_386.go13
-rw-r--r--src/runtime/internal/sys/arch_amd64.go13
-rw-r--r--src/runtime/internal/sys/arch_arm.go13
-rw-r--r--src/runtime/internal/sys/arch_arm64.go13
-rw-r--r--src/runtime/internal/sys/arch_mips.go13
-rw-r--r--src/runtime/internal/sys/arch_mips64.go13
-rw-r--r--src/runtime/internal/sys/arch_mips64le.go13
-rw-r--r--src/runtime/internal/sys/arch_mipsle.go13
-rw-r--r--src/runtime/internal/sys/arch_ppc64.go13
-rw-r--r--src/runtime/internal/sys/arch_ppc64le.go13
-rw-r--r--src/runtime/internal/sys/arch_riscv64.go15
-rw-r--r--src/runtime/internal/sys/arch_s390x.go13
-rw-r--r--src/runtime/internal/sys/arch_wasm.go13
-rw-r--r--src/runtime/internal/sys/stubs.go16
-rw-r--r--src/runtime/mbarrier.go10
-rw-r--r--src/runtime/memclr_386.s2
-rw-r--r--src/runtime/memclr_amd64.s2
-rw-r--r--src/runtime/memclr_arm.s3
-rw-r--r--src/runtime/memclr_arm64.s3
-rw-r--r--src/runtime/memclr_mips64x.s2
-rw-r--r--src/runtime/memclr_mipsx.s2
-rw-r--r--src/runtime/memclr_plan9_386.s2
-rw-r--r--src/runtime/memclr_plan9_amd64.s2
-rw-r--r--src/runtime/memclr_ppc64x.s2
-rw-r--r--src/runtime/memclr_riscv64.s2
-rw-r--r--src/runtime/memclr_s390x.s2
-rw-r--r--src/runtime/memclr_wasm.s2
-rw-r--r--src/runtime/metrics.go53
-rw-r--r--src/runtime/metrics/description.go16
-rw-r--r--src/runtime/metrics/doc.go9
-rw-r--r--src/runtime/metrics/example_test.go96
-rw-r--r--src/runtime/metrics/histogram.go29
-rw-r--r--src/runtime/metrics/sample.go8
-rw-r--r--src/runtime/metrics/value.go4
-rw-r--r--src/runtime/metrics_test.go38
-rw-r--r--src/runtime/mfinal.go7
-rw-r--r--src/runtime/mgcmark.go4
-rw-r--r--src/runtime/mgcscavenge.go2
-rw-r--r--src/runtime/mkduff.go14
-rw-r--r--src/runtime/mmap.go11
-rw-r--r--src/runtime/msan0.go9
-rw-r--r--src/runtime/os2_aix.go46
-rw-r--r--src/runtime/os3_plan9.go4
-rw-r--r--src/runtime/os3_solaris.go25
-rw-r--r--src/runtime/os_aix.go5
-rw-r--r--src/runtime/os_darwin.go10
-rw-r--r--src/runtime/os_dragonfly.go10
-rw-r--r--src/runtime/os_freebsd.go13
-rw-r--r--src/runtime/os_js.go17
-rw-r--r--src/runtime/os_linux.go28
-rw-r--r--src/runtime/os_netbsd.go10
-rw-r--r--src/runtime/os_openbsd.go90
-rw-r--r--src/runtime/os_openbsd_libc.go58
-rw-r--r--src/runtime/os_openbsd_syscall.go46
-rw-r--r--src/runtime/os_openbsd_syscall1.go20
-rw-r--r--src/runtime/os_openbsd_syscall2.go100
-rw-r--r--src/runtime/os_plan9.go17
-rw-r--r--src/runtime/os_windows.go139
-rw-r--r--src/runtime/os_windows_arm64.go14
-rw-r--r--src/runtime/panic.go76
-rw-r--r--src/runtime/pprof/pprof_test.go24
-rw-r--r--src/runtime/print.go26
-rw-r--r--src/runtime/proc.go265
-rw-r--r--src/runtime/race/output_test.go87
-rw-r--r--src/runtime/race_amd64.s26
-rw-r--r--src/runtime/rt0_windows_arm64.s12
-rw-r--r--src/runtime/runtime2.go16
-rw-r--r--src/runtime/runtime_test.go4
-rw-r--r--src/runtime/signal_amd64.go7
-rw-r--r--src/runtime/signal_arm64.go2
-rw-r--r--src/runtime/signal_openbsd.go2
-rw-r--r--src/runtime/signal_unix.go49
-rw-r--r--src/runtime/signal_windows.go63
-rw-r--r--src/runtime/signal_windows_test.go64
-rw-r--r--src/runtime/sigqueue.go34
-rw-r--r--src/runtime/sigqueue_plan9.go7
-rw-r--r--src/runtime/stack.go6
-rw-r--r--src/runtime/stubs.go70
-rw-r--r--src/runtime/stubs2.go12
-rw-r--r--src/runtime/stubs3.go7
-rw-r--r--src/runtime/stubs_386.go3
-rw-r--r--src/runtime/stubs_amd64.go5
-rw-r--r--src/runtime/stubs_arm.go5
-rw-r--r--src/runtime/stubs_arm64.go7
-rw-r--r--src/runtime/stubs_mips64x.go5
-rw-r--r--src/runtime/symtab.go49
-rw-r--r--src/runtime/sys_darwin.go52
-rw-r--r--src/runtime/sys_darwin_amd64.s96
-rw-r--r--src/runtime/sys_freebsd_386.s6
-rw-r--r--src/runtime/sys_freebsd_amd64.s6
-rw-r--r--src/runtime/sys_freebsd_arm.s3
-rw-r--r--src/runtime/sys_freebsd_arm64.s6
-rw-r--r--src/runtime/sys_libc.go53
-rw-r--r--src/runtime/sys_linux_amd64.s17
-rw-r--r--src/runtime/sys_linux_mips64x.s14
-rw-r--r--src/runtime/sys_openbsd.go60
-rw-r--r--src/runtime/sys_openbsd1.go39
-rw-r--r--src/runtime/sys_openbsd2.go256
-rw-r--r--src/runtime/sys_openbsd3.go113
-rw-r--r--src/runtime/sys_openbsd_amd64.s1020
-rw-r--r--src/runtime/sys_openbsd_arm64.s920
-rw-r--r--src/runtime/sys_wasm.go6
-rw-r--r--src/runtime/sys_windows_386.s98
-rw-r--r--src/runtime/sys_windows_amd64.s91
-rw-r--r--src/runtime/sys_windows_arm.s204
-rw-r--r--src/runtime/sys_windows_arm64.s579
-rw-r--r--src/runtime/sys_x86.go6
-rw-r--r--src/runtime/syscall2_solaris.go2
-rw-r--r--src/runtime/syscall_windows.go15
-rw-r--r--src/runtime/testdata/testprog/deadlock.go39
-rw-r--r--src/runtime/testdata/testwinsignal/main.go19
-rw-r--r--src/runtime/textflag.h2
-rw-r--r--src/runtime/time.go6
-rw-r--r--src/runtime/timestub2.go5
-rw-r--r--src/runtime/tls_arm64.h12
-rw-r--r--src/runtime/tls_arm64.s12
-rw-r--r--src/runtime/traceback.go134
-rw-r--r--src/runtime/type.go2
-rw-r--r--src/runtime/wincallback.go29
-rw-r--r--src/runtime/zcallback_windows_arm64.s4012
167 files changed, 9239 insertions, 2806 deletions
diff --git a/src/runtime/asm.s b/src/runtime/asm.s
index 27d8df9e06..72c744925d 100644
--- a/src/runtime/asm.s
+++ b/src/runtime/asm.s
@@ -11,3 +11,8 @@
DATA runtime·no_pointers_stackmap+0x00(SB)/4, $2
DATA runtime·no_pointers_stackmap+0x04(SB)/4, $0
GLOBL runtime·no_pointers_stackmap(SB),RODATA, $8
+
+#ifndef GOARCH_amd64
+TEXT ·sigpanic0<ABIInternal>(SB),NOSPLIT,$0-0
+ JMP ·sigpanic<ABIInternal>(SB)
+#endif
diff --git a/src/runtime/asm_386.s b/src/runtime/asm_386.s
index fa3b1be339..5b0852f780 100644
--- a/src/runtime/asm_386.s
+++ b/src/runtime/asm_386.s
@@ -89,7 +89,7 @@ GLOBL _rt0_386_lib_argc<>(SB),NOPTR, $4
DATA _rt0_386_lib_argv<>(SB)/4, $0
GLOBL _rt0_386_lib_argv<>(SB),NOPTR, $4
-TEXT runtime·rt0_go(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·rt0_go(SB),NOSPLIT|NOFRAME|TOPFRAME,$0
// Copy arguments forward on an even stack.
// Users of this function jump to it, they don't call it.
MOVL 0(SP), AX
@@ -269,35 +269,23 @@ TEXT runtime·asminit(SB),NOSPLIT,$0-0
FLDCW runtime·controlWord64(SB)
RET
+TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
+ CALL runtime·mstart0(SB)
+ RET // not reached
+
/*
* go-routine
*/
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT, $0-4
- MOVL buf+0(FP), AX // gobuf
- LEAL buf+0(FP), BX // caller's SP
- MOVL BX, gobuf_sp(AX)
- MOVL 0(SP), BX // caller's PC
- MOVL BX, gobuf_pc(AX)
- MOVL $0, gobuf_ret(AX)
- // Assert ctxt is zero. See func save.
- MOVL gobuf_ctxt(AX), BX
- TESTL BX, BX
- JZ 2(PC)
- CALL runtime·badctxt(SB)
- get_tls(CX)
- MOVL g(CX), BX
- MOVL BX, gobuf_g(AX)
- RET
-
// void gogo(Gobuf*)
// restore state from Gobuf; longjmp
-TEXT runtime·gogo(SB), NOSPLIT, $8-4
+TEXT runtime·gogo(SB), NOSPLIT, $0-4
MOVL buf+0(FP), BX // gobuf
MOVL gobuf_g(BX), DX
MOVL 0(DX), CX // make sure g != nil
+ JMP gogo<>(SB)
+
+TEXT gogo<>(SB), NOSPLIT, $0
get_tls(CX)
MOVL DX, g(CX)
MOVL gobuf_sp(BX), SP // restore SP
@@ -322,7 +310,6 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-4
MOVL BX, (g_sched+gobuf_pc)(AX)
LEAL fn+0(FP), BX // caller's SP
MOVL BX, (g_sched+gobuf_sp)(AX)
- MOVL AX, (g_sched+gobuf_g)(AX)
// switch to m->g0 & its stack, call fn
MOVL g(DX), BX
@@ -371,18 +358,12 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-4
// switch stacks
// save our state in g->sched. Pretend to
// be systemstack_switch if the G stack is scanned.
- MOVL $runtime·systemstack_switch(SB), (g_sched+gobuf_pc)(AX)
- MOVL SP, (g_sched+gobuf_sp)(AX)
- MOVL AX, (g_sched+gobuf_g)(AX)
+ CALL gosave_systemstack_switch<>(SB)
// switch to g0
get_tls(CX)
MOVL DX, g(CX)
MOVL (g_sched+gobuf_sp)(DX), BX
- // make it look like mstart called systemstack on g0, to stop traceback
- SUBL $4, BX
- MOVL $runtime·mstart(SB), DX
- MOVL DX, 0(BX)
MOVL BX, SP
// call target function
@@ -457,7 +438,6 @@ TEXT runtime·morestack(SB),NOSPLIT,$0-0
// Set g->sched to context in f.
MOVL 0(SP), AX // f's PC
MOVL AX, (g_sched+gobuf_pc)(SI)
- MOVL SI, (g_sched+gobuf_g)(SI)
LEAL 4(SP), AX // f's SP
MOVL AX, (g_sched+gobuf_sp)(SI)
MOVL DX, (g_sched+gobuf_ctxt)(SI)
@@ -477,7 +457,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0
JMP runtime·morestack(SB)
// reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
@@ -489,8 +469,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0
JMP AX
// Note: can't just "JMP NAME(SB)" - bad inlining results.
-TEXT ·reflectcall(SB), NOSPLIT, $0-20
- MOVL argsize+12(FP), CX
+TEXT ·reflectcall(SB), NOSPLIT, $0-28
+ MOVL frameSize+20(FP), CX
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
DISPATCH(runtime·call64, 64)
@@ -522,11 +502,11 @@ TEXT ·reflectcall(SB), NOSPLIT, $0-20
JMP AX
#define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-28; \
NO_LOCAL_POINTERS; \
/* copy arguments to stack */ \
- MOVL argptr+8(FP), SI; \
- MOVL argsize+12(FP), CX; \
+ MOVL stackArgs+8(FP), SI; \
+ MOVL stackArgsSize+12(FP), CX; \
MOVL SP, DI; \
REP;MOVSB; \
/* call function */ \
@@ -535,10 +515,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
PCDATA $PCDATA_StackMapIndex, $0; \
CALL AX; \
/* copy return values back */ \
- MOVL argtype+0(FP), DX; \
- MOVL argptr+8(FP), DI; \
- MOVL argsize+12(FP), CX; \
- MOVL retoffset+16(FP), BX; \
+ MOVL stackArgsType+0(FP), DX; \
+ MOVL stackArgs+8(FP), DI; \
+ MOVL stackArgsSize+12(FP), CX; \
+ MOVL stackRetOffset+16(FP), BX; \
MOVL SP, SI; \
ADDL BX, DI; \
ADDL BX, SI; \
@@ -550,11 +530,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $16-0
+TEXT callRet<>(SB), NOSPLIT, $20-0
MOVL DX, 0(SP)
MOVL DI, 4(SP)
MOVL SI, 8(SP)
MOVL CX, 12(SP)
+ MOVL $0, 16(SP)
CALL runtime·reflectcallmove(SB)
RET
@@ -619,15 +600,18 @@ TEXT runtime·jmpdefer(SB), NOSPLIT, $0-8
MOVL 0(DX), BX
JMP BX // but first run the deferred function
-// Save state of caller into g->sched.
-TEXT gosave<>(SB),NOSPLIT,$0
+// Save state of caller into g->sched,
+// but using fake PC from systemstack_switch.
+// Must only be called from functions with no locals ($0)
+// or else unwinding from systemstack_switch is incorrect.
+TEXT gosave_systemstack_switch<>(SB),NOSPLIT,$0
PUSHL AX
PUSHL BX
get_tls(BX)
MOVL g(BX), BX
LEAL arg+0(FP), AX
MOVL AX, (g_sched+gobuf_sp)(BX)
- MOVL -4(AX), AX
+ MOVL $runtime·systemstack_switch(SB), AX
MOVL AX, (g_sched+gobuf_pc)(BX)
MOVL $0, (g_sched+gobuf_ret)(BX)
// Assert ctxt is zero. See func save.
@@ -639,6 +623,22 @@ TEXT gosave<>(SB),NOSPLIT,$0
POPL AX
RET
+// func asmcgocall_no_g(fn, arg unsafe.Pointer)
+// Call fn(arg) aligned appropriately for the gcc ABI.
+// Called on a system stack, and there may be no g yet (during needm).
+TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-8
+ MOVL fn+0(FP), AX
+ MOVL arg+4(FP), BX
+ MOVL SP, DX
+ SUBL $32, SP
+ ANDL $~15, SP // alignment, perhaps unnecessary
+ MOVL DX, 8(SP) // save old SP
+ MOVL BX, 0(SP) // first argument in x86-32 ABI
+ CALL AX
+ MOVL 8(SP), DX
+ MOVL DX, SP
+ RET
+
// func asmcgocall(fn, arg unsafe.Pointer) int32
// Call fn(arg) on the scheduler stack,
// aligned appropriately for the gcc ABI.
@@ -663,7 +663,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-12
JEQ noswitch
CMPL DI, m_gsignal(BP)
JEQ noswitch
- CALL gosave<>(SB)
+ CALL gosave_systemstack_switch<>(SB)
get_tls(CX)
MOVL SI, g(CX)
MOVL (g_sched+gobuf_sp)(SI), SP
@@ -1311,7 +1311,7 @@ TEXT _cgo_topofstack(SB),NOSPLIT,$0
// The top-most function running on a goroutine
// returns to goexit+PCQuantum.
-TEXT runtime·goexit(SB),NOSPLIT,$0-0
+TEXT runtime·goexit(SB),NOSPLIT|TOPFRAME,$0-0
BYTE $0x90 // NOP
CALL runtime·goexit1(SB) // does not return
// traceback from goexit1 must hit code range of goexit
diff --git a/src/runtime/asm_amd64.s b/src/runtime/asm_amd64.s
index 196252e1dd..a68dc72ae5 100644
--- a/src/runtime/asm_amd64.s
+++ b/src/runtime/asm_amd64.s
@@ -84,9 +84,7 @@ GLOBL _rt0_amd64_lib_argc<>(SB),NOPTR, $8
DATA _rt0_amd64_lib_argv<>(SB)/8, $0
GLOBL _rt0_amd64_lib_argv<>(SB),NOPTR, $8
-// Defined as ABIInternal since it does not use the stack-based Go ABI (and
-// in addition there are no calls to this entry point from Go code).
-TEXT runtime·rt0_go<ABIInternal>(SB),NOSPLIT,$0
+TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
// copy arguments forward on an even stack
MOVQ DI, AX // argc
MOVQ SI, BX // argv
@@ -181,6 +179,10 @@ needtls:
// skip TLS setup on Darwin
JMP ok
#endif
+#ifdef GOOS_openbsd
+ // skip TLS setup on OpenBSD
+ JMP ok
+#endif
LEAQ runtime·m0+m_tls(SB), DI
CALL runtime·settls(SB)
@@ -248,38 +250,26 @@ TEXT runtime·asminit(SB),NOSPLIT,$0-0
// No per-thread init.
RET
+TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
+ CALL runtime·mstart0(SB)
+ RET // not reached
+
/*
* go-routine
*/
-// func gosave(buf *gobuf)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT, $0-8
- MOVQ buf+0(FP), AX // gobuf
- LEAQ buf+0(FP), BX // caller's SP
- MOVQ BX, gobuf_sp(AX)
- MOVQ 0(SP), BX // caller's PC
- MOVQ BX, gobuf_pc(AX)
- MOVQ $0, gobuf_ret(AX)
- MOVQ BP, gobuf_bp(AX)
- // Assert ctxt is zero. See func save.
- MOVQ gobuf_ctxt(AX), BX
- TESTQ BX, BX
- JZ 2(PC)
- CALL runtime·badctxt(SB)
- get_tls(CX)
- MOVQ g(CX), BX
- MOVQ BX, gobuf_g(AX)
- RET
-
// func gogo(buf *gobuf)
// restore state from Gobuf; longjmp
-TEXT runtime·gogo(SB), NOSPLIT, $16-8
+TEXT runtime·gogo(SB), NOSPLIT, $0-8
MOVQ buf+0(FP), BX // gobuf
MOVQ gobuf_g(BX), DX
MOVQ 0(DX), CX // make sure g != nil
+ JMP gogo<>(SB)
+
+TEXT gogo<>(SB), NOSPLIT, $0
get_tls(CX)
MOVQ DX, g(CX)
+ MOVQ DX, R14 // set the g register
MOVQ gobuf_sp(BX), SP // restore SP
MOVQ gobuf_ret(BX), AX
MOVQ gobuf_ctxt(BX), DX
@@ -304,7 +294,6 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-8
MOVQ BX, (g_sched+gobuf_pc)(AX)
LEAQ fn+0(FP), BX // caller's SP
MOVQ BX, (g_sched+gobuf_sp)(AX)
- MOVQ AX, (g_sched+gobuf_g)(AX)
MOVQ BP, (g_sched+gobuf_bp)(AX)
// switch to m->g0 & its stack, call fn
@@ -316,6 +305,7 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-8
MOVQ $runtime·badmcall(SB), AX
JMP AX
MOVQ SI, g(CX) // g = m->g0
+ MOVQ SI, R14 // set the g register
MOVQ (g_sched+gobuf_sp)(SI), SP // sp = m->g0->sched.sp
PUSHQ AX
MOVQ DI, DX
@@ -354,19 +344,12 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8
// switch stacks
// save our state in g->sched. Pretend to
// be systemstack_switch if the G stack is scanned.
- MOVQ $runtime·systemstack_switch(SB), SI
- MOVQ SI, (g_sched+gobuf_pc)(AX)
- MOVQ SP, (g_sched+gobuf_sp)(AX)
- MOVQ AX, (g_sched+gobuf_g)(AX)
- MOVQ BP, (g_sched+gobuf_bp)(AX)
+ CALL gosave_systemstack_switch<>(SB)
// switch to g0
MOVQ DX, g(CX)
+ MOVQ DX, R14 // set the g register
MOVQ (g_sched+gobuf_sp)(DX), BX
- // make it look like mstart called systemstack on g0, to stop traceback
- SUBQ $8, BX
- MOVQ $runtime·mstart(SB), DX
- MOVQ DX, 0(BX)
MOVQ BX, SP
// call target function
@@ -441,7 +424,6 @@ TEXT runtime·morestack(SB),NOSPLIT,$0-0
// Set g->sched to context in f.
MOVQ 0(SP), AX // f's PC
MOVQ AX, (g_sched+gobuf_pc)(SI)
- MOVQ SI, (g_sched+gobuf_g)(SI)
LEAQ 8(SP), AX // f's SP
MOVQ AX, (g_sched+gobuf_sp)(SI)
MOVQ BP, (g_sched+gobuf_bp)(SI)
@@ -460,8 +442,77 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
MOVL $0, DX
JMP runtime·morestack(SB)
+// REFLECTCALL_USE_REGABI is not defined. It must be defined in conjunction with the
+// register constants in the internal/abi package.
+
+#ifdef REFLECTCALL_USE_REGABI
+// spillArgs stores return values from registers to a *internal/abi.RegArgs in R12.
+TEXT spillArgs<>(SB),NOSPLIT,$0-0
+ MOVQ AX, 0(R12)
+ MOVQ BX, 8(R12)
+ MOVQ CX, 16(R12)
+ MOVQ DI, 24(R12)
+ MOVQ SI, 32(R12)
+ MOVQ R8, 40(R12)
+ MOVQ R9, 48(R12)
+ MOVQ R10, 56(R12)
+ MOVQ R11, 64(R12)
+ MOVQ X0, 72(R12)
+ MOVQ X1, 80(R12)
+ MOVQ X2, 88(R12)
+ MOVQ X3, 96(R12)
+ MOVQ X4, 104(R12)
+ MOVQ X5, 112(R12)
+ MOVQ X6, 120(R12)
+ MOVQ X7, 128(R12)
+ MOVQ X8, 136(R12)
+ MOVQ X9, 144(R12)
+ MOVQ X10, 152(R12)
+ MOVQ X11, 160(R12)
+ MOVQ X12, 168(R12)
+ MOVQ X13, 176(R12)
+ MOVQ X14, 184(R12)
+ RET
+
+// unspillArgs loads args into registers from a *internal/abi.RegArgs in R12.
+TEXT unspillArgs<>(SB),NOSPLIT,$0-0
+ MOVQ 0(R12), AX
+ MOVQ 8(R12), BX
+ MOVQ 16(R12), CX
+ MOVQ 24(R12), DI
+ MOVQ 32(R12), SI
+ MOVQ 40(R12), R8
+ MOVQ 48(R12), R9
+ MOVQ 56(R12), R10
+ MOVQ 64(R12), R11
+ MOVQ 72(R12), X0
+ MOVQ 80(R12), X1
+ MOVQ 88(R12), X2
+ MOVQ 96(R12), X3
+ MOVQ 104(R12), X4
+ MOVQ 112(R12), X5
+ MOVQ 120(R12), X6
+ MOVQ 128(R12), X7
+ MOVQ 136(R12), X8
+ MOVQ 144(R12), X9
+ MOVQ 152(R12), X10
+ MOVQ 160(R12), X11
+ MOVQ 168(R12), X12
+ MOVQ 176(R12), X13
+ MOVQ 184(R12), X14
+ RET
+#else
+// spillArgs stores return values from registers to a pointer in R12.
+TEXT spillArgs<>(SB),NOSPLIT,$0-0
+ RET
+
+// unspillArgs loads args into registers from a pointer in R12.
+TEXT unspillArgs<>(SB),NOSPLIT,$0-0
+ RET
+#endif
+
// reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
@@ -473,8 +524,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0
JMP AX
// Note: can't just "JMP NAME(SB)" - bad inlining results.
-TEXT ·reflectcall<ABIInternal>(SB), NOSPLIT, $0-32
- MOVLQZX argsize+24(FP), CX
+TEXT ·reflectcall<ABIInternal>(SB), NOSPLIT, $0-48
+ MOVLQZX frameSize+32(FP), CX
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
DISPATCH(runtime·call64, 64)
@@ -506,23 +557,28 @@ TEXT ·reflectcall<ABIInternal>(SB), NOSPLIT, $0-32
JMP AX
#define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
NO_LOCAL_POINTERS; \
/* copy arguments to stack */ \
- MOVQ argptr+16(FP), SI; \
- MOVLQZX argsize+24(FP), CX; \
+ MOVQ stackArgs+16(FP), SI; \
+ MOVLQZX stackArgsSize+24(FP), CX; \
MOVQ SP, DI; \
REP;MOVSB; \
+ /* set up argument registers */ \
+ MOVQ regArgs+40(FP), R12; \
+ CALL unspillArgs<>(SB); \
/* call function */ \
MOVQ f+8(FP), DX; \
PCDATA $PCDATA_StackMapIndex, $0; \
- MOVQ (DX), AX; \
- CALL AX; \
- /* copy return values back */ \
- MOVQ argtype+0(FP), DX; \
- MOVQ argptr+16(FP), DI; \
- MOVLQZX argsize+24(FP), CX; \
- MOVLQZX retoffset+28(FP), BX; \
+ MOVQ (DX), R12; \
+ CALL R12; \
+ /* copy register return values back */ \
+ MOVQ regArgs+40(FP), R12; \
+ CALL spillArgs<>(SB); \
+ MOVLQZX stackArgsSize+24(FP), CX; \
+ MOVLQZX stackRetOffset+28(FP), BX; \
+ MOVQ stackArgs+16(FP), DI; \
+ MOVQ stackArgsType+0(FP), DX; \
MOVQ SP, SI; \
ADDQ BX, DI; \
ADDQ BX, SI; \
@@ -534,12 +590,13 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
NO_LOCAL_POINTERS
MOVQ DX, 0(SP)
MOVQ DI, 8(SP)
MOVQ SI, 16(SP)
MOVQ CX, 24(SP)
+ MOVQ R12, 32(SP)
CALL runtime·reflectcallmove(SB)
RET
@@ -600,23 +657,46 @@ TEXT runtime·jmpdefer(SB), NOSPLIT, $0-16
MOVQ 0(DX), BX
JMP BX // but first run the deferred function
-// Save state of caller into g->sched. Smashes R8, R9.
-TEXT gosave<>(SB),NOSPLIT,$0
- get_tls(R8)
- MOVQ g(R8), R8
- MOVQ 0(SP), R9
- MOVQ R9, (g_sched+gobuf_pc)(R8)
+// Save state of caller into g->sched,
+// but using fake PC from systemstack_switch.
+// Must only be called from functions with no locals ($0)
+// or else unwinding from systemstack_switch is incorrect.
+// Smashes R9.
+TEXT gosave_systemstack_switch<>(SB),NOSPLIT,$0
+#ifndef GOEXPERIMENT_REGABI
+ get_tls(R14)
+ MOVQ g(R14), R14
+#endif
+ MOVQ $runtime·systemstack_switch(SB), R9
+ MOVQ R9, (g_sched+gobuf_pc)(R14)
LEAQ 8(SP), R9
- MOVQ R9, (g_sched+gobuf_sp)(R8)
- MOVQ $0, (g_sched+gobuf_ret)(R8)
- MOVQ BP, (g_sched+gobuf_bp)(R8)
+ MOVQ R9, (g_sched+gobuf_sp)(R14)
+ MOVQ $0, (g_sched+gobuf_ret)(R14)
+ MOVQ BP, (g_sched+gobuf_bp)(R14)
// Assert ctxt is zero. See func save.
- MOVQ (g_sched+gobuf_ctxt)(R8), R9
+ MOVQ (g_sched+gobuf_ctxt)(R14), R9
TESTQ R9, R9
JZ 2(PC)
CALL runtime·badctxt(SB)
RET
+// func asmcgocall_no_g(fn, arg unsafe.Pointer)
+// Call fn(arg) aligned appropriately for the gcc ABI.
+// Called on a system stack, and there may be no g yet (during needm).
+TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
+ MOVQ fn+0(FP), AX
+ MOVQ arg+8(FP), BX
+ MOVQ SP, DX
+ SUBQ $32, SP
+ ANDQ $~15, SP // alignment
+ MOVQ DX, 8(SP)
+ MOVQ BX, DI // DI = first argument in AMD64 ABI
+ MOVQ BX, CX // CX = first argument in Win64
+ CALL AX
+ MOVQ 8(SP), DX
+ MOVQ DX, SP
+ RET
+
// func asmcgocall(fn, arg unsafe.Pointer) int32
// Call fn(arg) on the scheduler stack,
// aligned appropriately for the gcc ABI.
@@ -645,7 +725,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
// Switch to system stack.
MOVQ m_g0(R8), SI
- CALL gosave<>(SB)
+ CALL gosave_systemstack_switch<>(SB)
MOVQ SI, g(CX)
MOVQ (g_sched+gobuf_sp)(SI), SP
@@ -842,6 +922,7 @@ settls:
TEXT setg_gcc<>(SB),NOSPLIT,$0
get_tls(AX)
MOVQ DI, g(AX)
+ MOVQ DI, R14 // set the g register
RET
TEXT runtime·abort(SB),NOSPLIT,$0-0
@@ -1363,7 +1444,7 @@ TEXT _cgo_topofstack(SB),NOSPLIT,$0
// so as to make it identifiable to traceback (this
// function it used as a sentinel; traceback wants to
// see the func PC, not a wrapper PC).
-TEXT runtime·goexit<ABIInternal>(SB),NOSPLIT,$0-0
+TEXT runtime·goexit<ABIInternal>(SB),NOSPLIT|TOPFRAME,$0-0
BYTE $0x90 // NOP
CALL runtime·goexit1(SB) // does not return
// traceback from goexit1 must hit code range of goexit
@@ -1378,6 +1459,18 @@ TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
POPQ R15
RET
+// Initialize special registers then jump to sigpanic.
+// This function is injected from the signal handler for panicking
+// signals. It is quite painful to set X15 in the signal context,
+// so we do it here.
+TEXT ·sigpanic0<ABIInternal>(SB),NOSPLIT,$0-0
+#ifdef GOEXPERIMENT_REGABI
+ get_tls(R14)
+ MOVQ g(R14), R14
+ XORPS X15, X15
+#endif
+ JMP ·sigpanic<ABIInternal>(SB)
+
// gcWriteBarrier performs a heap pointer write and informs the GC.
//
// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments:
@@ -1386,24 +1479,28 @@ TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
// It clobbers FLAGS. It does not clobber any general-purpose registers,
// but may clobber others (e.g., SSE registers).
// Defined as ABIInternal since it does not use the stack-based Go ABI.
-TEXT runtime·gcWriteBarrier<ABIInternal>(SB),NOSPLIT,$120
+TEXT runtime·gcWriteBarrier<ABIInternal>(SB),NOSPLIT,$112
// Save the registers clobbered by the fast path. This is slightly
// faster than having the caller spill these.
- MOVQ R14, 104(SP)
- MOVQ R13, 112(SP)
+ MOVQ R12, 96(SP)
+ MOVQ R13, 104(SP)
// TODO: Consider passing g.m.p in as an argument so they can be shared
// across a sequence of write barriers.
+#ifdef GOEXPERIMENT_REGABI
+ MOVQ g_m(R14), R13
+#else
get_tls(R13)
MOVQ g(R13), R13
MOVQ g_m(R13), R13
+#endif
MOVQ m_p(R13), R13
- MOVQ (p_wbBuf+wbBuf_next)(R13), R14
+ MOVQ (p_wbBuf+wbBuf_next)(R13), R12
// Increment wbBuf.next position.
- LEAQ 16(R14), R14
- MOVQ R14, (p_wbBuf+wbBuf_next)(R13)
- CMPQ R14, (p_wbBuf+wbBuf_end)(R13)
+ LEAQ 16(R12), R12
+ MOVQ R12, (p_wbBuf+wbBuf_next)(R13)
+ CMPQ R12, (p_wbBuf+wbBuf_end)(R13)
// Record the write.
- MOVQ AX, -16(R14) // Record value
+ MOVQ AX, -16(R12) // Record value
// Note: This turns bad pointer writes into bad
// pointer reads, which could be confusing. We could avoid
// reading from obviously bad pointers, which would
@@ -1411,12 +1508,12 @@ TEXT runtime·gcWriteBarrier<ABIInternal>(SB),NOSPLIT,$120
// patch this up in the signal handler, or use XCHG to
// combine the read and the write.
MOVQ (DI), R13
- MOVQ R13, -8(R14) // Record *slot
+ MOVQ R13, -8(R12) // Record *slot
// Is the buffer full? (flags set in CMPQ above)
JEQ flush
ret:
- MOVQ 104(SP), R14
- MOVQ 112(SP), R13
+ MOVQ 96(SP), R12
+ MOVQ 104(SP), R13
// Do the write.
MOVQ AX, (DI)
RET
@@ -1446,10 +1543,10 @@ flush:
MOVQ R9, 64(SP)
MOVQ R10, 72(SP)
MOVQ R11, 80(SP)
- MOVQ R12, 88(SP)
+ // R12 already saved
// R13 already saved
- // R14 already saved
- MOVQ R15, 96(SP)
+ // R14 is g
+ MOVQ R15, 88(SP)
// This takes arguments DI and AX
CALL runtime·wbBufFlush(SB)
@@ -1465,8 +1562,7 @@ flush:
MOVQ 64(SP), R9
MOVQ 72(SP), R10
MOVQ 80(SP), R11
- MOVQ 88(SP), R12
- MOVQ 96(SP), R15
+ MOVQ 88(SP), R15
JMP ret
// gcWriteBarrierCX is gcWriteBarrier, but with args in DI and CX.
@@ -1728,67 +1824,67 @@ TEXT runtime·debugCallPanicked(SB),NOSPLIT,$16-16
TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ AX, x+0(FP)
MOVQ CX, y+8(FP)
- JMP runtime·goPanicIndex(SB)
+ JMP runtime·goPanicIndex<ABIInternal>(SB)
TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ AX, x+0(FP)
MOVQ CX, y+8(FP)
- JMP runtime·goPanicIndexU(SB)
+ JMP runtime·goPanicIndexU<ABIInternal>(SB)
TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ CX, x+0(FP)
MOVQ DX, y+8(FP)
- JMP runtime·goPanicSliceAlen(SB)
+ JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ CX, x+0(FP)
MOVQ DX, y+8(FP)
- JMP runtime·goPanicSliceAlenU(SB)
+ JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ CX, x+0(FP)
MOVQ DX, y+8(FP)
- JMP runtime·goPanicSliceAcap(SB)
+ JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ CX, x+0(FP)
MOVQ DX, y+8(FP)
- JMP runtime·goPanicSliceAcapU(SB)
+ JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ AX, x+0(FP)
MOVQ CX, y+8(FP)
- JMP runtime·goPanicSliceB(SB)
+ JMP runtime·goPanicSliceB<ABIInternal>(SB)
TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ AX, x+0(FP)
MOVQ CX, y+8(FP)
- JMP runtime·goPanicSliceBU(SB)
+ JMP runtime·goPanicSliceBU<ABIInternal>(SB)
TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ DX, x+0(FP)
MOVQ BX, y+8(FP)
- JMP runtime·goPanicSlice3Alen(SB)
+ JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ DX, x+0(FP)
MOVQ BX, y+8(FP)
- JMP runtime·goPanicSlice3AlenU(SB)
+ JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ DX, x+0(FP)
MOVQ BX, y+8(FP)
- JMP runtime·goPanicSlice3Acap(SB)
+ JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ DX, x+0(FP)
MOVQ BX, y+8(FP)
- JMP runtime·goPanicSlice3AcapU(SB)
+ JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ CX, x+0(FP)
MOVQ DX, y+8(FP)
- JMP runtime·goPanicSlice3B(SB)
+ JMP runtime·goPanicSlice3B<ABIInternal>(SB)
TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ CX, x+0(FP)
MOVQ DX, y+8(FP)
- JMP runtime·goPanicSlice3BU(SB)
+ JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ AX, x+0(FP)
MOVQ CX, y+8(FP)
- JMP runtime·goPanicSlice3C(SB)
+ JMP runtime·goPanicSlice3C<ABIInternal>(SB)
TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
MOVQ AX, x+0(FP)
MOVQ CX, y+8(FP)
- JMP runtime·goPanicSlice3CU(SB)
+ JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
#ifdef GOOS_android
// Use the free TLS_SLOT_APP slot #2 on Android Q.
diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s
index c54b4eb006..f9535bb1bc 100644
--- a/src/runtime/asm_arm.s
+++ b/src/runtime/asm_arm.s
@@ -112,7 +112,7 @@ GLOBL _rt0_arm_lib_argv<>(SB),NOPTR,$4
// using NOFRAME means do not save LR on stack.
// argc is in R0, argv is in R1.
-TEXT runtime·rt0_go(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·rt0_go(SB),NOSPLIT|NOFRAME|TOPFRAME,$0
MOVW $0xcafebabe, R12
// copy arguments forward on an even stack
@@ -202,44 +202,24 @@ TEXT runtime·asminit(SB),NOSPLIT,$0-0
WORD $0xeee1ba10 // vmsr fpscr, r11
RET
+TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
+ BL runtime·mstart0(SB)
+ RET // not reached
+
/*
* go-routine
*/
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB),NOSPLIT|NOFRAME,$0-4
- MOVW buf+0(FP), R0
- MOVW R13, gobuf_sp(R0)
- MOVW LR, gobuf_pc(R0)
- MOVW g, gobuf_g(R0)
- MOVW $0, R11
- MOVW R11, gobuf_lr(R0)
- MOVW R11, gobuf_ret(R0)
- // Assert ctxt is zero. See func save.
- MOVW gobuf_ctxt(R0), R0
- CMP R0, R11
- B.EQ 2(PC)
- CALL runtime·badctxt(SB)
- RET
-
// void gogo(Gobuf*)
// restore state from Gobuf; longjmp
-TEXT runtime·gogo(SB),NOSPLIT,$8-4
+TEXT runtime·gogo(SB),NOSPLIT|NOFRAME,$0-4
MOVW buf+0(FP), R1
MOVW gobuf_g(R1), R0
- BL setg<>(SB)
+ MOVW 0(R0), R2 // make sure g != nil
+ B gogo<>(SB)
- // NOTE: We updated g above, and we are about to update SP.
- // Until LR and PC are also updated, the g/SP/LR/PC quadruple
- // are out of sync and must not be used as the basis of a traceback.
- // Sigprof skips the traceback when SP is not within g's bounds,
- // and when the PC is inside this function, runtime.gogo.
- // Since we are about to update SP, until we complete runtime.gogo
- // we must not leave this function. In particular, no calls
- // after this point: it must be straight-line code until the
- // final B instruction.
- // See large comment in sigprof for more details.
+TEXT gogo<>(SB),NOSPLIT|NOFRAME,$0
+ BL setg<>(SB)
MOVW gobuf_sp(R1), R13 // restore SP==R13
MOVW gobuf_lr(R1), LR
MOVW gobuf_ret(R1), R0
@@ -263,7 +243,6 @@ TEXT runtime·mcall(SB),NOSPLIT|NOFRAME,$0-4
MOVW LR, (g_sched+gobuf_pc)(g)
MOVW $0, R11
MOVW R11, (g_sched+gobuf_lr)(g)
- MOVW g, (g_sched+gobuf_g)(g)
// Switch to m->g0 & its stack, call fn.
MOVW g, R1
@@ -322,24 +301,14 @@ TEXT runtime·systemstack(SB),NOSPLIT,$0-4
switch:
// save our state in g->sched. Pretend to
// be systemstack_switch if the G stack is scanned.
- MOVW $runtime·systemstack_switch(SB), R3
- ADD $4, R3, R3 // get past push {lr}
- MOVW R3, (g_sched+gobuf_pc)(g)
- MOVW R13, (g_sched+gobuf_sp)(g)
- MOVW LR, (g_sched+gobuf_lr)(g)
- MOVW g, (g_sched+gobuf_g)(g)
+ BL gosave_systemstack_switch<>(SB)
// switch to g0
MOVW R0, R5
MOVW R2, R0
BL setg<>(SB)
MOVW R5, R0
- MOVW (g_sched+gobuf_sp)(R2), R3
- // make it look like mstart called systemstack on g0, to stop traceback
- SUB $4, R3, R3
- MOVW $runtime·mstart(SB), R4
- MOVW R4, 0(R3)
- MOVW R3, R13
+ MOVW (g_sched+gobuf_sp)(R2), R13
// call target function
MOVW R0, R7
@@ -421,7 +390,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
B runtime·morestack(SB)
// reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
@@ -432,8 +401,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
MOVW $NAME(SB), R1; \
B (R1)
-TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20
- MOVW argsize+12(FP), R0
+TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-28
+ MOVW frameSize+20(FP), R0
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
DISPATCH(runtime·call64, 64)
@@ -465,11 +434,11 @@ TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20
B (R1)
#define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-28; \
NO_LOCAL_POINTERS; \
/* copy arguments to stack */ \
- MOVW argptr+8(FP), R0; \
- MOVW argsize+12(FP), R2; \
+ MOVW stackArgs+8(FP), R0; \
+ MOVW stackArgsSize+12(FP), R2; \
ADD $4, R13, R1; \
CMP $0, R2; \
B.EQ 5(PC); \
@@ -483,10 +452,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
PCDATA $PCDATA_StackMapIndex, $0; \
BL (R0); \
/* copy return values back */ \
- MOVW argtype+0(FP), R4; \
- MOVW argptr+8(FP), R0; \
- MOVW argsize+12(FP), R2; \
- MOVW retoffset+16(FP), R3; \
+ MOVW stackArgsType+0(FP), R4; \
+ MOVW stackArgs+8(FP), R0; \
+ MOVW stackArgsSize+12(FP), R2; \
+ MOVW stackArgsRetOffset+16(FP), R3; \
ADD $4, R13, R1; \
ADD R3, R1; \
ADD R3, R0; \
@@ -498,11 +467,13 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $16-0
+TEXT callRet<>(SB), NOSPLIT, $20-0
MOVW R4, 4(R13)
MOVW R0, 8(R13)
MOVW R1, 12(R13)
MOVW R2, 16(R13)
+ MOVW $0, R7
+ MOVW R7, 20(R13)
BL runtime·reflectcallmove(SB)
RET
@@ -539,10 +510,6 @@ CALLFN(·call1073741824, 1073741824)
// 1. grab stored LR for caller
// 2. sub 4 bytes to get back to BL deferreturn
// 3. B to fn
-// TODO(rsc): Push things on stack and then use pop
-// to load all registers simultaneously, so that a profiling
-// interrupt can never see mismatched SP/LR/PC.
-// (And double-check that pop is atomic in that way.)
TEXT runtime·jmpdefer(SB),NOSPLIT,$0-8
MOVW 0(R13), LR
MOVW $-4(LR), LR // BL deferreturn
@@ -552,19 +519,39 @@ TEXT runtime·jmpdefer(SB),NOSPLIT,$0-8
MOVW 0(R7), R1
B (R1)
-// Save state of caller into g->sched. Smashes R11.
-TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
- MOVW LR, (g_sched+gobuf_pc)(g)
+// Save state of caller into g->sched,
+// but using fake PC from systemstack_switch.
+// Must only be called from functions with no locals ($0)
+// or else unwinding from systemstack_switch is incorrect.
+// Smashes R11.
+TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0
+ MOVW $runtime·systemstack_switch(SB), R11
+ ADD $4, R11 // get past push {lr}
+ MOVW R11, (g_sched+gobuf_pc)(g)
MOVW R13, (g_sched+gobuf_sp)(g)
MOVW $0, R11
MOVW R11, (g_sched+gobuf_lr)(g)
MOVW R11, (g_sched+gobuf_ret)(g)
- MOVW R11, (g_sched+gobuf_ctxt)(g)
// Assert ctxt is zero. See func save.
MOVW (g_sched+gobuf_ctxt)(g), R11
- CMP $0, R11
+ TST R11, R11
B.EQ 2(PC)
- CALL runtime·badctxt(SB)
+ BL runtime·badctxt(SB)
+ RET
+
+// func asmcgocall_no_g(fn, arg unsafe.Pointer)
+// Call fn(arg) aligned appropriately for the gcc ABI.
+// Called on a system stack, and there may be no g yet (during needm).
+TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-8
+ MOVW fn+0(FP), R1
+ MOVW arg+4(FP), R0
+ MOVW R13, R2
+ SUB $32, R13
+ BIC $0x7, R13 // alignment for gcc ABI
+ MOVW R2, 8(R13)
+ BL (R1)
+ MOVW 8(R13), R2
+ MOVW R2, R13
RET
// func asmcgocall(fn, arg unsafe.Pointer) int32
@@ -590,7 +577,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-12
MOVW m_g0(R8), R3
CMP R3, g
BEQ nosave
- BL gosave<>(SB)
+ BL gosave_systemstack_switch<>(SB)
MOVW R0, R5
MOVW R3, R0
BL setg<>(SB)
diff --git a/src/runtime/asm_arm64.s b/src/runtime/asm_arm64.s
index a09172f0c9..699fc99d58 100644
--- a/src/runtime/asm_arm64.s
+++ b/src/runtime/asm_arm64.s
@@ -8,7 +8,7 @@
#include "funcdata.h"
#include "textflag.h"
-TEXT runtime·rt0_go(SB),NOSPLIT,$0
+TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
// SP = stack; R0 = argc; R1 = argv
SUB $32, RSP
@@ -73,6 +73,10 @@ nocgo:
BL runtime·check(SB)
+#ifdef GOOS_windows
+ BL runtime·wintls(SB)
+#endif
+
MOVW 8(RSP), R0 // copy argc
MOVW R0, -8(RSP)
MOVD 16(RSP), R0 // copy argv
@@ -109,35 +113,26 @@ TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
RET
+TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
+ BL runtime·mstart0(SB)
+ RET // not reached
+
/*
* go-routine
*/
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
- MOVD buf+0(FP), R3
- MOVD RSP, R0
- MOVD R0, gobuf_sp(R3)
- MOVD R29, gobuf_bp(R3)
- MOVD LR, gobuf_pc(R3)
- MOVD g, gobuf_g(R3)
- MOVD ZR, gobuf_lr(R3)
- MOVD ZR, gobuf_ret(R3)
- // Assert ctxt is zero. See func save.
- MOVD gobuf_ctxt(R3), R0
- CBZ R0, 2(PC)
- CALL runtime·badctxt(SB)
- RET
-
// void gogo(Gobuf*)
// restore state from Gobuf; longjmp
-TEXT runtime·gogo(SB), NOSPLIT, $24-8
+TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8
MOVD buf+0(FP), R5
- MOVD gobuf_g(R5), g
+ MOVD gobuf_g(R5), R6
+ MOVD 0(R6), R4 // make sure g != nil
+ B gogo<>(SB)
+
+TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
+ MOVD R6, g
BL runtime·save_g(SB)
- MOVD 0(g), R4 // make sure g is not nil
MOVD gobuf_sp(R5), R0
MOVD R0, RSP
MOVD gobuf_bp(R5), R29
@@ -164,7 +159,6 @@ TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
MOVD R29, (g_sched+gobuf_bp)(g)
MOVD LR, (g_sched+gobuf_pc)(g)
MOVD $0, (g_sched+gobuf_lr)(g)
- MOVD g, (g_sched+gobuf_g)(g)
// Switch to m->g0 & its stack, call fn.
MOVD g, R3
@@ -222,24 +216,12 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8
switch:
// save our state in g->sched. Pretend to
// be systemstack_switch if the G stack is scanned.
- MOVD $runtime·systemstack_switch(SB), R6
- ADD $8, R6 // get past prologue
- MOVD R6, (g_sched+gobuf_pc)(g)
- MOVD RSP, R0
- MOVD R0, (g_sched+gobuf_sp)(g)
- MOVD R29, (g_sched+gobuf_bp)(g)
- MOVD $0, (g_sched+gobuf_lr)(g)
- MOVD g, (g_sched+gobuf_g)(g)
+ BL gosave_systemstack_switch<>(SB)
// switch to g0
MOVD R5, g
BL runtime·save_g(SB)
MOVD (g_sched+gobuf_sp)(g), R3
- // make it look like mstart called systemstack on g0, to stop traceback
- SUB $16, R3
- AND $~15, R3
- MOVD $runtime·mstart(SB), R4
- MOVD R4, 0(R3)
MOVD R3, RSP
MOVD (g_sched+gobuf_bp)(g), R29
@@ -329,7 +311,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
B runtime·morestack(SB)
// reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
@@ -342,8 +324,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
B (R27)
// Note: can't just "B NAME(SB)" - bad inlining results.
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
- MOVWU argsize+24(FP), R16
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+ MOVWU frameSize+32(FP), R16
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
DISPATCH(runtime·call64, 64)
@@ -375,11 +357,11 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
B (R0)
#define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
NO_LOCAL_POINTERS; \
/* copy arguments to stack */ \
- MOVD arg+16(FP), R3; \
- MOVWU argsize+24(FP), R4; \
+ MOVD stackArgs+16(FP), R3; \
+ MOVWU stackArgsSize+24(FP), R4; \
ADD $8, RSP, R5; \
BIC $0xf, R4, R6; \
CBZ R6, 6(PC); \
@@ -405,10 +387,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
PCDATA $PCDATA_StackMapIndex, $0; \
BL (R0); \
/* copy return values back */ \
- MOVD argtype+0(FP), R7; \
- MOVD arg+16(FP), R3; \
- MOVWU n+24(FP), R4; \
- MOVWU retoffset+28(FP), R6; \
+ MOVD stackArgsType+0(FP), R7; \
+ MOVD stackArgs+16(FP), R3; \
+ MOVWU stackArgsSize+24(FP), R4; \
+ MOVWU stackRetOffset+28(FP), R6; \
ADD $8, RSP, R5; \
ADD R6, R5; \
ADD R6, R3; \
@@ -420,11 +402,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $40-0
+TEXT callRet<>(SB), NOSPLIT, $48-0
MOVD R7, 8(RSP)
MOVD R3, 16(RSP)
MOVD R5, 24(RSP)
MOVD R4, 32(RSP)
+ MOVD $0, 40(RSP)
BL runtime·reflectcallmove(SB)
RET
@@ -875,9 +858,15 @@ TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
MOVD 0(R26), R3
B (R3)
-// Save state of caller into g->sched. Smashes R0.
-TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
- MOVD LR, (g_sched+gobuf_pc)(g)
+// Save state of caller into g->sched,
+// but using fake PC from systemstack_switch.
+// Must only be called from functions with no locals ($0)
+// or else unwinding from systemstack_switch is incorrect.
+// Smashes R0.
+TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·systemstack_switch(SB), R0
+ ADD $8, R0 // get past prologue
+ MOVD R0, (g_sched+gobuf_pc)(g)
MOVD RSP, R0
MOVD R0, (g_sched+gobuf_sp)(g)
MOVD R29, (g_sched+gobuf_bp)(g)
@@ -889,6 +878,17 @@ TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
CALL runtime·badctxt(SB)
RET
+// func asmcgocall_no_g(fn, arg unsafe.Pointer)
+// Call fn(arg) aligned appropriately for the gcc ABI.
+// Called on a system stack, and there may be no g yet (during needm).
+TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
+ MOVD fn+0(FP), R1
+ MOVD arg+8(FP), R0
+ SUB $16, RSP // skip over saved frame pointer below RSP
+ BL (R1)
+ ADD $16, RSP // skip over saved frame pointer below RSP
+ RET
+
// func asmcgocall(fn, arg unsafe.Pointer) int32
// Call fn(arg) on the scheduler stack,
// aligned appropriately for the gcc ABI.
@@ -913,8 +913,8 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
BEQ nosave
// Switch to system stack.
- MOVD R0, R9 // gosave<> and save_g might clobber R0
- BL gosave<>(SB)
+ MOVD R0, R9 // gosave_systemstack_switch<> and save_g might clobber R0
+ BL gosave_systemstack_switch<>(SB)
MOVD R3, g
BL runtime·save_g(SB)
MOVD (g_sched+gobuf_sp)(g), R0
@@ -967,7 +967,7 @@ nosave:
BL (R1)
// Restore stack pointer.
MOVD 8(RSP), R2
- MOVD R2, RSP
+ MOVD R2, RSP
MOVD R0, ret+16(FP)
RET
@@ -1115,6 +1115,9 @@ TEXT setg_gcc<>(SB),NOSPLIT,$8
MOVD savedR27-8(SP), R27
RET
+TEXT runtime·emptyfunc(SB),0,$0-0
+ RET
+
TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
MOVD ZR, R0
MOVD (R0), R0
diff --git a/src/runtime/asm_mips64x.s b/src/runtime/asm_mips64x.s
index 19781f7885..c123e96a71 100644
--- a/src/runtime/asm_mips64x.s
+++ b/src/runtime/asm_mips64x.s
@@ -11,7 +11,7 @@
#define REGCTXT R22
-TEXT runtime·rt0_go(SB),NOSPLIT,$0
+TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
// R29 = stack; R4 = argc; R5 = argv
ADDV $-24, R29
@@ -85,30 +85,24 @@ TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
RET
+TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
+ JAL runtime·mstart0(SB)
+ RET // not reached
+
/*
* go-routine
*/
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
- MOVV buf+0(FP), R1
- MOVV R29, gobuf_sp(R1)
- MOVV R31, gobuf_pc(R1)
- MOVV g, gobuf_g(R1)
- MOVV R0, gobuf_lr(R1)
- MOVV R0, gobuf_ret(R1)
- // Assert ctxt is zero. See func save.
- MOVV gobuf_ctxt(R1), R1
- BEQ R1, 2(PC)
- JAL runtime·badctxt(SB)
- RET
-
// void gogo(Gobuf*)
// restore state from Gobuf; longjmp
-TEXT runtime·gogo(SB), NOSPLIT, $16-8
+TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8
MOVV buf+0(FP), R3
- MOVV gobuf_g(R3), g // make sure g is not nil
+ MOVV gobuf_g(R3), R4
+ MOVV 0(R4), R0 // make sure g != nil
+ JMP gogo<>(SB)
+
+TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
+ MOVV R4, g
JAL runtime·save_g(SB)
MOVV 0(g), R2
@@ -132,7 +126,6 @@ TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
MOVV R29, (g_sched+gobuf_sp)(g)
MOVV R31, (g_sched+gobuf_pc)(g)
MOVV R0, (g_sched+gobuf_lr)(g)
- MOVV g, (g_sched+gobuf_g)(g)
// Switch to m->g0 & its stack, call fn.
MOVV g, R1
@@ -184,21 +177,12 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8
switch:
// save our state in g->sched. Pretend to
// be systemstack_switch if the G stack is scanned.
- MOVV $runtime·systemstack_switch(SB), R4
- ADDV $8, R4 // get past prologue
- MOVV R4, (g_sched+gobuf_pc)(g)
- MOVV R29, (g_sched+gobuf_sp)(g)
- MOVV R0, (g_sched+gobuf_lr)(g)
- MOVV g, (g_sched+gobuf_g)(g)
+ JAL gosave_systemstack_switch<>(SB)
// switch to g0
MOVV R3, g
JAL runtime·save_g(SB)
MOVV (g_sched+gobuf_sp)(g), R1
- // make it look like mstart called systemstack on g0, to stop traceback
- ADDV $-8, R1
- MOVV $runtime·mstart(SB), R2
- MOVV R2, 0(R1)
MOVV R1, R29
// call target function
@@ -279,7 +263,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
JMP runtime·morestack(SB)
// reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
@@ -292,8 +276,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
JMP (R4)
// Note: can't just "BR NAME(SB)" - bad inlining results.
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
- MOVWU argsize+24(FP), R1
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+ MOVWU frameSize+32(FP), R1
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
DISPATCH(runtime·call64, 64)
@@ -325,11 +309,11 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
JMP (R4)
#define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
NO_LOCAL_POINTERS; \
/* copy arguments to stack */ \
- MOVV arg+16(FP), R1; \
- MOVWU argsize+24(FP), R2; \
+ MOVV stackArgs+16(FP), R1; \
+ MOVWU stackArgsSize+24(FP), R2; \
MOVV R29, R3; \
ADDV $8, R3; \
ADDV R3, R2; \
@@ -345,10 +329,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
PCDATA $PCDATA_StackMapIndex, $0; \
JAL (R4); \
/* copy return values back */ \
- MOVV argtype+0(FP), R5; \
- MOVV arg+16(FP), R1; \
- MOVWU n+24(FP), R2; \
- MOVWU retoffset+28(FP), R4; \
+ MOVV stackArgsType+0(FP), R5; \
+ MOVV stackArgs+16(FP), R1; \
+ MOVWU stackArgsSize+24(FP), R2; \
+ MOVWU stackRetOffset+28(FP), R4; \
ADDV $8, R29, R3; \
ADDV R4, R3; \
ADDV R4, R1; \
@@ -360,11 +344,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
MOVV R5, 8(R29)
MOVV R1, 16(R29)
MOVV R3, 24(R29)
MOVV R2, 32(R29)
+ MOVV $0, 40(R29)
JAL runtime·reflectcallmove(SB)
RET
@@ -415,9 +400,15 @@ TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
MOVV 0(REGCTXT), R4
JMP (R4)
-// Save state of caller into g->sched. Smashes R1.
-TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
- MOVV R31, (g_sched+gobuf_pc)(g)
+// Save state of caller into g->sched,
+// but using fake PC from systemstack_switch.
+// Must only be called from functions with no locals ($0)
+// or else unwinding from systemstack_switch is incorrect.
+// Smashes R1.
+TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0
+ MOVV $runtime·systemstack_switch(SB), R1
+ ADDV $8, R1 // get past prologue
+ MOVV R1, (g_sched+gobuf_pc)(g)
MOVV R29, (g_sched+gobuf_sp)(g)
MOVV R0, (g_sched+gobuf_lr)(g)
MOVV R0, (g_sched+gobuf_ret)(g)
@@ -427,6 +418,15 @@ TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
JAL runtime·badctxt(SB)
RET
+// func asmcgocall_no_g(fn, arg unsafe.Pointer)
+// Call fn(arg) aligned appropriately for the gcc ABI.
+// Called on a system stack, and there may be no g yet (during needm).
+TEXT ·asmcgocall_no_g(SB),NOSPLIT,$0-16
+ MOVV fn+0(FP), R25
+ MOVV arg+8(FP), R4
+ JAL (R25)
+ RET
+
// func asmcgocall(fn, arg unsafe.Pointer) int32
// Call fn(arg) on the scheduler stack,
// aligned appropriately for the gcc ABI.
@@ -445,7 +445,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
MOVV m_g0(R5), R6
BEQ R6, g, g0
- JAL gosave<>(SB)
+ JAL gosave_systemstack_switch<>(SB)
MOVV R6, g
JAL runtime·save_g(SB)
MOVV (g_sched+gobuf_sp)(g), R29
diff --git a/src/runtime/asm_mipsx.s b/src/runtime/asm_mipsx.s
index ee87d81436..0c7d28dcf7 100644
--- a/src/runtime/asm_mipsx.s
+++ b/src/runtime/asm_mipsx.s
@@ -11,7 +11,7 @@
#define REGCTXT R22
-TEXT runtime·rt0_go(SB),NOSPLIT,$0
+TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
// R29 = stack; R4 = argc; R5 = argv
ADDU $-12, R29
@@ -86,33 +86,25 @@ TEXT runtime·breakpoint(SB),NOSPLIT,$0-0
TEXT runtime·asminit(SB),NOSPLIT,$0-0
RET
+TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
+ JAL runtime·mstart0(SB)
+ RET // not reached
+
/*
* go-routine
*/
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB),NOSPLIT|NOFRAME,$0-4
- MOVW buf+0(FP), R1
- MOVW R29, gobuf_sp(R1)
- MOVW R31, gobuf_pc(R1)
- MOVW g, gobuf_g(R1)
- MOVW R0, gobuf_lr(R1)
- MOVW R0, gobuf_ret(R1)
- // Assert ctxt is zero. See func save.
- MOVW gobuf_ctxt(R1), R1
- BEQ R1, 2(PC)
- JAL runtime·badctxt(SB)
- RET
-
// void gogo(Gobuf*)
// restore state from Gobuf; longjmp
-TEXT runtime·gogo(SB),NOSPLIT,$8-4
+TEXT runtime·gogo(SB),NOSPLIT|NOFRAME,$0-4
MOVW buf+0(FP), R3
- MOVW gobuf_g(R3), g // make sure g is not nil
- JAL runtime·save_g(SB)
+ MOVW gobuf_g(R3), R4
+ MOVW 0(R4), R5 // make sure g != nil
+ JMP gogo<>(SB)
- MOVW 0(g), R2
+TEXT gogo<>(SB),NOSPLIT|NOFRAME,$0
+ MOVW R4, g
+ JAL runtime·save_g(SB)
MOVW gobuf_sp(R3), R29
MOVW gobuf_lr(R3), R31
MOVW gobuf_ret(R3), R1
@@ -133,7 +125,6 @@ TEXT runtime·mcall(SB),NOSPLIT|NOFRAME,$0-4
MOVW R29, (g_sched+gobuf_sp)(g)
MOVW R31, (g_sched+gobuf_pc)(g)
MOVW R0, (g_sched+gobuf_lr)(g)
- MOVW g, (g_sched+gobuf_g)(g)
// Switch to m->g0 & its stack, call fn.
MOVW g, R1
@@ -185,21 +176,12 @@ TEXT runtime·systemstack(SB),NOSPLIT,$0-4
switch:
// save our state in g->sched. Pretend to
// be systemstack_switch if the G stack is scanned.
- MOVW $runtime·systemstack_switch(SB), R4
- ADDU $8, R4 // get past prologue
- MOVW R4, (g_sched+gobuf_pc)(g)
- MOVW R29, (g_sched+gobuf_sp)(g)
- MOVW R0, (g_sched+gobuf_lr)(g)
- MOVW g, (g_sched+gobuf_g)(g)
+ JAL gosave_systemstack_switch<>(SB)
// switch to g0
MOVW R3, g
JAL runtime·save_g(SB)
MOVW (g_sched+gobuf_sp)(g), R1
- // make it look like mstart called systemstack on g0, to stop traceback
- ADDU $-4, R1
- MOVW $runtime·mstart(SB), R2
- MOVW R2, 0(R1)
MOVW R1, R29
// call target function
@@ -280,7 +262,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0
JMP runtime·morestack(SB)
// reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
@@ -291,8 +273,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0
MOVW $NAME(SB), R4; \
JMP (R4)
-TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20
- MOVW argsize+12(FP), R1
+TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-28
+ MOVW frameSize+20(FP), R1
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
@@ -325,11 +307,11 @@ TEXT ·reflectcall(SB),NOSPLIT|NOFRAME,$0-20
JMP (R4)
#define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB),WRAPPER,$MAXSIZE-20; \
+TEXT NAME(SB),WRAPPER,$MAXSIZE-28; \
NO_LOCAL_POINTERS; \
/* copy arguments to stack */ \
- MOVW arg+8(FP), R1; \
- MOVW argsize+12(FP), R2; \
+ MOVW stackArgs+8(FP), R1; \
+ MOVW stackArgsSize+12(FP), R2; \
MOVW R29, R3; \
ADDU $4, R3; \
ADDU R3, R2; \
@@ -345,10 +327,10 @@ TEXT NAME(SB),WRAPPER,$MAXSIZE-20; \
PCDATA $PCDATA_StackMapIndex, $0; \
JAL (R4); \
/* copy return values back */ \
- MOVW argtype+0(FP), R5; \
- MOVW arg+8(FP), R1; \
- MOVW n+12(FP), R2; \
- MOVW retoffset+16(FP), R4; \
+ MOVW stackArgsType+0(FP), R5; \
+ MOVW stackArgs+8(FP), R1; \
+ MOVW stackArgsSize+12(FP), R2; \
+ MOVW stackRetOffset+16(FP), R4; \
ADDU $4, R29, R3; \
ADDU R4, R3; \
ADDU R4, R1; \
@@ -360,11 +342,12 @@ TEXT NAME(SB),WRAPPER,$MAXSIZE-20; \
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $16-0
+TEXT callRet<>(SB), NOSPLIT, $20-0
MOVW R5, 4(R29)
MOVW R1, 8(R29)
MOVW R3, 12(R29)
MOVW R2, 16(R29)
+ MOVW $0, 20(R29)
JAL runtime·reflectcallmove(SB)
RET
@@ -415,9 +398,15 @@ TEXT runtime·jmpdefer(SB),NOSPLIT,$0-8
MOVW 0(REGCTXT), R4
JMP (R4)
-// Save state of caller into g->sched. Smashes R1.
-TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
- MOVW R31, (g_sched+gobuf_pc)(g)
+// Save state of caller into g->sched,
+// but using fake PC from systemstack_switch.
+// Must only be called from functions with no locals ($0)
+// or else unwinding from systemstack_switch is incorrect.
+// Smashes R1.
+TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0
+ MOVW $runtime·systemstack_switch(SB), R1
+ ADDU $8, R1 // get past prologue
+ MOVW R1, (g_sched+gobuf_pc)(g)
MOVW R29, (g_sched+gobuf_sp)(g)
MOVW R0, (g_sched+gobuf_lr)(g)
MOVW R0, (g_sched+gobuf_ret)(g)
@@ -445,7 +434,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-12
MOVW m_g0(R5), R6
BEQ R6, g, g0
- JAL gosave<>(SB)
+ JAL gosave_systemstack_switch<>(SB)
MOVW R6, g
JAL runtime·save_g(SB)
MOVW (g_sched+gobuf_sp)(g), R29
diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s
index dc34c0e4c8..56e73742ea 100644
--- a/src/runtime/asm_ppc64x.s
+++ b/src/runtime/asm_ppc64x.s
@@ -16,7 +16,7 @@
#define cgoCalleeStackSize 32
#endif
-TEXT runtime·rt0_go(SB),NOSPLIT,$0
+TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
// R1 = stack; R3 = argc; R4 = argv; R13 = C TLS base pointer
// initialize essential registers
@@ -124,35 +124,26 @@ TEXT runtime·reginit(SB),NOSPLIT|NOFRAME,$0-0
XOR R0, R0
RET
+TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
+ BL runtime·mstart0(SB)
+ RET // not reached
+
/*
* go-routine
*/
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
- MOVD buf+0(FP), R3
- MOVD R1, gobuf_sp(R3)
- MOVD LR, R31
- MOVD R31, gobuf_pc(R3)
- MOVD g, gobuf_g(R3)
- MOVD R0, gobuf_lr(R3)
- MOVD R0, gobuf_ret(R3)
- // Assert ctxt is zero. See func save.
- MOVD gobuf_ctxt(R3), R3
- CMP R0, R3
- BEQ 2(PC)
- BL runtime·badctxt(SB)
- RET
-
// void gogo(Gobuf*)
// restore state from Gobuf; longjmp
-TEXT runtime·gogo(SB), NOSPLIT, $16-8
+TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8
MOVD buf+0(FP), R5
- MOVD gobuf_g(R5), g // make sure g is not nil
+ MOVD gobuf_g(R5), R6
+ MOVD 0(R6), R4 // make sure g != nil
+ BR gogo<>(SB)
+
+TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
+ MOVD R6, g
BL runtime·save_g(SB)
- MOVD 0(g), R4
MOVD gobuf_sp(R5), R1
MOVD gobuf_lr(R5), R31
#ifndef GOOS_aix
@@ -180,7 +171,6 @@ TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
MOVD LR, R31
MOVD R31, (g_sched+gobuf_pc)(g)
MOVD R0, (g_sched+gobuf_lr)(g)
- MOVD g, (g_sched+gobuf_g)(g)
// Switch to m->g0 & its stack, call fn.
MOVD g, R3
@@ -246,22 +236,12 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8
switch:
// save our state in g->sched. Pretend to
// be systemstack_switch if the G stack is scanned.
- MOVD $runtime·systemstack_switch(SB), R6
- ADD $16, R6 // get past prologue (including r2-setting instructions when they're there)
- MOVD R6, (g_sched+gobuf_pc)(g)
- MOVD R1, (g_sched+gobuf_sp)(g)
- MOVD R0, (g_sched+gobuf_lr)(g)
- MOVD g, (g_sched+gobuf_g)(g)
+ BL gosave_systemstack_switch<>(SB)
// switch to g0
MOVD R5, g
BL runtime·save_g(SB)
- MOVD (g_sched+gobuf_sp)(g), R3
- // make it look like mstart called systemstack on g0, to stop traceback
- SUB $FIXED_FRAME, R3
- MOVD $runtime·mstart(SB), R4
- MOVD R4, 0(R3)
- MOVD R3, R1
+ MOVD (g_sched+gobuf_sp)(g), R1
// call target function
MOVD 0(R11), R12 // code pointer
@@ -356,7 +336,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
BR runtime·morestack(SB)
// reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
@@ -370,8 +350,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
BR (CTR)
// Note: can't just "BR NAME(SB)" - bad inlining results.
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
- MOVWZ argsize+24(FP), R3
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+ MOVWZ frameSize+32(FP), R3
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
DISPATCH(runtime·call64, 64)
@@ -404,11 +384,11 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
BR (CTR)
#define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
NO_LOCAL_POINTERS; \
/* copy arguments to stack */ \
- MOVD arg+16(FP), R3; \
- MOVWZ argsize+24(FP), R4; \
+ MOVD stackArgs+16(FP), R3; \
+ MOVWZ stackArgsSize+24(FP), R4; \
MOVD R1, R5; \
CMP R4, $8; \
BLT tailsetup; \
@@ -456,10 +436,10 @@ callfn: \
MOVD 24(R1), R2; \
#endif \
/* copy return values back */ \
- MOVD argtype+0(FP), R7; \
- MOVD arg+16(FP), R3; \
- MOVWZ n+24(FP), R4; \
- MOVWZ retoffset+28(FP), R6; \
+ MOVD stackArgsType+0(FP), R7; \
+ MOVD stackArgs+16(FP), R3; \
+ MOVWZ stackArgsSize+24(FP), R4; \
+ MOVWZ stackRetOffset+28(FP), R6; \
ADD $FIXED_FRAME, R1, R5; \
ADD R6, R5; \
ADD R6, R3; \
@@ -471,11 +451,12 @@ callfn: \
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
MOVD R7, FIXED_FRAME+0(R1)
MOVD R3, FIXED_FRAME+8(R1)
MOVD R5, FIXED_FRAME+16(R1)
MOVD R4, FIXED_FRAME+24(R1)
+ MOVD $0, FIXED_FRAME+32(R1)
BL runtime·reflectcallmove(SB)
RET
@@ -550,9 +531,14 @@ TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
MOVD R12, CTR
BR (CTR)
-// Save state of caller into g->sched. Smashes R31.
-TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
- MOVD LR, R31
+// Save state of caller into g->sched,
+// but using fake PC from systemstack_switch.
+// Must only be called from functions with no locals ($0)
+// or else unwinding from systemstack_switch is incorrect.
+// Smashes R31.
+TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·systemstack_switch(SB), R31
+ ADD $16, R31 // get past prologue (including r2-setting instructions when they're there)
MOVD R31, (g_sched+gobuf_pc)(g)
MOVD R1, (g_sched+gobuf_sp)(g)
MOVD R0, (g_sched+gobuf_lr)(g)
@@ -593,7 +579,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
MOVD m_g0(R8), R6
CMP R6, g
BEQ g0
- BL gosave<>(SB)
+ BL gosave_systemstack_switch<>(SB)
MOVD R6, g
BL runtime·save_g(SB)
MOVD (g_sched+gobuf_sp)(g), R1
diff --git a/src/runtime/asm_riscv64.s b/src/runtime/asm_riscv64.s
index 01b42dc3de..30f2bd2e4a 100644
--- a/src/runtime/asm_riscv64.s
+++ b/src/runtime/asm_riscv64.s
@@ -7,7 +7,7 @@
#include "textflag.h"
// func rt0_go()
-TEXT runtime·rt0_go(SB),NOSPLIT,$0
+TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
// X2 = stack; A0 = argc; A1 = argv
ADD $-24, X2
MOV A0, 8(X2) // argc
@@ -70,6 +70,10 @@ nocgo:
WORD $0 // crash if reached
RET
+TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
+ CALL runtime·mstart0(SB)
+ RET // not reached
+
// void setg_gcc(G*); set g called from gcc with g in A0
TEXT setg_gcc<>(SB),NOSPLIT,$0-0
MOV A0, g
@@ -114,21 +118,12 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8
switch:
// save our state in g->sched. Pretend to
// be systemstack_switch if the G stack is scanned.
- MOV $runtime·systemstack_switch(SB), T2
- ADD $8, T2 // get past prologue
- MOV T2, (g_sched+gobuf_pc)(g)
- MOV X2, (g_sched+gobuf_sp)(g)
- MOV ZERO, (g_sched+gobuf_lr)(g)
- MOV g, (g_sched+gobuf_g)(g)
+ CALL gosave_systemstack_switch<>(SB)
// switch to g0
MOV T1, g
CALL runtime·save_g(SB)
MOV (g_sched+gobuf_sp)(g), T0
- // make it look like mstart called systemstack on g0, to stop traceback
- ADD $-8, T0
- MOV $runtime·mstart(SB), T1
- MOV T1, 0(T0)
MOV T0, X2
// call target function
@@ -233,12 +228,16 @@ TEXT runtime·return0(SB), NOSPLIT, $0
// restore state from Gobuf; longjmp
// func gogo(buf *gobuf)
-TEXT runtime·gogo(SB), NOSPLIT, $16-8
+TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8
MOV buf+0(FP), T0
- MOV gobuf_g(T0), g // make sure g is not nil
+ MOV gobuf_g(T0), T1
+ MOV 0(T1), ZERO // make sure g != nil
+ JMP gogo<>(SB)
+
+TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
+ MOV T1, g
CALL runtime·save_g(SB)
- MOV (g), ZERO // make sure g is not nil
MOV gobuf_sp(T0), X2
MOV gobuf_lr(T0), RA
MOV gobuf_ret(T0), A0
@@ -279,7 +278,6 @@ TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
MOV X2, (g_sched+gobuf_sp)(g)
MOV RA, (g_sched+gobuf_pc)(g)
MOV ZERO, (g_sched+gobuf_lr)(g)
- MOV g, (g_sched+gobuf_g)(g)
// Switch to m->g0 & its stack, call fn.
MOV g, T0
@@ -297,24 +295,15 @@ TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
JALR RA, T1
JMP runtime·badmcall2(SB)
-// func gosave(buf *gobuf)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
- MOV buf+0(FP), T1
- MOV X2, gobuf_sp(T1)
- MOV RA, gobuf_pc(T1)
- MOV g, gobuf_g(T1)
- MOV ZERO, gobuf_lr(T1)
- MOV ZERO, gobuf_ret(T1)
- // Assert ctxt is zero. See func save.
- MOV gobuf_ctxt(T1), T1
- BEQ T1, ZERO, 2(PC)
- CALL runtime·badctxt(SB)
- RET
-
-// Save state of caller into g->sched. Smashes X31.
-TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
- MOV X1, (g_sched+gobuf_pc)(g)
+// Save state of caller into g->sched,
+// but using fake PC from systemstack_switch.
+// Must only be called from functions with no locals ($0)
+// or else unwinding from systemstack_switch is incorrect.
+// Smashes X31.
+TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0
+ MOV $runtime·systemstack_switch(SB), X31
+ ADD $8, X31 // get past prologue
+ MOV X31, (g_sched+gobuf_pc)(g)
MOV X2, (g_sched+gobuf_sp)(g)
MOV ZERO, (g_sched+gobuf_lr)(g)
MOV ZERO, (g_sched+gobuf_ret)(g)
@@ -342,7 +331,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
MOV m_g0(X6), X7
BEQ X7, g, g0
- CALL gosave<>(SB)
+ CALL gosave_systemstack_switch<>(SB)
MOV X7, g
CALL runtime·save_g(SB)
MOV (g_sched+gobuf_sp)(g), X2
@@ -374,7 +363,7 @@ TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
RET
// reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
@@ -386,13 +375,13 @@ TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
JALR ZERO, T2
// Note: can't just "BR NAME(SB)" - bad inlining results.
-// func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset uint32)
+// func call(stackArgsType *rtype, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
TEXT reflect·call(SB), NOSPLIT, $0-0
JMP ·reflectcall(SB)
-// func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)
-TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
- MOVWU argsize+24(FP), T0
+// func call(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
+TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48
+ MOVWU frameSize+32(FP), T0
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
DISPATCH(runtime·call64, 64)
@@ -424,11 +413,11 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
JALR ZERO, T2
#define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
NO_LOCAL_POINTERS; \
/* copy arguments to stack */ \
- MOV arg+16(FP), A1; \
- MOVWU argsize+24(FP), A2; \
+ MOV stackArgs+16(FP), A1; \
+ MOVWU stackArgsSize+24(FP), A2; \
MOV X2, A3; \
ADD $8, A3; \
ADD A3, A2; \
@@ -444,10 +433,10 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
PCDATA $PCDATA_StackMapIndex, $0; \
JALR RA, A4; \
/* copy return values back */ \
- MOV argtype+0(FP), A5; \
- MOV arg+16(FP), A1; \
- MOVWU n+24(FP), A2; \
- MOVWU retoffset+28(FP), A4; \
+ MOV stackArgsType+0(FP), A5; \
+ MOV stackArgs+16(FP), A1; \
+ MOVWU stackArgsSize+24(FP), A2; \
+ MOVWU stackRetOffset+28(FP), A4; \
ADD $8, X2, A3; \
ADD A4, A3; \
ADD A4, A1; \
@@ -459,11 +448,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
MOV A5, 8(X2)
MOV A1, 16(X2)
MOV A3, 24(X2)
MOV A2, 32(X2)
+ MOV ZERO, 40(X2)
CALL runtime·reflectcallmove(SB)
RET
diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s
index 7baef37324..f9fb1a4c55 100644
--- a/src/runtime/asm_s390x.s
+++ b/src/runtime/asm_s390x.s
@@ -84,7 +84,7 @@ GLOBL _rt0_s390x_lib_argc<>(SB), NOPTR, $8
DATA _rt0_s90x_lib_argv<>(SB)/8, $0
GLOBL _rt0_s390x_lib_argv<>(SB), NOPTR, $8
-TEXT runtime·rt0_go(SB),NOSPLIT,$0
+TEXT runtime·rt0_go(SB),NOSPLIT|TOPFRAME,$0
// R2 = argc; R3 = argv; R11 = temp; R13 = g; R15 = stack pointer
// C TLS base pointer in AR0:AR1
@@ -170,30 +170,24 @@ TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
RET
+TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
+ CALL runtime·mstart0(SB)
+ RET // not reached
+
/*
* go-routine
*/
-// void gosave(Gobuf*)
-// save state in Gobuf; setjmp
-TEXT runtime·gosave(SB), NOSPLIT, $-8-8
- MOVD buf+0(FP), R3
- MOVD R15, gobuf_sp(R3)
- MOVD LR, gobuf_pc(R3)
- MOVD g, gobuf_g(R3)
- MOVD $0, gobuf_lr(R3)
- MOVD $0, gobuf_ret(R3)
- // Assert ctxt is zero. See func save.
- MOVD gobuf_ctxt(R3), R3
- CMPBEQ R3, $0, 2(PC)
- BL runtime·badctxt(SB)
- RET
-
// void gogo(Gobuf*)
// restore state from Gobuf; longjmp
-TEXT runtime·gogo(SB), NOSPLIT, $16-8
+TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8
MOVD buf+0(FP), R5
- MOVD gobuf_g(R5), g // make sure g is not nil
+ MOVD gobuf_g(R5), R6
+ MOVD 0(R6), R7 // make sure g != nil
+ BR gogo<>(SB)
+
+TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0
+ MOVD R6, g
BL runtime·save_g(SB)
MOVD 0(g), R4
@@ -218,7 +212,6 @@ TEXT runtime·mcall(SB), NOSPLIT, $-8-8
MOVD R15, (g_sched+gobuf_sp)(g)
MOVD LR, (g_sched+gobuf_pc)(g)
MOVD $0, (g_sched+gobuf_lr)(g)
- MOVD g, (g_sched+gobuf_g)(g)
// Switch to m->g0 & its stack, call fn.
MOVD g, R3
@@ -271,22 +264,12 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8
switch:
// save our state in g->sched. Pretend to
// be systemstack_switch if the G stack is scanned.
- MOVD $runtime·systemstack_switch(SB), R6
- ADD $16, R6 // get past prologue
- MOVD R6, (g_sched+gobuf_pc)(g)
- MOVD R15, (g_sched+gobuf_sp)(g)
- MOVD $0, (g_sched+gobuf_lr)(g)
- MOVD g, (g_sched+gobuf_g)(g)
+ BL gosave_systemstack_switch<>(SB)
// switch to g0
MOVD R5, g
BL runtime·save_g(SB)
- MOVD (g_sched+gobuf_sp)(g), R3
- // make it look like mstart called systemstack on g0, to stop traceback
- SUB $8, R3
- MOVD $runtime·mstart(SB), R4
- MOVD R4, 0(R3)
- MOVD R3, R15
+ MOVD (g_sched+gobuf_sp)(g), R15
// call target function
MOVD 0(R12), R3 // code pointer
@@ -368,7 +351,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
BR runtime·morestack(SB)
// reflectcall: call a function with the given argument list
-// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
@@ -381,8 +364,8 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
BR (R5)
// Note: can't just "BR NAME(SB)" - bad inlining results.
-TEXT ·reflectcall(SB), NOSPLIT, $-8-32
- MOVWZ argsize+24(FP), R3
+TEXT ·reflectcall(SB), NOSPLIT, $-8-48
+ MOVWZ frameSize+32(FP), R3
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
DISPATCH(runtime·call64, 64)
@@ -414,11 +397,11 @@ TEXT ·reflectcall(SB), NOSPLIT, $-8-32
BR (R5)
#define CALLFN(NAME,MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
NO_LOCAL_POINTERS; \
/* copy arguments to stack */ \
- MOVD arg+16(FP), R4; \
- MOVWZ argsize+24(FP), R5; \
+ MOVD stackArgs+16(FP), R4; \
+ MOVWZ stackArgsSize+24(FP), R5; \
MOVD $stack-MAXSIZE(SP), R6; \
loopArgs: /* copy 256 bytes at a time */ \
CMP R5, $256; \
@@ -439,11 +422,11 @@ callFunction: \
PCDATA $PCDATA_StackMapIndex, $0; \
BL (R8); \
/* copy return values back */ \
- MOVD argtype+0(FP), R7; \
- MOVD arg+16(FP), R6; \
- MOVWZ n+24(FP), R5; \
+ MOVD stackArgsType+0(FP), R7; \
+ MOVD stackArgs+16(FP), R6; \
+ MOVWZ stackArgsSize+24(FP), R5; \
MOVD $stack-MAXSIZE(SP), R4; \
- MOVWZ retoffset+28(FP), R1; \
+ MOVWZ stackRetOffset+28(FP), R1; \
ADD R1, R4; \
ADD R1, R6; \
SUB R1, R5; \
@@ -454,11 +437,12 @@ callFunction: \
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
MOVD R7, 8(R15)
MOVD R6, 16(R15)
MOVD R4, 24(R15)
MOVD R5, 32(R15)
+ MOVD $0, 40(R15)
BL runtime·reflectcallmove(SB)
RET
@@ -512,9 +496,15 @@ TEXT runtime·jmpdefer(SB),NOSPLIT|NOFRAME,$0-16
MOVD 0(R12), R3
BR (R3)
-// Save state of caller into g->sched. Smashes R1.
-TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
- MOVD LR, (g_sched+gobuf_pc)(g)
+// Save state of caller into g->sched,
+// but using fake PC from systemstack_switch.
+// Must only be called from functions with no locals ($0)
+// or else unwinding from systemstack_switch is incorrect.
+// Smashes R1.
+TEXT gosave_systemstack_switch<>(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·systemstack_switch(SB), R1
+ ADD $16, R1 // get past prologue
+ MOVD R1, (g_sched+gobuf_pc)(g)
MOVD R15, (g_sched+gobuf_sp)(g)
MOVD $0, (g_sched+gobuf_lr)(g)
MOVD $0, (g_sched+gobuf_ret)(g)
@@ -543,7 +533,7 @@ TEXT ·asmcgocall(SB),NOSPLIT,$0-20
MOVD g_m(g), R6
MOVD m_g0(R6), R6
CMPBEQ R6, g, g0
- BL gosave<>(SB)
+ BL gosave_systemstack_switch<>(SB)
MOVD R6, g
BL runtime·save_g(SB)
MOVD (g_sched+gobuf_sp)(g), R15
diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s
index fcb780f1dc..33c335ba5a 100644
--- a/src/runtime/asm_wasm.s
+++ b/src/runtime/asm_wasm.s
@@ -7,7 +7,7 @@
#include "funcdata.h"
#include "textflag.h"
-TEXT runtime·rt0_go(SB), NOSPLIT|NOFRAME, $0
+TEXT runtime·rt0_go(SB), NOSPLIT|NOFRAME|TOPFRAME, $0
// save m->g0 = g0
MOVD $runtime·g0(SB), runtime·m0+m_g0(SB)
// save m0 to g0->m
@@ -24,6 +24,10 @@ TEXT runtime·rt0_go(SB), NOSPLIT|NOFRAME, $0
CALL runtime·mstart(SB) // WebAssembly stack will unwind when switching to another goroutine
UNDEF
+TEXT runtime·mstart(SB),NOSPLIT|TOPFRAME,$0
+ CALL runtime·mstart0(SB)
+ RET // not reached
+
DATA runtime·mainPC+0(SB)/8,$runtime·main(SB)
GLOBL runtime·mainPC(SB),RODATA,$8
@@ -34,7 +38,9 @@ TEXT ·checkASM(SB), NOSPLIT, $0-1
TEXT runtime·gogo(SB), NOSPLIT, $0-8
MOVD buf+0(FP), R0
- MOVD gobuf_g(R0), g
+ MOVD gobuf_g(R0), R1
+ MOVD 0(R1), R2 // make sure g != nil
+ MOVD R1, g
MOVD gobuf_sp(R0), SP
// Put target PC at -8(SP), wasm_pc_f_loop will pick it up
@@ -69,7 +75,6 @@ TEXT runtime·mcall(SB), NOSPLIT, $0-8
// save state in g->sched
MOVD 0(SP), g_sched+gobuf_pc(g) // caller's PC
MOVD $fn+0(FP), g_sched+gobuf_sp(g) // caller's SP
- MOVD g, g_sched+gobuf_g(g)
// if g == g0 call badmcall
Get g
@@ -143,7 +148,6 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8
MOVD $runtime·systemstack_switch(SB), g_sched+gobuf_pc(g)
MOVD SP, g_sched+gobuf_sp(g)
- MOVD g, g_sched+gobuf_g(g)
// switch to g0
MOVD R2, g
@@ -270,7 +274,6 @@ TEXT runtime·morestack(SB), NOSPLIT, $0-0
// Set g->sched to context in f.
MOVD 0(SP), g_sched+gobuf_pc(g)
- MOVD g, g_sched+gobuf_g(g)
MOVD $8(SP), g_sched+gobuf_sp(g) // f's SP
MOVD CTXT, g_sched+gobuf_ctxt(g)
@@ -296,14 +299,14 @@ TEXT ·asmcgocall(SB), NOSPLIT, $0-0
JMP NAME(SB); \
End
-TEXT ·reflectcall(SB), NOSPLIT, $0-32
+TEXT ·reflectcall(SB), NOSPLIT, $0-48
I64Load fn+8(FP)
I64Eqz
If
CALLNORESUME runtime·sigpanic<ABIInternal>(SB)
End
- MOVW argsize+24(FP), R0
+ MOVW frameSize+32(FP), R0
DISPATCH(runtime·call16, 16)
DISPATCH(runtime·call32, 32)
@@ -335,18 +338,18 @@ TEXT ·reflectcall(SB), NOSPLIT, $0-32
JMP runtime·badreflectcall(SB)
#define CALLFN(NAME, MAXSIZE) \
-TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \
NO_LOCAL_POINTERS; \
- MOVW argsize+24(FP), R0; \
+ MOVW stackArgsSize+24(FP), R0; \
\
Get R0; \
I64Eqz; \
Not; \
If; \
Get SP; \
- I64Load argptr+16(FP); \
+ I64Load stackArgs+16(FP); \
I32WrapI64; \
- I64Load argsize+24(FP); \
+ I64Load stackArgsSize+24(FP); \
I64Const $3; \
I64ShrU; \
I32WrapI64; \
@@ -359,12 +362,12 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
I64Load $0; \
CALL; \
\
- I64Load32U retoffset+28(FP); \
+ I64Load32U stackRetOffset+28(FP); \
Set R0; \
\
- MOVD argtype+0(FP), RET0; \
+ MOVD stackArgsType+0(FP), RET0; \
\
- I64Load argptr+16(FP); \
+ I64Load stackArgs+16(FP); \
Get R0; \
I64Add; \
Set RET1; \
@@ -375,7 +378,7 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
I64Add; \
Set RET2; \
\
- I64Load32U argsize+24(FP); \
+ I64Load32U stackArgsSize+24(FP); \
Get R0; \
I64Sub; \
Set RET3; \
@@ -387,12 +390,13 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-32; \
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
-TEXT callRet<>(SB), NOSPLIT, $32-0
+TEXT callRet<>(SB), NOSPLIT, $40-0
NO_LOCAL_POINTERS
MOVD RET0, 0(SP)
MOVD RET1, 8(SP)
MOVD RET2, 16(SP)
MOVD RET3, 24(SP)
+ MOVD $0, 32(SP)
CALL runtime·reflectcallmove(SB)
RET
@@ -424,7 +428,7 @@ CALLFN(·call268435456, 268435456)
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)
-TEXT runtime·goexit(SB), NOSPLIT, $0-0
+TEXT runtime·goexit(SB), NOSPLIT|TOPFRAME, $0-0
NOP // first PC of goexit is skipped
CALL runtime·goexit1(SB) // does not return
UNDEF
diff --git a/src/runtime/cgo/gcc_amd64.S b/src/runtime/cgo/gcc_amd64.S
index 17d9d47ef4..d75f864666 100644
--- a/src/runtime/cgo/gcc_amd64.S
+++ b/src/runtime/cgo/gcc_amd64.S
@@ -30,9 +30,14 @@ EXT(crosscall_amd64):
pushq %r15
#if defined(_WIN64)
+ movq %r8, %rdi /* arg of setg_gcc */
+ call *%rdx /* setg_gcc */
call *%rcx /* fn */
#else
- call *%rdi /* fn */
+ movq %rdi, %rbx
+ movq %rdx, %rdi /* arg of setg_gcc */
+ call *%rsi /* setg_gcc */
+ call *%rbx /* fn */
#endif
popq %r15
diff --git a/src/runtime/cgo/gcc_darwin_amd64.c b/src/runtime/cgo/gcc_darwin_amd64.c
index 51410d5026..d5b7fd8fd8 100644
--- a/src/runtime/cgo/gcc_darwin_amd64.c
+++ b/src/runtime/cgo/gcc_darwin_amd64.c
@@ -9,13 +9,16 @@
#include "libcgo_unix.h"
static void* threadentry(void*);
+static void (*setg_gcc)(void*);
void
-x_cgo_init(G *g)
+x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
{
pthread_attr_t attr;
size_t size;
+ setg_gcc = setg;
+
pthread_attr_init(&attr);
pthread_attr_getstacksize(&attr, &size);
g->stacklo = (uintptr)&attr - size + 4096;
@@ -57,10 +60,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
- // Move the g pointer into the slot reserved in thread local storage.
- // Constant must match the one in cmd/link/internal/ld/sym.go.
- asm volatile("movq %0, %%gs:0x30" :: "r"(ts.g));
-
- crosscall_amd64(ts.fn);
+ crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
return nil;
}
diff --git a/src/runtime/cgo/gcc_dragonfly_amd64.c b/src/runtime/cgo/gcc_dragonfly_amd64.c
index d25db91900..0003414bf8 100644
--- a/src/runtime/cgo/gcc_dragonfly_amd64.c
+++ b/src/runtime/cgo/gcc_dragonfly_amd64.c
@@ -61,11 +61,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
- /*
- * Set specific keys.
- */
- setg_gcc((void*)ts.g);
-
- crosscall_amd64(ts.fn);
+ crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
return nil;
}
diff --git a/src/runtime/cgo/gcc_freebsd_amd64.c b/src/runtime/cgo/gcc_freebsd_amd64.c
index 514a2f8a23..6071ec3909 100644
--- a/src/runtime/cgo/gcc_freebsd_amd64.c
+++ b/src/runtime/cgo/gcc_freebsd_amd64.c
@@ -69,11 +69,6 @@ threadentry(void *v)
free(v);
_cgo_tsan_release();
- /*
- * Set specific keys.
- */
- setg_gcc((void*)ts.g);
-
- crosscall_amd64(ts.fn);
+ crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
return nil;
}
diff --git a/src/runtime/cgo/gcc_linux_386.c b/src/runtime/cgo/gcc_linux_386.c
index ece9f933c5..70c942aeb8 100644
--- a/src/runtime/cgo/gcc_linux_386.c
+++ b/src/runtime/cgo/gcc_linux_386.c
@@ -12,7 +12,7 @@ static void *threadentry(void*);
static void (*setg_gcc)(void*);
// This will be set in gcc_android.c for android-specific customization.
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
void
x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
diff --git a/src/runtime/cgo/gcc_linux_amd64.c b/src/runtime/cgo/gcc_linux_amd64.c
index 9134e0df92..c25e7e769b 100644
--- a/src/runtime/cgo/gcc_linux_amd64.c
+++ b/src/runtime/cgo/gcc_linux_amd64.c
@@ -14,7 +14,7 @@ static void* threadentry(void*);
static void (*setg_gcc)(void*);
// This will be set in gcc_android.c for android-specific customization.
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
void
x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
@@ -89,11 +89,6 @@ threadentry(void *v)
free(v);
_cgo_tsan_release();
- /*
- * Set specific keys.
- */
- setg_gcc((void*)ts.g);
-
- crosscall_amd64(ts.fn);
+ crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
return nil;
}
diff --git a/src/runtime/cgo/gcc_linux_arm.c b/src/runtime/cgo/gcc_linux_arm.c
index 61855b96b2..5bc0fee90d 100644
--- a/src/runtime/cgo/gcc_linux_arm.c
+++ b/src/runtime/cgo/gcc_linux_arm.c
@@ -10,7 +10,7 @@
static void *threadentry(void*);
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
static void (*setg_gcc)(void*);
void
diff --git a/src/runtime/cgo/gcc_linux_arm64.c b/src/runtime/cgo/gcc_linux_arm64.c
index 261c884ac9..17ff274fbb 100644
--- a/src/runtime/cgo/gcc_linux_arm64.c
+++ b/src/runtime/cgo/gcc_linux_arm64.c
@@ -12,7 +12,7 @@
static void *threadentry(void*);
-void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase) __attribute__((common));
static void (*setg_gcc)(void*);
void
diff --git a/src/runtime/cgo/gcc_netbsd_amd64.c b/src/runtime/cgo/gcc_netbsd_amd64.c
index dc966fc45b..9f4b031a08 100644
--- a/src/runtime/cgo/gcc_netbsd_amd64.c
+++ b/src/runtime/cgo/gcc_netbsd_amd64.c
@@ -62,11 +62,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
- /*
- * Set specific keys.
- */
- setg_gcc((void*)ts.g);
-
// On NetBSD, a new thread inherits the signal stack of the
// creating thread. That confuses minit, so we remove that
// signal stack here before calling the regular mstart. It's
@@ -78,6 +73,6 @@ threadentry(void *v)
ss.ss_flags = SS_DISABLE;
sigaltstack(&ss, nil);
- crosscall_amd64(ts.fn);
+ crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
return nil;
}
diff --git a/src/runtime/cgo/gcc_openbsd_amd64.c b/src/runtime/cgo/gcc_openbsd_amd64.c
index 34319fb0b8..09d2750f3a 100644
--- a/src/runtime/cgo/gcc_openbsd_amd64.c
+++ b/src/runtime/cgo/gcc_openbsd_amd64.c
@@ -60,11 +60,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
- /*
- * Set specific keys.
- */
- setg_gcc((void*)ts.g);
-
- crosscall_amd64(ts.fn);
+ crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
return nil;
}
diff --git a/src/runtime/cgo/gcc_solaris_amd64.c b/src/runtime/cgo/gcc_solaris_amd64.c
index 079bd12898..e89e844b1e 100644
--- a/src/runtime/cgo/gcc_solaris_amd64.c
+++ b/src/runtime/cgo/gcc_solaris_amd64.c
@@ -72,11 +72,6 @@ threadentry(void *v)
ts = *(ThreadStart*)v;
free(v);
- /*
- * Set specific keys.
- */
- setg_gcc((void*)ts.g);
-
- crosscall_amd64(ts.fn);
+ crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
return nil;
}
diff --git a/src/runtime/cgo/gcc_windows_amd64.c b/src/runtime/cgo/gcc_windows_amd64.c
index 0f8c817f0e..25cfd086dd 100644
--- a/src/runtime/cgo/gcc_windows_amd64.c
+++ b/src/runtime/cgo/gcc_windows_amd64.c
@@ -12,10 +12,12 @@
#include "libcgo_windows.h"
static void threadentry(void*);
+static void (*setg_gcc)(void*);
void
-x_cgo_init(G *g)
+x_cgo_init(G *g, void (*setg)(void*), void **tlsg, void **tlsbase)
{
+ setg_gcc = setg;
}
@@ -46,10 +48,8 @@ threadentry(void *v)
*/
asm volatile (
"movq %0, %%gs:0x28\n" // MOVL tls0, 0x28(GS)
- "movq %%gs:0x28, %%rax\n" // MOVQ 0x28(GS), tmp
- "movq %1, 0(%%rax)\n" // MOVQ g, 0(GS)
- :: "r"(ts.tls), "r"(ts.g) : "%rax"
+ :: "r"(ts.tls)
);
- crosscall_amd64(ts.fn);
+ crosscall_amd64(ts.fn, setg_gcc, (void*)ts.g);
}
diff --git a/src/runtime/cgo/libcgo.h b/src/runtime/cgo/libcgo.h
index aba500a301..af4960e7e9 100644
--- a/src/runtime/cgo/libcgo.h
+++ b/src/runtime/cgo/libcgo.h
@@ -66,7 +66,7 @@ uintptr_t _cgo_wait_runtime_init_done(void);
/*
* Call fn in the 6c world.
*/
-void crosscall_amd64(void (*fn)(void));
+void crosscall_amd64(void (*fn)(void), void (*setg_gcc)(void*), void *g);
/*
* Call fn in the 8c world.
diff --git a/src/runtime/cgo/linux_syscall.c b/src/runtime/cgo/linux_syscall.c
index 56f3d67d8b..59761c8b40 100644
--- a/src/runtime/cgo/linux_syscall.c
+++ b/src/runtime/cgo/linux_syscall.c
@@ -32,7 +32,7 @@ typedef struct {
#define SET_RETVAL(fn) \
uintptr_t ret = (uintptr_t) fn ; \
- if (ret == -1) { \
+ if (ret == (uintptr_t) -1) { \
x->retval = (uintptr_t) errno; \
} else \
x->retval = ret
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index 20cacd6043..534a2c4295 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -306,14 +306,7 @@ func unwindm(restore *bool) {
// unwind of g's stack (see comment at top of file).
mp := acquirem()
sched := &mp.g0.sched
- switch GOARCH {
- default:
- throw("unwindm not implemented")
- case "386", "amd64", "arm", "ppc64", "ppc64le", "mips64", "mips64le", "s390x", "mips", "mipsle", "riscv64":
- sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + sys.MinFrameSize))
- case "arm64":
- sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
- }
+ sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + alignUp(sys.MinFrameSize, sys.StackAlign)))
// Do the accounting that cgocall will not have a chance to do
// during an unwind.
diff --git a/src/runtime/cpuprof.go b/src/runtime/cpuprof.go
index 9bfdfe7c74..e5d0193b9c 100644
--- a/src/runtime/cpuprof.go
+++ b/src/runtime/cpuprof.go
@@ -103,7 +103,16 @@ func (p *cpuProfile) add(gp *g, stk []uintptr) {
// because otherwise its write barrier behavior may not
// be correct. See the long comment there before
// changing the argument here.
- cpuprof.log.write(&gp.labels, nanotime(), hdr[:], stk)
+ //
+ // Note: it can happen on Windows, where we are calling
+ // p.add with a gp that is not the current g, that gp is nil,
+ // meaning we interrupted a system thread with no g.
+ // Avoid faulting in that case.
+ var tagPtr *unsafe.Pointer
+ if gp != nil {
+ tagPtr = &gp.labels
+ }
+ cpuprof.log.write(tagPtr, nanotime(), hdr[:], stk)
}
atomic.Store(&prof.signalLock, 0)
diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go
index 58ad4f3eba..e5bd7973b7 100644
--- a/src/runtime/crash_test.go
+++ b/src/runtime/crash_test.go
@@ -294,6 +294,18 @@ func TestRecursivePanic4(t *testing.T) {
}
+func TestRecursivePanic5(t *testing.T) {
+ output := runTestProg(t, "testprog", "RecursivePanic5")
+ want := `first panic
+second panic
+panic: third panic
+`
+ if !strings.HasPrefix(output, want) {
+ t.Fatalf("output does not start with %q:\n%s", want, output)
+ }
+
+}
+
func TestGoexitCrash(t *testing.T) {
// External linking brings in cgo, causing deadlock detection not working.
testenv.MustInternalLink(t)
diff --git a/src/runtime/defer_test.go b/src/runtime/defer_test.go
index 5ac0814564..9a40ea1984 100644
--- a/src/runtime/defer_test.go
+++ b/src/runtime/defer_test.go
@@ -410,3 +410,31 @@ func rec1(max int) {
rec1(max - 1)
}
}
+
+func TestIssue43921(t *testing.T) {
+ defer func() {
+ expect(t, 1, recover())
+ }()
+ func() {
+ // Prevent open-coded defers
+ for {
+ defer func() {}()
+ break
+ }
+
+ defer func() {
+ defer func() {
+ expect(t, 4, recover())
+ }()
+ panic(4)
+ }()
+ panic(1)
+
+ }()
+}
+
+func expect(t *testing.T, n int, err interface{}) {
+ if n != err {
+ t.Fatalf("have %v, want %v", err, n)
+ }
+}
diff --git a/src/runtime/defs_freebsd_386.go b/src/runtime/defs_freebsd_386.go
index 767755425c..f822934d58 100644
--- a/src/runtime/defs_freebsd_386.go
+++ b/src/runtime/defs_freebsd_386.go
@@ -13,10 +13,11 @@ const (
)
const (
- _EINTR = 0x4
- _EFAULT = 0xe
- _EAGAIN = 0x23
- _ENOSYS = 0x4e
+ _EINTR = 0x4
+ _EFAULT = 0xe
+ _EAGAIN = 0x23
+ _ENOSYS = 0x4e
+ _ETIMEDOUT = 0x3c
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x100000
diff --git a/src/runtime/defs_freebsd_amd64.go b/src/runtime/defs_freebsd_amd64.go
index 5a833426fd..0b696cf227 100644
--- a/src/runtime/defs_freebsd_amd64.go
+++ b/src/runtime/defs_freebsd_amd64.go
@@ -13,10 +13,11 @@ const (
)
const (
- _EINTR = 0x4
- _EFAULT = 0xe
- _EAGAIN = 0x23
- _ENOSYS = 0x4e
+ _EINTR = 0x4
+ _EFAULT = 0xe
+ _EAGAIN = 0x23
+ _ENOSYS = 0x4e
+ _ETIMEDOUT = 0x3c
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x100000
diff --git a/src/runtime/defs_freebsd_arm.go b/src/runtime/defs_freebsd_arm.go
index b55dfd88cf..b6f3e790cf 100644
--- a/src/runtime/defs_freebsd_arm.go
+++ b/src/runtime/defs_freebsd_arm.go
@@ -13,10 +13,11 @@ const (
)
const (
- _EINTR = 0x4
- _EFAULT = 0xe
- _EAGAIN = 0x23
- _ENOSYS = 0x4e
+ _EINTR = 0x4
+ _EFAULT = 0xe
+ _EAGAIN = 0x23
+ _ENOSYS = 0x4e
+ _ETIMEDOUT = 0x3c
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x100000
diff --git a/src/runtime/defs_freebsd_arm64.go b/src/runtime/defs_freebsd_arm64.go
index 5b9d504ba6..0759a1238f 100644
--- a/src/runtime/defs_freebsd_arm64.go
+++ b/src/runtime/defs_freebsd_arm64.go
@@ -13,10 +13,11 @@ const (
)
const (
- _EINTR = 0x4
- _EFAULT = 0xe
- _EAGAIN = 0x23
- _ENOSYS = 0x4e
+ _EINTR = 0x4
+ _EFAULT = 0xe
+ _EAGAIN = 0x23
+ _ENOSYS = 0x4e
+ _ETIMEDOUT = 0x3c
_O_NONBLOCK = 0x4
_O_CLOEXEC = 0x100000
diff --git a/src/runtime/defs_openbsd.go b/src/runtime/defs_openbsd.go
index 53e9d59a3c..ff7e21c71e 100644
--- a/src/runtime/defs_openbsd.go
+++ b/src/runtime/defs_openbsd.go
@@ -54,6 +54,13 @@ const (
SA_RESTART = C.SA_RESTART
SA_ONSTACK = C.SA_ONSTACK
+ PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED
+
+ F_SETFD = C.F_SETFD
+ F_GETFL = C.F_GETFL
+ F_SETFL = C.F_SETFL
+ FD_CLOEXEC = C.FD_CLOEXEC
+
SIGHUP = C.SIGHUP
SIGINT = C.SIGINT
SIGQUIT = C.SIGQUIT
@@ -129,3 +136,10 @@ type Timeval C.struct_timeval
type Itimerval C.struct_itimerval
type KeventT C.struct_kevent
+
+type Pthread C.pthread_t
+type PthreadAttr C.pthread_attr_t
+type PthreadCond C.pthread_cond_t
+type PthreadCondAttr C.pthread_condattr_t
+type PthreadMutex C.pthread_mutex_t
+type PthreadMutexAttr C.pthread_mutexattr_t
diff --git a/src/runtime/defs_openbsd_amd64.go b/src/runtime/defs_openbsd_amd64.go
index c187a98ae0..46f1245201 100644
--- a/src/runtime/defs_openbsd_amd64.go
+++ b/src/runtime/defs_openbsd_amd64.go
@@ -30,6 +30,13 @@ const (
_SA_RESTART = 0x2
_SA_ONSTACK = 0x1
+ _PTHREAD_CREATE_DETACHED = 0x1
+
+ _F_SETFD = 0x2
+ _F_GETFL = 0x3
+ _F_SETFL = 0x4
+ _FD_CLOEXEC = 0x1
+
_SIGHUP = 0x1
_SIGINT = 0x2
_SIGQUIT = 0x3
@@ -177,3 +184,10 @@ type keventt struct {
data int64
udata *byte
}
+
+type pthread uintptr
+type pthreadattr uintptr
+type pthreadcond uintptr
+type pthreadcondattr uintptr
+type pthreadmutex uintptr
+type pthreadmutexattr uintptr
diff --git a/src/runtime/defs_openbsd_arm64.go b/src/runtime/defs_openbsd_arm64.go
index 628f4bc5a5..d2b947feb2 100644
--- a/src/runtime/defs_openbsd_arm64.go
+++ b/src/runtime/defs_openbsd_arm64.go
@@ -31,6 +31,13 @@ const (
_SA_RESTART = 0x2
_SA_ONSTACK = 0x1
+ _PTHREAD_CREATE_DETACHED = 0x1
+
+ _F_SETFD = 0x2
+ _F_GETFL = 0x3
+ _F_SETFL = 0x4
+ _FD_CLOEXEC = 0x1
+
_SIGHUP = 0x1
_SIGINT = 0x2
_SIGQUIT = 0x3
@@ -157,3 +164,10 @@ type keventt struct {
data int64
udata *byte
}
+
+type pthread uintptr
+type pthreadattr uintptr
+type pthreadcond uintptr
+type pthreadcondattr uintptr
+type pthreadmutex uintptr
+type pthreadmutexattr uintptr
diff --git a/src/runtime/defs_windows.go b/src/runtime/defs_windows.go
index 43f358d56a..8d4e38120e 100644
--- a/src/runtime/defs_windows.go
+++ b/src/runtime/defs_windows.go
@@ -2,77 +2,83 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build ignore
-
-/*
-Input to cgo.
-
-GOARCH=amd64 go tool cgo -cdefs defs_windows.go > defs_windows_amd64.h
-GOARCH=386 go tool cgo -cdefs defs_windows.go > defs_windows_386.h
-*/
+// Windows architecture-independent definitions.
package runtime
-/*
-#include <signal.h>
-#include <stdarg.h>
-#include <windef.h>
-#include <winbase.h>
-#include <wincon.h>
+const (
+ _PROT_NONE = 0
+ _PROT_READ = 1
+ _PROT_WRITE = 2
+ _PROT_EXEC = 4
-#ifndef _X86_
-typedef struct {} FLOATING_SAVE_AREA;
-#endif
-#ifndef _AMD64_
-typedef struct {} M128A;
-#endif
-*/
-import "C"
+ _MAP_ANON = 1
+ _MAP_PRIVATE = 2
-const (
- PROT_NONE = 0
- PROT_READ = 1
- PROT_WRITE = 2
- PROT_EXEC = 4
+ _DUPLICATE_SAME_ACCESS = 0x2
+ _THREAD_PRIORITY_HIGHEST = 0x2
- MAP_ANON = 1
- MAP_PRIVATE = 2
+ _SIGINT = 0x2
+ _SIGTERM = 0xF
+ _CTRL_C_EVENT = 0x0
+ _CTRL_BREAK_EVENT = 0x1
+ _CTRL_CLOSE_EVENT = 0x2
+ _CTRL_LOGOFF_EVENT = 0x5
+ _CTRL_SHUTDOWN_EVENT = 0x6
- DUPLICATE_SAME_ACCESS = C.DUPLICATE_SAME_ACCESS
- THREAD_PRIORITY_HIGHEST = C.THREAD_PRIORITY_HIGHEST
+ _EXCEPTION_ACCESS_VIOLATION = 0xc0000005
+ _EXCEPTION_BREAKPOINT = 0x80000003
+ _EXCEPTION_ILLEGAL_INSTRUCTION = 0xc000001d
+ _EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d
+ _EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e
+ _EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f
+ _EXCEPTION_FLT_OVERFLOW = 0xc0000091
+ _EXCEPTION_FLT_UNDERFLOW = 0xc0000093
+ _EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094
+ _EXCEPTION_INT_OVERFLOW = 0xc0000095
- SIGINT = C.SIGINT
- SIGTERM = C.SIGTERM
- CTRL_C_EVENT = C.CTRL_C_EVENT
- CTRL_BREAK_EVENT = C.CTRL_BREAK_EVENT
- CTRL_CLOSE_EVENT = C.CTRL_CLOSE_EVENT
- CTRL_LOGOFF_EVENT = C.CTRL_LOGOFF_EVENT
- CTRL_SHUTDOWN_EVENT = C.CTRL_SHUTDOWN_EVENT
+ _INFINITE = 0xffffffff
+ _WAIT_TIMEOUT = 0x102
- CONTEXT_CONTROL = C.CONTEXT_CONTROL
- CONTEXT_FULL = C.CONTEXT_FULL
+ _EXCEPTION_CONTINUE_EXECUTION = -0x1
+ _EXCEPTION_CONTINUE_SEARCH = 0x0
+)
- EXCEPTION_ACCESS_VIOLATION = C.STATUS_ACCESS_VIOLATION
- EXCEPTION_BREAKPOINT = C.STATUS_BREAKPOINT
- EXCEPTION_FLT_DENORMAL_OPERAND = C.STATUS_FLOAT_DENORMAL_OPERAND
- EXCEPTION_FLT_DIVIDE_BY_ZERO = C.STATUS_FLOAT_DIVIDE_BY_ZERO
- EXCEPTION_FLT_INEXACT_RESULT = C.STATUS_FLOAT_INEXACT_RESULT
- EXCEPTION_FLT_OVERFLOW = C.STATUS_FLOAT_OVERFLOW
- EXCEPTION_FLT_UNDERFLOW = C.STATUS_FLOAT_UNDERFLOW
- EXCEPTION_INT_DIVIDE_BY_ZERO = C.STATUS_INTEGER_DIVIDE_BY_ZERO
- EXCEPTION_INT_OVERFLOW = C.STATUS_INTEGER_OVERFLOW
+type systeminfo struct {
+ anon0 [4]byte
+ dwpagesize uint32
+ lpminimumapplicationaddress *byte
+ lpmaximumapplicationaddress *byte
+ dwactiveprocessormask uintptr
+ dwnumberofprocessors uint32
+ dwprocessortype uint32
+ dwallocationgranularity uint32
+ wprocessorlevel uint16
+ wprocessorrevision uint16
+}
- INFINITE = C.INFINITE
- WAIT_TIMEOUT = C.WAIT_TIMEOUT
+type exceptionrecord struct {
+ exceptioncode uint32
+ exceptionflags uint32
+ exceptionrecord *exceptionrecord
+ exceptionaddress *byte
+ numberparameters uint32
+ exceptioninformation [15]uintptr
+}
- EXCEPTION_CONTINUE_EXECUTION = C.EXCEPTION_CONTINUE_EXECUTION
- EXCEPTION_CONTINUE_SEARCH = C.EXCEPTION_CONTINUE_SEARCH
-)
+type overlapped struct {
+ internal uintptr
+ internalhigh uintptr
+ anon0 [8]byte
+ hevent *byte
+}
-type SystemInfo C.SYSTEM_INFO
-type ExceptionRecord C.EXCEPTION_RECORD
-type FloatingSaveArea C.FLOATING_SAVE_AREA
-type M128a C.M128A
-type Context C.CONTEXT
-type Overlapped C.OVERLAPPED
-type MemoryBasicInformation C.MEMORY_BASIC_INFORMATION
+type memoryBasicInformation struct {
+ baseAddress uintptr
+ allocationBase uintptr
+ allocationProtect uint32
+ regionSize uintptr
+ state uint32
+ protect uint32
+ type_ uint32
+}
diff --git a/src/runtime/defs_windows_386.go b/src/runtime/defs_windows_386.go
index 3c5057b86f..37fe74c542 100644
--- a/src/runtime/defs_windows_386.go
+++ b/src/runtime/defs_windows_386.go
@@ -1,69 +1,10 @@
-// created by cgo -cdefs and then converted to Go
-// cgo -cdefs defs_windows.go
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package runtime
-const (
- _PROT_NONE = 0
- _PROT_READ = 1
- _PROT_WRITE = 2
- _PROT_EXEC = 4
-
- _MAP_ANON = 1
- _MAP_PRIVATE = 2
-
- _DUPLICATE_SAME_ACCESS = 0x2
- _THREAD_PRIORITY_HIGHEST = 0x2
-
- _SIGINT = 0x2
- _SIGTERM = 0xF
- _CTRL_C_EVENT = 0x0
- _CTRL_BREAK_EVENT = 0x1
- _CTRL_CLOSE_EVENT = 0x2
- _CTRL_LOGOFF_EVENT = 0x5
- _CTRL_SHUTDOWN_EVENT = 0x6
-
- _CONTEXT_CONTROL = 0x10001
- _CONTEXT_FULL = 0x10007
-
- _EXCEPTION_ACCESS_VIOLATION = 0xc0000005
- _EXCEPTION_BREAKPOINT = 0x80000003
- _EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d
- _EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e
- _EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f
- _EXCEPTION_FLT_OVERFLOW = 0xc0000091
- _EXCEPTION_FLT_UNDERFLOW = 0xc0000093
- _EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094
- _EXCEPTION_INT_OVERFLOW = 0xc0000095
-
- _INFINITE = 0xffffffff
- _WAIT_TIMEOUT = 0x102
-
- _EXCEPTION_CONTINUE_EXECUTION = -0x1
- _EXCEPTION_CONTINUE_SEARCH = 0x0
-)
-
-type systeminfo struct {
- anon0 [4]byte
- dwpagesize uint32
- lpminimumapplicationaddress *byte
- lpmaximumapplicationaddress *byte
- dwactiveprocessormask uint32
- dwnumberofprocessors uint32
- dwprocessortype uint32
- dwallocationgranularity uint32
- wprocessorlevel uint16
- wprocessorrevision uint16
-}
-
-type exceptionrecord struct {
- exceptioncode uint32
- exceptionflags uint32
- exceptionrecord *exceptionrecord
- exceptionaddress *byte
- numberparameters uint32
- exceptioninformation [15]uint32
-}
+const _CONTEXT_CONTROL = 0x10001
type floatingsavearea struct {
controlword uint32
@@ -130,20 +71,3 @@ func dumpregs(r *context) {
print("fs ", hex(r.segfs), "\n")
print("gs ", hex(r.seggs), "\n")
}
-
-type overlapped struct {
- internal uint32
- internalhigh uint32
- anon0 [8]byte
- hevent *byte
-}
-
-type memoryBasicInformation struct {
- baseAddress uintptr
- allocationBase uintptr
- allocationProtect uint32
- regionSize uintptr
- state uint32
- protect uint32
- type_ uint32
-}
diff --git a/src/runtime/defs_windows_amd64.go b/src/runtime/defs_windows_amd64.go
index ebb1506e2f..ac636a68ec 100644
--- a/src/runtime/defs_windows_amd64.go
+++ b/src/runtime/defs_windows_amd64.go
@@ -1,70 +1,10 @@
-// created by cgo -cdefs and then converted to Go
-// cgo -cdefs defs_windows.go
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
package runtime
-const (
- _PROT_NONE = 0
- _PROT_READ = 1
- _PROT_WRITE = 2
- _PROT_EXEC = 4
-
- _MAP_ANON = 1
- _MAP_PRIVATE = 2
-
- _DUPLICATE_SAME_ACCESS = 0x2
- _THREAD_PRIORITY_HIGHEST = 0x2
-
- _SIGINT = 0x2
- _SIGTERM = 0xF
- _CTRL_C_EVENT = 0x0
- _CTRL_BREAK_EVENT = 0x1
- _CTRL_CLOSE_EVENT = 0x2
- _CTRL_LOGOFF_EVENT = 0x5
- _CTRL_SHUTDOWN_EVENT = 0x6
-
- _CONTEXT_CONTROL = 0x100001
- _CONTEXT_FULL = 0x10000b
-
- _EXCEPTION_ACCESS_VIOLATION = 0xc0000005
- _EXCEPTION_BREAKPOINT = 0x80000003
- _EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d
- _EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e
- _EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f
- _EXCEPTION_FLT_OVERFLOW = 0xc0000091
- _EXCEPTION_FLT_UNDERFLOW = 0xc0000093
- _EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094
- _EXCEPTION_INT_OVERFLOW = 0xc0000095
-
- _INFINITE = 0xffffffff
- _WAIT_TIMEOUT = 0x102
-
- _EXCEPTION_CONTINUE_EXECUTION = -0x1
- _EXCEPTION_CONTINUE_SEARCH = 0x0
-)
-
-type systeminfo struct {
- anon0 [4]byte
- dwpagesize uint32
- lpminimumapplicationaddress *byte
- lpmaximumapplicationaddress *byte
- dwactiveprocessormask uint64
- dwnumberofprocessors uint32
- dwprocessortype uint32
- dwallocationgranularity uint32
- wprocessorlevel uint16
- wprocessorrevision uint16
-}
-
-type exceptionrecord struct {
- exceptioncode uint32
- exceptionflags uint32
- exceptionrecord *exceptionrecord
- exceptionaddress *byte
- numberparameters uint32
- pad_cgo_0 [4]byte
- exceptioninformation [15]uint64
-}
+const _CONTEXT_CONTROL = 0x100001
type m128a struct {
low uint64
@@ -123,7 +63,7 @@ type context struct {
func (c *context) ip() uintptr { return uintptr(c.rip) }
func (c *context) sp() uintptr { return uintptr(c.rsp) }
-// Amd64 does not have link register, so this returns 0.
+// AMD64 does not have link register, so this returns 0.
func (c *context) lr() uintptr { return 0 }
func (c *context) set_lr(x uintptr) {}
@@ -152,20 +92,3 @@ func dumpregs(r *context) {
print("fs ", hex(r.segfs), "\n")
print("gs ", hex(r.seggs), "\n")
}
-
-type overlapped struct {
- internal uint64
- internalhigh uint64
- anon0 [8]byte
- hevent *byte
-}
-
-type memoryBasicInformation struct {
- baseAddress uintptr
- allocationBase uintptr
- allocationProtect uint32
- regionSize uintptr
- state uint32
- protect uint32
- type_ uint32
-}
diff --git a/src/runtime/defs_windows_arm.go b/src/runtime/defs_windows_arm.go
index b275b0572a..370470e35d 100644
--- a/src/runtime/defs_windows_arm.go
+++ b/src/runtime/defs_windows_arm.go
@@ -4,67 +4,13 @@
package runtime
-const (
- _PROT_NONE = 0
- _PROT_READ = 1
- _PROT_WRITE = 2
- _PROT_EXEC = 4
-
- _MAP_ANON = 1
- _MAP_PRIVATE = 2
-
- _DUPLICATE_SAME_ACCESS = 0x2
- _THREAD_PRIORITY_HIGHEST = 0x2
-
- _SIGINT = 0x2
- _SIGTERM = 0xF
- _CTRL_C_EVENT = 0x0
- _CTRL_BREAK_EVENT = 0x1
- _CTRL_CLOSE_EVENT = 0x2
- _CTRL_LOGOFF_EVENT = 0x5
- _CTRL_SHUTDOWN_EVENT = 0x6
-
- _CONTEXT_CONTROL = 0x10001
- _CONTEXT_FULL = 0x10007
-
- _EXCEPTION_ACCESS_VIOLATION = 0xc0000005
- _EXCEPTION_BREAKPOINT = 0x80000003
- _EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d
- _EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e
- _EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f
- _EXCEPTION_FLT_OVERFLOW = 0xc0000091
- _EXCEPTION_FLT_UNDERFLOW = 0xc0000093
- _EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094
- _EXCEPTION_INT_OVERFLOW = 0xc0000095
-
- _INFINITE = 0xffffffff
- _WAIT_TIMEOUT = 0x102
-
- _EXCEPTION_CONTINUE_EXECUTION = -0x1
- _EXCEPTION_CONTINUE_SEARCH = 0x0
-)
-
-type systeminfo struct {
- anon0 [4]byte
- dwpagesize uint32
- lpminimumapplicationaddress *byte
- lpmaximumapplicationaddress *byte
- dwactiveprocessormask uint32
- dwnumberofprocessors uint32
- dwprocessortype uint32
- dwallocationgranularity uint32
- wprocessorlevel uint16
- wprocessorrevision uint16
-}
-
-type exceptionrecord struct {
- exceptioncode uint32
- exceptionflags uint32
- exceptionrecord *exceptionrecord
- exceptionaddress *byte
- numberparameters uint32
- exceptioninformation [15]uint32
-}
+// NOTE(rsc): _CONTEXT_CONTROL is actually 0x200001 and should include PC, SP, and LR.
+// However, empirically, LR doesn't come along on Windows 10
+// unless you also set _CONTEXT_INTEGER (0x200002).
+// Without LR, we skip over the next-to-bottom function in profiles
+// when the bottom function is frameless.
+// So we set both here, to make a working _CONTEXT_CONTROL.
+const _CONTEXT_CONTROL = 0x200003
type neon128 struct {
low uint64
@@ -132,23 +78,6 @@ func dumpregs(r *context) {
print("cpsr ", hex(r.cpsr), "\n")
}
-type overlapped struct {
- internal uint32
- internalhigh uint32
- anon0 [8]byte
- hevent *byte
-}
-
-type memoryBasicInformation struct {
- baseAddress uintptr
- allocationBase uintptr
- allocationProtect uint32
- regionSize uintptr
- state uint32
- protect uint32
- type_ uint32
-}
-
func stackcheck() {
// TODO: not implemented on ARM
}
diff --git a/src/runtime/defs_windows_arm64.go b/src/runtime/defs_windows_arm64.go
new file mode 100644
index 0000000000..9ccce46f09
--- /dev/null
+++ b/src/runtime/defs_windows_arm64.go
@@ -0,0 +1,83 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// NOTE(rsc): _CONTEXT_CONTROL is actually 0x400001 and should include PC, SP, and LR.
+// However, empirically, LR doesn't come along on Windows 10
+// unless you also set _CONTEXT_INTEGER (0x400002).
+// Without LR, we skip over the next-to-bottom function in profiles
+// when the bottom function is frameless.
+// So we set both here, to make a working _CONTEXT_CONTROL.
+const _CONTEXT_CONTROL = 0x400003
+
+type neon128 struct {
+ low uint64
+ high int64
+}
+
+// See https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-arm64_nt_context
+type context struct {
+ contextflags uint32
+ cpsr uint32
+ x [31]uint64 // fp is x[29], lr is x[30]
+ xsp uint64
+ pc uint64
+ v [32]neon128
+ fpcr uint32
+ fpsr uint32
+ bcr [8]uint32
+ bvr [8]uint64
+ wcr [2]uint32
+ wvr [2]uint64
+}
+
+func (c *context) ip() uintptr { return uintptr(c.pc) }
+func (c *context) sp() uintptr { return uintptr(c.xsp) }
+func (c *context) lr() uintptr { return uintptr(c.x[30]) }
+
+func (c *context) set_ip(x uintptr) { c.pc = uint64(x) }
+func (c *context) set_sp(x uintptr) { c.xsp = uint64(x) }
+func (c *context) set_lr(x uintptr) { c.x[30] = uint64(x) }
+
+func dumpregs(r *context) {
+ print("r0 ", hex(r.x[0]), "\n")
+ print("r1 ", hex(r.x[1]), "\n")
+ print("r2 ", hex(r.x[2]), "\n")
+ print("r3 ", hex(r.x[3]), "\n")
+ print("r4 ", hex(r.x[4]), "\n")
+ print("r5 ", hex(r.x[5]), "\n")
+ print("r6 ", hex(r.x[6]), "\n")
+ print("r7 ", hex(r.x[7]), "\n")
+ print("r8 ", hex(r.x[8]), "\n")
+ print("r9 ", hex(r.x[9]), "\n")
+ print("r10 ", hex(r.x[10]), "\n")
+ print("r11 ", hex(r.x[11]), "\n")
+ print("r12 ", hex(r.x[12]), "\n")
+ print("r13 ", hex(r.x[13]), "\n")
+ print("r14 ", hex(r.x[14]), "\n")
+ print("r15 ", hex(r.x[15]), "\n")
+ print("r16 ", hex(r.x[16]), "\n")
+ print("r17 ", hex(r.x[17]), "\n")
+ print("r18 ", hex(r.x[18]), "\n")
+ print("r19 ", hex(r.x[19]), "\n")
+ print("r20 ", hex(r.x[20]), "\n")
+ print("r21 ", hex(r.x[21]), "\n")
+ print("r22 ", hex(r.x[22]), "\n")
+ print("r23 ", hex(r.x[23]), "\n")
+ print("r24 ", hex(r.x[24]), "\n")
+ print("r25 ", hex(r.x[25]), "\n")
+ print("r26 ", hex(r.x[26]), "\n")
+ print("r27 ", hex(r.x[27]), "\n")
+ print("r28 ", hex(r.x[28]), "\n")
+ print("r29 ", hex(r.x[29]), "\n")
+ print("lr ", hex(r.x[30]), "\n")
+ print("sp ", hex(r.xsp), "\n")
+ print("pc ", hex(r.pc), "\n")
+ print("cpsr ", hex(r.cpsr), "\n")
+}
+
+func stackcheck() {
+ // TODO: not implemented on ARM
+}
diff --git a/src/runtime/duff_amd64.s b/src/runtime/duff_amd64.s
index 2ff5bf6dbc..df010f5853 100644
--- a/src/runtime/duff_amd64.s
+++ b/src/runtime/duff_amd64.s
@@ -5,100 +5,100 @@
#include "textflag.h"
TEXT runtime·duffzero<ABIInternal>(SB), NOSPLIT, $0-0
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
- MOVUPS X0,(DI)
- MOVUPS X0,16(DI)
- MOVUPS X0,32(DI)
- MOVUPS X0,48(DI)
+ MOVUPS X15,(DI)
+ MOVUPS X15,16(DI)
+ MOVUPS X15,32(DI)
+ MOVUPS X15,48(DI)
LEAQ 64(DI),DI
RET
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 44551dcaf1..a48bb2636f 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -200,8 +200,6 @@ func GostringW(w []uint16) (s string) {
return
}
-type Uintreg sys.Uintreg
-
var Open = open
var Close = closefd
var Read = read
@@ -1201,12 +1199,12 @@ type TimeHistogram timeHistogram
// Counts returns the counts for the given bucket, subBucket indices.
// Returns true if the bucket was valid, otherwise returns the counts
-// for the overflow bucket and false.
+// for the underflow bucket and false.
func (th *TimeHistogram) Count(bucket, subBucket uint) (uint64, bool) {
t := (*timeHistogram)(th)
i := bucket*TimeHistNumSubBuckets + subBucket
if i >= uint(len(t.counts)) {
- return t.overflow, false
+ return t.underflow, false
}
return t.counts[i], true
}
diff --git a/src/runtime/extern.go b/src/runtime/extern.go
index dacdf4f383..bbe41dd0d4 100644
--- a/src/runtime/extern.go
+++ b/src/runtime/extern.go
@@ -229,6 +229,8 @@ func Callers(skip int, pc []uintptr) int {
return callers(skip, pc)
}
+var defaultGOROOT string // set by cmd/link
+
// GOROOT returns the root of the Go tree. It uses the
// GOROOT environment variable, if set at process start,
// or else the root used during the Go build.
@@ -237,7 +239,7 @@ func GOROOT() string {
if s != "" {
return s
}
- return sys.DefaultGoroot
+ return defaultGOROOT
}
// Version returns the Go tree's version string.
diff --git a/src/runtime/histogram.go b/src/runtime/histogram.go
index 4020969eb9..da4910d341 100644
--- a/src/runtime/histogram.go
+++ b/src/runtime/histogram.go
@@ -7,6 +7,7 @@ package runtime
import (
"runtime/internal/atomic"
"runtime/internal/sys"
+ "unsafe"
)
const (
@@ -25,7 +26,7 @@ const (
// The number of super-buckets (timeHistNumSuperBuckets), on the
// other hand, defines the range. To reserve room for sub-buckets,
// bit timeHistSubBucketBits is the first bit considered for
- // super-buckets, so super-bucket indicies are adjusted accordingly.
+ // super-buckets, so super-bucket indices are adjusted accordingly.
//
// As an example, consider 45 super-buckets with 16 sub-buckets.
//
@@ -69,17 +70,21 @@ const (
// for concurrent use. It is also safe to read all the values
// atomically.
type timeHistogram struct {
- counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
- overflow uint64
+ counts [timeHistNumSuperBuckets * timeHistNumSubBuckets]uint64
+
+ // underflow counts all the times we got a negative duration
+ // sample. Because of how time works on some platforms, it's
+ // possible to measure negative durations. We could ignore them,
+ // but we record them anyway because it's better to have some
+ // signal that it's happening than just missing samples.
+ underflow uint64
}
// record adds the given duration to the distribution.
-//
-// Although the duration is an int64 to facilitate ease-of-use
-// with e.g. nanotime, the duration must be non-negative.
func (h *timeHistogram) record(duration int64) {
if duration < 0 {
- throw("timeHistogram encountered negative duration")
+ atomic.Xadd64(&h.underflow, 1)
+ return
}
// The index of the exponential bucket is just the index
// of the highest set bit adjusted for how many bits we
@@ -92,29 +97,47 @@ func (h *timeHistogram) record(duration int64) {
superBucket = uint(sys.Len64(uint64(duration))) - timeHistSubBucketBits
if superBucket*timeHistNumSubBuckets >= uint(len(h.counts)) {
// The bucket index we got is larger than what we support, so
- // add into the special overflow bucket.
- atomic.Xadd64(&h.overflow, 1)
- return
+ // include this count in the highest bucket, which extends to
+ // infinity.
+ superBucket = timeHistNumSuperBuckets - 1
+ subBucket = timeHistNumSubBuckets - 1
+ } else {
+ // The linear subbucket index is just the timeHistSubBucketsBits
+ // bits after the top bit. To extract that value, shift down
+ // the duration such that we leave the top bit and the next bits
+ // intact, then extract the index.
+ subBucket = uint((duration >> (superBucket - 1)) % timeHistNumSubBuckets)
}
- // The linear subbucket index is just the timeHistSubBucketsBits
- // bits after the top bit. To extract that value, shift down
- // the duration such that we leave the top bit and the next bits
- // intact, then extract the index.
- subBucket = uint((duration >> (superBucket - 1)) % timeHistNumSubBuckets)
} else {
subBucket = uint(duration)
}
atomic.Xadd64(&h.counts[superBucket*timeHistNumSubBuckets+subBucket], 1)
}
+const (
+ fInf = 0x7FF0000000000000
+ fNegInf = 0xFFF0000000000000
+)
+
+func float64Inf() float64 {
+ inf := uint64(fInf)
+ return *(*float64)(unsafe.Pointer(&inf))
+}
+
+func float64NegInf() float64 {
+ inf := uint64(fNegInf)
+ return *(*float64)(unsafe.Pointer(&inf))
+}
+
// timeHistogramMetricsBuckets generates a slice of boundaries for
// the timeHistogram. These boundaries are represented in seconds,
// not nanoseconds like the timeHistogram represents durations.
func timeHistogramMetricsBuckets() []float64 {
- b := make([]float64, timeHistTotalBuckets-1)
+ b := make([]float64, timeHistTotalBuckets+1)
+ b[0] = float64NegInf()
for i := 0; i < timeHistNumSuperBuckets; i++ {
superBucketMin := uint64(0)
- // The (inclusive) minimum for the first bucket is 0.
+ // The (inclusive) minimum for the first non-negative bucket is 0.
if i > 0 {
// The minimum for the second bucket will be
// 1 << timeHistSubBucketBits, indicating that all
@@ -128,7 +151,7 @@ func timeHistogramMetricsBuckets() []float64 {
// index to combine it with the bucketMin.
subBucketShift := uint(0)
if i > 1 {
- // The first two buckets are exact with respect to integers,
+ // The first two super buckets are exact with respect to integers,
// so we'll never have to shift the sub-bucket index. Thereafter,
// we shift up by 1 with each subsequent bucket.
subBucketShift = uint(i - 2)
@@ -141,8 +164,9 @@ func timeHistogramMetricsBuckets() []float64 {
// Convert the subBucketMin which is in nanoseconds to a float64 seconds value.
// These values will all be exactly representable by a float64.
- b[i*timeHistNumSubBuckets+j] = float64(subBucketMin) / 1e9
+ b[i*timeHistNumSubBuckets+j+1] = float64(subBucketMin) / 1e9
}
}
+ b[len(b)-1] = float64Inf()
return b
}
diff --git a/src/runtime/histogram_test.go b/src/runtime/histogram_test.go
index 5f5b28f784..dbc64fa559 100644
--- a/src/runtime/histogram_test.go
+++ b/src/runtime/histogram_test.go
@@ -5,6 +5,7 @@
package runtime_test
import (
+ "math"
. "runtime"
"testing"
)
@@ -32,8 +33,8 @@ func TestTimeHistogram(t *testing.T) {
h.Record(base + v)
}
}
- // Hit the overflow bucket.
- h.Record(int64(^uint64(0) >> 1))
+ // Hit the underflow bucket.
+ h.Record(int64(-1))
// Check to make sure there's exactly one count in each
// bucket.
@@ -41,7 +42,7 @@ func TestTimeHistogram(t *testing.T) {
for j := uint(0); j < TimeHistNumSubBuckets; j++ {
c, ok := h.Count(i, j)
if !ok {
- t.Errorf("hit overflow bucket unexpectedly: (%d, %d)", i, j)
+ t.Errorf("hit underflow bucket unexpectedly: (%d, %d)", i, j)
} else if c != 1 {
t.Errorf("bucket (%d, %d) has count that is not 1: %d", i, j, c)
}
@@ -49,10 +50,21 @@ func TestTimeHistogram(t *testing.T) {
}
c, ok := h.Count(TimeHistNumSuperBuckets, 0)
if ok {
- t.Errorf("expected to hit overflow bucket: (%d, %d)", TimeHistNumSuperBuckets, 0)
+ t.Errorf("expected to hit underflow bucket: (%d, %d)", TimeHistNumSuperBuckets, 0)
}
if c != 1 {
- t.Errorf("overflow bucket has count that is not 1: %d", c)
+ t.Errorf("underflow bucket has count that is not 1: %d", c)
}
+
+ // Check overflow behavior.
+ // By hitting a high value, we should just be adding into the highest bucket.
+ h.Record(math.MaxInt64)
+ c, ok = h.Count(TimeHistNumSuperBuckets-1, TimeHistNumSubBuckets-1)
+ if !ok {
+ t.Error("hit underflow bucket in highest bucket unexpectedly")
+ } else if c != 2 {
+ t.Errorf("highest has count that is not 2: %d", c)
+ }
+
dummyTimeHistogram = TimeHistogram{}
}
diff --git a/src/runtime/internal/sys/arch.go b/src/runtime/internal/sys/arch.go
index 13c00cf639..3c99a2f7da 100644
--- a/src/runtime/internal/sys/arch.go
+++ b/src/runtime/internal/sys/arch.go
@@ -18,3 +18,37 @@ const (
S390X
WASM
)
+
+// PtrSize is the size of a pointer in bytes - unsafe.Sizeof(uintptr(0)) but as an ideal constant.
+// It is also the size of the machine's native word size (that is, 4 on 32-bit systems, 8 on 64-bit).
+const PtrSize = 4 << (^uintptr(0) >> 63)
+
+// AIX requires a larger stack for syscalls.
+const StackGuardMultiplier = StackGuardMultiplierDefault*(1-GoosAix) + 2*GoosAix
+
+// ArchFamily is the architecture family (AMD64, ARM, ...)
+const ArchFamily ArchFamilyType = _ArchFamily
+
+// BigEndian reports whether the architecture is big-endian.
+const BigEndian = GoarchArmbe|GoarchArm64be|GoarchMips|GoarchMips64|GoarchPpc|GoarchPpc64|GoarchS390|GoarchS390x|GoarchSparc|GoarchSparc64 == 1
+
+// DefaultPhysPageSize is the default physical page size.
+const DefaultPhysPageSize = _DefaultPhysPageSize
+
+// PCQuantum is the minimal unit for a program counter (1 on x86, 4 on most other systems).
+// The various PC tables record PC deltas pre-divided by PCQuantum.
+const PCQuantum = _PCQuantum
+
+// Int64Align is the required alignment for a 64-bit integer (4 on 32-bit systems, 8 on 64-bit).
+const Int64Align = PtrSize
+
+// MinFrameSize is the size of the system-reserved words at the bottom
+// of a frame (just above the architectural stack pointer).
+// It is zero on x86 and PtrSize on most non-x86 (LR-based) systems.
+// On PowerPC it is larger, to cover three more reserved words:
+// the compiler word, the link editor word, and the TOC save word.
+const MinFrameSize = _MinFrameSize
+
+// StackAlign is the required alignment of the SP register.
+// The stack must be at least word aligned, but some architectures require more.
+const StackAlign = _StackAlign
diff --git a/src/runtime/internal/sys/arch_386.go b/src/runtime/internal/sys/arch_386.go
index b51f70a512..1ebce3435e 100644
--- a/src/runtime/internal/sys/arch_386.go
+++ b/src/runtime/internal/sys/arch_386.go
@@ -5,12 +5,9 @@
package sys
const (
- ArchFamily = I386
- BigEndian = false
- DefaultPhysPageSize = 4096
- PCQuantum = 1
- Int64Align = 4
- MinFrameSize = 0
+ _ArchFamily = I386
+ _DefaultPhysPageSize = 4096
+ _PCQuantum = 1
+ _MinFrameSize = 0
+ _StackAlign = PtrSize
)
-
-type Uintreg uint32
diff --git a/src/runtime/internal/sys/arch_amd64.go b/src/runtime/internal/sys/arch_amd64.go
index 3d6776e71e..7f003d0f1d 100644
--- a/src/runtime/internal/sys/arch_amd64.go
+++ b/src/runtime/internal/sys/arch_amd64.go
@@ -5,12 +5,9 @@
package sys
const (
- ArchFamily = AMD64
- BigEndian = false
- DefaultPhysPageSize = 4096
- PCQuantum = 1
- Int64Align = 8
- MinFrameSize = 0
+ _ArchFamily = AMD64
+ _DefaultPhysPageSize = 4096
+ _PCQuantum = 1
+ _MinFrameSize = 0
+ _StackAlign = PtrSize
)
-
-type Uintreg uint64
diff --git a/src/runtime/internal/sys/arch_arm.go b/src/runtime/internal/sys/arch_arm.go
index 97960d6f83..ef2048bb71 100644
--- a/src/runtime/internal/sys/arch_arm.go
+++ b/src/runtime/internal/sys/arch_arm.go
@@ -5,12 +5,9 @@
package sys
const (
- ArchFamily = ARM
- BigEndian = false
- DefaultPhysPageSize = 65536
- PCQuantum = 4
- Int64Align = 4
- MinFrameSize = 4
+ _ArchFamily = ARM
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 4
+ _MinFrameSize = 4
+ _StackAlign = PtrSize
)
-
-type Uintreg uint32
diff --git a/src/runtime/internal/sys/arch_arm64.go b/src/runtime/internal/sys/arch_arm64.go
index 911a9485e1..b9f2f7b1fe 100644
--- a/src/runtime/internal/sys/arch_arm64.go
+++ b/src/runtime/internal/sys/arch_arm64.go
@@ -5,12 +5,9 @@
package sys
const (
- ArchFamily = ARM64
- BigEndian = false
- DefaultPhysPageSize = 65536
- PCQuantum = 4
- Int64Align = 8
- MinFrameSize = 8
+ _ArchFamily = ARM64
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 4
+ _MinFrameSize = 8
+ _StackAlign = 16
)
-
-type Uintreg uint64
diff --git a/src/runtime/internal/sys/arch_mips.go b/src/runtime/internal/sys/arch_mips.go
index 75cdb2e07f..4cb0eebea7 100644
--- a/src/runtime/internal/sys/arch_mips.go
+++ b/src/runtime/internal/sys/arch_mips.go
@@ -5,12 +5,9 @@
package sys
const (
- ArchFamily = MIPS
- BigEndian = true
- DefaultPhysPageSize = 65536
- PCQuantum = 4
- Int64Align = 4
- MinFrameSize = 4
+ _ArchFamily = MIPS
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 4
+ _MinFrameSize = 4
+ _StackAlign = PtrSize
)
-
-type Uintreg uint32
diff --git a/src/runtime/internal/sys/arch_mips64.go b/src/runtime/internal/sys/arch_mips64.go
index 494291a802..57636ac4a4 100644
--- a/src/runtime/internal/sys/arch_mips64.go
+++ b/src/runtime/internal/sys/arch_mips64.go
@@ -5,12 +5,9 @@
package sys
const (
- ArchFamily = MIPS64
- BigEndian = true
- DefaultPhysPageSize = 16384
- PCQuantum = 4
- Int64Align = 8
- MinFrameSize = 8
+ _ArchFamily = MIPS64
+ _DefaultPhysPageSize = 16384
+ _PCQuantum = 4
+ _MinFrameSize = 8
+ _StackAlign = PtrSize
)
-
-type Uintreg uint64
diff --git a/src/runtime/internal/sys/arch_mips64le.go b/src/runtime/internal/sys/arch_mips64le.go
index d36d1202f6..57636ac4a4 100644
--- a/src/runtime/internal/sys/arch_mips64le.go
+++ b/src/runtime/internal/sys/arch_mips64le.go
@@ -5,12 +5,9 @@
package sys
const (
- ArchFamily = MIPS64
- BigEndian = false
- DefaultPhysPageSize = 16384
- PCQuantum = 4
- Int64Align = 8
- MinFrameSize = 8
+ _ArchFamily = MIPS64
+ _DefaultPhysPageSize = 16384
+ _PCQuantum = 4
+ _MinFrameSize = 8
+ _StackAlign = PtrSize
)
-
-type Uintreg uint64
diff --git a/src/runtime/internal/sys/arch_mipsle.go b/src/runtime/internal/sys/arch_mipsle.go
index 323bf82059..4240f5ce47 100644
--- a/src/runtime/internal/sys/arch_mipsle.go
+++ b/src/runtime/internal/sys/arch_mipsle.go
@@ -5,12 +5,9 @@
package sys
const (
- ArchFamily = MIPS
- BigEndian = false
- DefaultPhysPageSize = 65536
- PCQuantum = 4
- Int64Align = 4
- MinFrameSize = 4
+ _ArchFamily = MIPS
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 4
+ _MinFrameSize = 4
+ _StackAlign = PtrSize
)
-
-type Uintreg uint32
diff --git a/src/runtime/internal/sys/arch_ppc64.go b/src/runtime/internal/sys/arch_ppc64.go
index da1fe3d596..1869213ce2 100644
--- a/src/runtime/internal/sys/arch_ppc64.go
+++ b/src/runtime/internal/sys/arch_ppc64.go
@@ -5,12 +5,9 @@
package sys
const (
- ArchFamily = PPC64
- BigEndian = true
- DefaultPhysPageSize = 65536
- PCQuantum = 4
- Int64Align = 8
- MinFrameSize = 32
+ _ArchFamily = PPC64
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 4
+ _MinFrameSize = 32
+ _StackAlign = 16
)
-
-type Uintreg uint64
diff --git a/src/runtime/internal/sys/arch_ppc64le.go b/src/runtime/internal/sys/arch_ppc64le.go
index 605979903a..1869213ce2 100644
--- a/src/runtime/internal/sys/arch_ppc64le.go
+++ b/src/runtime/internal/sys/arch_ppc64le.go
@@ -5,12 +5,9 @@
package sys
const (
- ArchFamily = PPC64
- BigEndian = false
- DefaultPhysPageSize = 65536
- PCQuantum = 4
- Int64Align = 8
- MinFrameSize = 32
+ _ArchFamily = PPC64
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 4
+ _MinFrameSize = 32
+ _StackAlign = 16
)
-
-type Uintreg uint64
diff --git a/src/runtime/internal/sys/arch_riscv64.go b/src/runtime/internal/sys/arch_riscv64.go
index 7cdcc8fcbd..360d236e32 100644
--- a/src/runtime/internal/sys/arch_riscv64.go
+++ b/src/runtime/internal/sys/arch_riscv64.go
@@ -5,14 +5,9 @@
package sys
const (
- ArchFamily = RISCV64
- BigEndian = false
- CacheLineSize = 64
- DefaultPhysPageSize = 4096
- PCQuantum = 4
- Int64Align = 8
- HugePageSize = 1 << 21
- MinFrameSize = 8
+ _ArchFamily = RISCV64
+ _DefaultPhysPageSize = 4096
+ _PCQuantum = 4
+ _MinFrameSize = 8
+ _StackAlign = PtrSize
)
-
-type Uintreg uint64
diff --git a/src/runtime/internal/sys/arch_s390x.go b/src/runtime/internal/sys/arch_s390x.go
index 12cb8a0fcb..e33e0b7f2b 100644
--- a/src/runtime/internal/sys/arch_s390x.go
+++ b/src/runtime/internal/sys/arch_s390x.go
@@ -5,12 +5,9 @@
package sys
const (
- ArchFamily = S390X
- BigEndian = true
- DefaultPhysPageSize = 4096
- PCQuantum = 2
- Int64Align = 8
- MinFrameSize = 8
+ _ArchFamily = S390X
+ _DefaultPhysPageSize = 4096
+ _PCQuantum = 2
+ _MinFrameSize = 8
+ _StackAlign = PtrSize
)
-
-type Uintreg uint64
diff --git a/src/runtime/internal/sys/arch_wasm.go b/src/runtime/internal/sys/arch_wasm.go
index eb825df626..ee919ff9e6 100644
--- a/src/runtime/internal/sys/arch_wasm.go
+++ b/src/runtime/internal/sys/arch_wasm.go
@@ -5,12 +5,9 @@
package sys
const (
- ArchFamily = WASM
- BigEndian = false
- DefaultPhysPageSize = 65536
- PCQuantum = 1
- Int64Align = 8
- MinFrameSize = 0
+ _ArchFamily = WASM
+ _DefaultPhysPageSize = 65536
+ _PCQuantum = 1
+ _MinFrameSize = 0
+ _StackAlign = PtrSize
)
-
-type Uintreg uint64
diff --git a/src/runtime/internal/sys/stubs.go b/src/runtime/internal/sys/stubs.go
deleted file mode 100644
index 10b0173f60..0000000000
--- a/src/runtime/internal/sys/stubs.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sys
-
-// Declarations for runtime services implemented in C or assembly.
-
-const PtrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
-const RegSize = 4 << (^Uintreg(0) >> 63) // unsafe.Sizeof(uintreg(0)) but an ideal const
-const SpAlign = 1*(1-GoarchArm64) + 16*GoarchArm64 // SP alignment: 1 normally, 16 for ARM64
-
-var DefaultGoroot string // set at link time
-
-// AIX requires a larger stack for syscalls.
-const StackGuardMultiplier = StackGuardMultiplierDefault*(1-GoosAix) + 2*GoosAix
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index 2b5affce52..4994347bde 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -14,6 +14,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -223,11 +224,18 @@ func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size
// stack map of reflectcall is wrong.
//
//go:nosplit
-func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr) {
+func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= sys.PtrSize {
bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
}
memmove(dst, src, size)
+
+ // Move pointers returned in registers to a place where the GC can see them.
+ for i := range regs.Ints {
+ if regs.ReturnIsPtr.Get(i) {
+ regs.Ptrs[i] = unsafe.Pointer(regs.Ints[i])
+ }
+ }
}
//go:nosplit
diff --git a/src/runtime/memclr_386.s b/src/runtime/memclr_386.s
index 65f7196312..5e090ef09e 100644
--- a/src/runtime/memclr_386.s
+++ b/src/runtime/memclr_386.s
@@ -9,6 +9,8 @@
// NOTE: Windows externalthreadhandler expects memclr to preserve DX.
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), DI
diff --git a/src/runtime/memclr_amd64.s b/src/runtime/memclr_amd64.s
index d79078fd00..37fe9745b1 100644
--- a/src/runtime/memclr_amd64.s
+++ b/src/runtime/memclr_amd64.s
@@ -9,6 +9,8 @@
// NOTE: Windows externalthreadhandler expects memclr to preserve DX.
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT, $0-16
MOVQ ptr+0(FP), DI
diff --git a/src/runtime/memclr_arm.s b/src/runtime/memclr_arm.s
index 7326b8be34..f02d058ead 100644
--- a/src/runtime/memclr_arm.s
+++ b/src/runtime/memclr_arm.s
@@ -30,7 +30,10 @@
#define N R12
#define TMP R12 /* N and TMP don't overlap */
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
+// Also called from assembly in sys_windows_arm.s without g (but using Go stack convention).
TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-8
MOVW ptr+0(FP), TO
MOVW n+4(FP), N
diff --git a/src/runtime/memclr_arm64.s b/src/runtime/memclr_arm64.s
index a56a6dfb85..c1a0dcef58 100644
--- a/src/runtime/memclr_arm64.s
+++ b/src/runtime/memclr_arm64.s
@@ -4,7 +4,10 @@
#include "textflag.h"
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
+// Also called from assembly in sys_windows_arm64.s without g (but using Go stack convention).
TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16
MOVD ptr+0(FP), R0
MOVD n+8(FP), R1
diff --git a/src/runtime/memclr_mips64x.s b/src/runtime/memclr_mips64x.s
index 4c2292eae8..d7a3251e20 100644
--- a/src/runtime/memclr_mips64x.s
+++ b/src/runtime/memclr_mips64x.s
@@ -7,6 +7,8 @@
#include "go_asm.h"
#include "textflag.h"
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16
MOVV ptr+0(FP), R1
diff --git a/src/runtime/memclr_mipsx.s b/src/runtime/memclr_mipsx.s
index 1561a23dbe..eb2a8a7219 100644
--- a/src/runtime/memclr_mipsx.s
+++ b/src/runtime/memclr_mipsx.s
@@ -14,6 +14,8 @@
#define MOVWLO MOVWL
#endif
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-8
MOVW n+4(FP), R2
diff --git a/src/runtime/memclr_plan9_386.s b/src/runtime/memclr_plan9_386.s
index 5b880ae86f..54701a9453 100644
--- a/src/runtime/memclr_plan9_386.s
+++ b/src/runtime/memclr_plan9_386.s
@@ -4,6 +4,8 @@
#include "textflag.h"
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT, $0-8
MOVL ptr+0(FP), DI
diff --git a/src/runtime/memclr_plan9_amd64.s b/src/runtime/memclr_plan9_amd64.s
index ad383cd6b3..8c6a1cc780 100644
--- a/src/runtime/memclr_plan9_amd64.s
+++ b/src/runtime/memclr_plan9_amd64.s
@@ -4,6 +4,8 @@
#include "textflag.h"
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16
MOVQ ptr+0(FP), DI
diff --git a/src/runtime/memclr_ppc64x.s b/src/runtime/memclr_ppc64x.s
index 072963f756..7512620894 100644
--- a/src/runtime/memclr_ppc64x.s
+++ b/src/runtime/memclr_ppc64x.s
@@ -6,6 +6,8 @@
#include "textflag.h"
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT|NOFRAME, $0-16
MOVD ptr+0(FP), R3
diff --git a/src/runtime/memclr_riscv64.s b/src/runtime/memclr_riscv64.s
index ba7704e805..54ddaa4560 100644
--- a/src/runtime/memclr_riscv64.s
+++ b/src/runtime/memclr_riscv64.s
@@ -4,6 +4,8 @@
#include "textflag.h"
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
// void runtime·memclrNoHeapPointers(void*, uintptr)
TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16
MOV ptr+0(FP), T1
diff --git a/src/runtime/memclr_s390x.s b/src/runtime/memclr_s390x.s
index dd14a441cc..fa657ef66e 100644
--- a/src/runtime/memclr_s390x.s
+++ b/src/runtime/memclr_s390x.s
@@ -4,6 +4,8 @@
#include "textflag.h"
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT|NOFRAME,$0-16
MOVD ptr+0(FP), R4
diff --git a/src/runtime/memclr_wasm.s b/src/runtime/memclr_wasm.s
index 68ffe2f67b..5a053049f8 100644
--- a/src/runtime/memclr_wasm.s
+++ b/src/runtime/memclr_wasm.s
@@ -4,6 +4,8 @@
#include "textflag.h"
+// See memclrNoHeapPointers Go doc for important implementation constraints.
+
// func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
TEXT runtime·memclrNoHeapPointers(SB), NOSPLIT, $0-16
MOVD ptr+0(FP), R0
diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go
index d3c0341aee..3e8dbda0ca 100644
--- a/src/runtime/metrics.go
+++ b/src/runtime/metrics.go
@@ -41,10 +41,28 @@ func initMetrics() {
if metricsInit {
return
}
- sizeClassBuckets = make([]float64, _NumSizeClasses)
- for i := range sizeClassBuckets {
- sizeClassBuckets[i] = float64(class_to_size[i])
+
+ sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
+ // Skip size class 0 which is a stand-in for large objects, but large
+ // objects are tracked separately (and they actually get placed in
+ // the last bucket, not the first).
+ sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
+ for i := 1; i < _NumSizeClasses; i++ {
+ // Size classes have an inclusive upper-bound
+ // and exclusive lower bound (e.g. 48-byte size class is
+ // (32, 48]) whereas we want an inclusive lower-bound
+ // and exclusive upper-bound (e.g. 48-byte size class is
+ // [33, 49). We can achieve this by shifting all bucket
+ // boundaries up by 1.
+ //
+ // Also, a float64 can precisely represent integers with
+ // value up to 2^53 and size classes are relatively small
+ // (nowhere near 2^48 even) so this will give us exact
+ // boundaries.
+ sizeClassBuckets[i] = float64(class_to_size[i] + 1)
}
+ sizeClassBuckets = append(sizeClassBuckets, float64Inf())
+
timeHistBuckets = timeHistogramMetricsBuckets()
metrics = map[string]metricData{
"/gc/cycles/automatic:gc-cycles": {
@@ -68,23 +86,27 @@ func initMetrics() {
out.scalar = in.sysStats.gcCyclesDone
},
},
- "/gc/heap/allocs-by-size:objects": {
+ "/gc/heap/allocs-by-size:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func(in *statAggregate, out *metricValue) {
hist := out.float64HistOrInit(sizeClassBuckets)
hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeAllocCount)
- for i := range hist.buckets {
- hist.counts[i] = uint64(in.heapStats.smallAllocCount[i])
+ // Cut off the first index which is ostensibly for size class 0,
+ // but large objects are tracked separately so it's actually unused.
+ for i, count := range in.heapStats.smallAllocCount[1:] {
+ hist.counts[i] = uint64(count)
}
},
},
- "/gc/heap/frees-by-size:objects": {
+ "/gc/heap/frees-by-size:bytes": {
deps: makeStatDepSet(heapStatsDep),
compute: func(in *statAggregate, out *metricValue) {
hist := out.float64HistOrInit(sizeClassBuckets)
hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeFreeCount)
- for i := range hist.buckets {
- hist.counts[i] = uint64(in.heapStats.smallFreeCount[i])
+ // Cut off the first index which is ostensibly for size class 0,
+ // but large objects are tracked separately so it's actually unused.
+ for i, count := range in.heapStats.smallFreeCount[1:] {
+ hist.counts[i] = uint64(count)
}
},
},
@@ -105,9 +127,12 @@ func initMetrics() {
"/gc/pauses:seconds": {
compute: func(_ *statAggregate, out *metricValue) {
hist := out.float64HistOrInit(timeHistBuckets)
- hist.counts[len(hist.counts)-1] = atomic.Load64(&memstats.gcPauseDist.overflow)
- for i := range hist.buckets {
- hist.counts[i] = atomic.Load64(&memstats.gcPauseDist.counts[i])
+ // The bottom-most bucket, containing negative values, is tracked
+ // separately as underflow, so fill that in manually and then
+ // iterate over the rest.
+ hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
+ for i := range memstats.gcPauseDist.counts {
+ hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
}
},
},
@@ -426,8 +451,8 @@ func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogr
v.pointer = unsafe.Pointer(hist)
}
hist.buckets = buckets
- if len(hist.counts) != len(hist.buckets)+1 {
- hist.counts = make([]uint64, len(buckets)+1)
+ if len(hist.counts) != len(hist.buckets)-1 {
+ hist.counts = make([]uint64, len(buckets)-1)
}
return hist
}
diff --git a/src/runtime/metrics/description.go b/src/runtime/metrics/description.go
index 32af5d1727..1175156104 100644
--- a/src/runtime/metrics/description.go
+++ b/src/runtime/metrics/description.go
@@ -23,6 +23,11 @@ type Description struct {
// Examples of units might be "seconds", "bytes", "bytes/second", "cpu-seconds",
// "byte*cpu-seconds", and "bytes/second/second".
//
+ // For histograms, multiple units may apply. For instance, the units of the buckets and
+ // the count. By convention, for histograms, the units of the count are always "samples"
+ // with the type of sample evident by the metric's name, while the unit in the name
+ // specifies the buckets' unit.
+ //
// A complete name might look like "/memory/heap/free:bytes".
Name string
@@ -41,10 +46,6 @@ type Description struct {
//
// This flag thus indicates whether or not it's useful to compute a rate from this value.
Cumulative bool
-
- // StopTheWorld is whether or not the metric requires a stop-the-world
- // event in order to collect it.
- StopTheWorld bool
}
// The English language descriptions below must be kept in sync with the
@@ -69,14 +70,16 @@ var allDesc = []Description{
Cumulative: true,
},
{
- Name: "/gc/heap/allocs-by-size:objects",
+ Name: "/gc/heap/allocs-by-size:bytes",
Description: "Distribution of all objects allocated by approximate size.",
Kind: KindFloat64Histogram,
+ Cumulative: true,
},
{
- Name: "/gc/heap/frees-by-size:objects",
+ Name: "/gc/heap/frees-by-size:bytes",
Description: "Distribution of all objects freed by approximate size.",
Kind: KindFloat64Histogram,
+ Cumulative: true,
},
{
Name: "/gc/heap/goal:bytes",
@@ -92,6 +95,7 @@ var allDesc = []Description{
Name: "/gc/pauses:seconds",
Description: "Distribution individual GC-related stop-the-world pause latencies.",
Kind: KindFloat64Histogram,
+ Cumulative: true,
},
{
Name: "/memory/classes/heap/free:bytes",
diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go
index a68184ee82..7f790afc12 100644
--- a/src/runtime/metrics/doc.go
+++ b/src/runtime/metrics/doc.go
@@ -16,13 +16,12 @@ Interface
Metrics are designated by a string key, rather than, for example, a field name in
a struct. The full list of supported metrics is always available in the slice of
Descriptions returned by All. Each Description also includes useful information
-about the metric, such as how to display it (e.g. gauge vs. counter) and how difficult
-or disruptive it is to obtain it (e.g. do you need to stop the world?).
+about the metric.
Thus, users of this API are encouraged to sample supported metrics defined by the
slice returned by All to remain compatible across Go versions. Of course, situations
arise where reading specific metrics is critical. For these cases, users are
-encouranged to use build tags, and although metrics may be deprecated and removed,
+encouraged to use build tags, and although metrics may be deprecated and removed,
users should consider this to be an exceptional and rare event, coinciding with a
very large change in a particular Go implementation.
@@ -61,10 +60,10 @@ Below is the full list of supported metrics, ordered lexicographically.
/gc/cycles/total:gc-cycles
Count of all completed GC cycles.
- /gc/heap/allocs-by-size:objects
+ /gc/heap/allocs-by-size:bytes
Distribution of all objects allocated by approximate size.
- /gc/heap/frees-by-size:objects
+ /gc/heap/frees-by-size:bytes
Distribution of all objects freed by approximate size.
/gc/heap/goal:bytes
diff --git a/src/runtime/metrics/example_test.go b/src/runtime/metrics/example_test.go
new file mode 100644
index 0000000000..624d9d8a6b
--- /dev/null
+++ b/src/runtime/metrics/example_test.go
@@ -0,0 +1,96 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package metrics_test
+
+import (
+ "fmt"
+ "runtime/metrics"
+)
+
+func ExampleRead_readingOneMetric() {
+ // Name of the metric we want to read.
+ const myMetric = "/memory/classes/heap/free:bytes"
+
+ // Create a sample for the metric.
+ sample := make([]metrics.Sample, 1)
+ sample[0].Name = myMetric
+
+ // Sample the metric.
+ metrics.Read(sample)
+
+ // Check if the metric is actually supported.
+ // If it's not, the resulting value will always have
+ // kind KindBad.
+ if sample[0].Value.Kind() == metrics.KindBad {
+ panic(fmt.Sprintf("metric %q no longer supported", myMetric))
+ }
+
+ // Handle the result.
+ //
+ // It's OK to assume a particular Kind for a metric;
+ // they're guaranteed not to change.
+ freeBytes := sample[0].Value.Uint64()
+
+ fmt.Printf("free but not released memory: %d\n", freeBytes)
+}
+
+func ExampleRead_readingAllMetrics() {
+ // Get descriptions for all supported metrics.
+ descs := metrics.All()
+
+ // Create a sample for each metric.
+ samples := make([]metrics.Sample, len(descs))
+ for i := range samples {
+ samples[i].Name = descs[i].Name
+ }
+
+ // Sample the metrics. Re-use the samples slice if you can!
+ metrics.Read(samples)
+
+ // Iterate over all results.
+ for _, sample := range samples {
+ // Pull out the name and value.
+ name, value := sample.Name, sample.Value
+
+ // Handle each sample.
+ switch value.Kind() {
+ case metrics.KindUint64:
+ fmt.Printf("%s: %d\n", name, value.Uint64())
+ case metrics.KindFloat64:
+ fmt.Printf("%s: %f\n", name, value.Float64())
+ case metrics.KindFloat64Histogram:
+ // The histogram may be quite large, so let's just pull out
+ // a crude estimate for the median for the sake of this example.
+ fmt.Printf("%s: %f\n", name, medianBucket(value.Float64Histogram()))
+ case metrics.KindBad:
+ // This should never happen because all metrics are supported
+ // by construction.
+ panic("bug in runtime/metrics package!")
+ default:
+ // This may happen as new metrics get added.
+ //
+ // The safest thing to do here is to simply log it somewhere
+ // as something to look into, but ignore it for now.
+ // In the worst case, you might temporarily miss out on a new metric.
+ fmt.Printf("%s: unexpected metric Kind: %v\n", name, value.Kind())
+ }
+ }
+}
+
+func medianBucket(h *metrics.Float64Histogram) float64 {
+ total := uint64(0)
+ for _, count := range h.Counts {
+ total += count
+ }
+ thresh := total / 2
+ total = 0
+ for i, count := range h.Counts {
+ total += count
+ if total >= thresh {
+ return h.Buckets[i]
+ }
+ }
+ panic("should not happen")
+}
diff --git a/src/runtime/metrics/histogram.go b/src/runtime/metrics/histogram.go
index e1364e1e26..956422bf84 100644
--- a/src/runtime/metrics/histogram.go
+++ b/src/runtime/metrics/histogram.go
@@ -6,25 +6,28 @@ package metrics
// Float64Histogram represents a distribution of float64 values.
type Float64Histogram struct {
- // Counts contains the weights for each histogram bucket. The length of
- // Counts is equal to the length of Buckets (in the metric description)
- // plus one to account for the implicit minimum bucket.
+ // Counts contains the weights for each histogram bucket.
//
- // Given N buckets, the following is the mathematical relationship between
- // Counts and Buckets.
- // count[0] is the weight of the range (-inf, bucket[0])
- // count[n] is the weight of the range [bucket[n], bucket[n+1]), for 0 < n < N-1
- // count[N-1] is the weight of the range [bucket[N-1], inf)
+ // Given N buckets, Count[n] is the weight of the range
+ // [bucket[n], bucket[n+1]), for 0 <= n < N.
Counts []uint64
- // Buckets contains the boundaries between histogram buckets, in increasing order.
+ // Buckets contains the boundaries of the histogram buckets, in increasing order.
//
- // Because this slice contains boundaries, there are len(Buckets)+1 counts:
- // a count for all values less than the first boundary, a count covering each
- // [slice[i], slice[i+1]) interval, and a count for all values greater than or
- // equal to the last boundary.
+ // Buckets[0] is the inclusive lower bound of the minimum bucket while
+ // Buckets[len(Buckets)-1] is the exclusive upper bound of the maximum bucket.
+ // Hence, there are len(Buckets)-1 counts. Furthermore, len(Buckets) != 1, always,
+ // since at least two boundaries are required to describe one bucket (and 0
+ // boundaries are used to describe 0 buckets).
+ //
+ // Buckets[0] is permitted to have value -Inf and Buckets[len(Buckets)-1] is
+ // permitted to have value Inf.
//
// For a given metric name, the value of Buckets is guaranteed not to change
// between calls until program exit.
+ //
+ // This slice value is permitted to alias with other Float64Histograms' Buckets
+ // fields, so the values within should only ever be read. If they need to be
+ // modified, the user must make a copy.
Buckets []float64
}
diff --git a/src/runtime/metrics/sample.go b/src/runtime/metrics/sample.go
index 35534dd70d..4cf8cdf799 100644
--- a/src/runtime/metrics/sample.go
+++ b/src/runtime/metrics/sample.go
@@ -14,7 +14,7 @@ type Sample struct {
// Name is the name of the metric sampled.
//
// It must correspond to a name in one of the metric descriptions
- // returned by Descriptions.
+ // returned by All.
Name string
// Value is the value of the metric sample.
@@ -32,9 +32,9 @@ func runtime_readMetrics(unsafe.Pointer, int, int)
//
// Note that re-use has some caveats. Notably, Values should not be read or
// manipulated while a Read with that value is outstanding; that is a data race.
-// This property includes pointer-typed Values (e.g. Float64Histogram) whose
-// underlying storage will be reused by Read when possible. To safely use such
-// values in a concurrent setting, all data must be deep-copied.
+// This property includes pointer-typed Values (for example, Float64Histogram)
+// whose underlying storage will be reused by Read when possible. To safely use
+// such values in a concurrent setting, all data must be deep-copied.
//
// It is safe to execute multiple Read calls concurrently, but their arguments
// must share no underlying memory. When in doubt, create a new []Sample from
diff --git a/src/runtime/metrics/value.go b/src/runtime/metrics/value.go
index 0b056b4ea8..ed9a33d87c 100644
--- a/src/runtime/metrics/value.go
+++ b/src/runtime/metrics/value.go
@@ -33,7 +33,7 @@ type Value struct {
pointer unsafe.Pointer // contains non-scalar values.
}
-// Kind returns the a tag representing the kind of value this is.
+// Kind returns the tag representing the kind of value this is.
func (v Value) Kind() ValueKind {
return v.kind
}
@@ -63,7 +63,7 @@ func (v Value) Float64() float64 {
// If v.Kind() != KindFloat64Histogram, this method panics.
func (v Value) Float64Histogram() *Float64Histogram {
if v.kind != KindFloat64Histogram {
- panic("called Float64 on non-float64 metric value")
+ panic("called Float64Histogram on non-Float64Histogram metric value")
}
return (*Float64Histogram)(v.pointer)
}
diff --git a/src/runtime/metrics_test.go b/src/runtime/metrics_test.go
index 167edd57fd..8a3cf019bd 100644
--- a/src/runtime/metrics_test.go
+++ b/src/runtime/metrics_test.go
@@ -70,6 +70,34 @@ func TestReadMetrics(t *testing.T) {
checkUint64(t, name, samples[i].Value.Uint64(), mstats.BuckHashSys)
case "/memory/classes/total:bytes":
checkUint64(t, name, samples[i].Value.Uint64(), mstats.Sys)
+ case "/gc/heap/allocs-by-size:bytes":
+ hist := samples[i].Value.Float64Histogram()
+ // Skip size class 0 in BySize, because it's always empty and not represented
+ // in the histogram.
+ for i, sc := range mstats.BySize[1:] {
+ if b, s := hist.Buckets[i+1], float64(sc.Size+1); b != s {
+ t.Errorf("bucket does not match size class: got %f, want %f", b, s)
+ // The rest of the checks aren't expected to work anyway.
+ continue
+ }
+ if c, m := hist.Counts[i], sc.Mallocs; c != m {
+ t.Errorf("histogram counts do not much BySize for class %d: got %d, want %d", i, c, m)
+ }
+ }
+ case "/gc/heap/frees-by-size:bytes":
+ hist := samples[i].Value.Float64Histogram()
+ // Skip size class 0 in BySize, because it's always empty and not represented
+ // in the histogram.
+ for i, sc := range mstats.BySize[1:] {
+ if b, s := hist.Buckets[i+1], float64(sc.Size+1); b != s {
+ t.Errorf("bucket does not match size class: got %f, want %f", b, s)
+ // The rest of the checks aren't expected to work anyway.
+ continue
+ }
+ if c, f := hist.Counts[i], sc.Frees; c != f {
+ t.Errorf("histogram counts do not much BySize for class %d: got %d, want %d", i, c, f)
+ }
+ }
case "/gc/heap/objects:objects":
checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapObjects)
case "/gc/heap/goal:bytes":
@@ -133,9 +161,9 @@ func TestReadMetricsConsistency(t *testing.T) {
totalVirtual.got = samples[i].Value.Uint64()
case "/gc/heap/objects:objects":
objects.total = samples[i].Value.Uint64()
- case "/gc/heap/allocs-by-size:objects":
+ case "/gc/heap/allocs-by-size:bytes":
objects.alloc = samples[i].Value.Float64Histogram()
- case "/gc/heap/frees-by-size:objects":
+ case "/gc/heap/frees-by-size:bytes":
objects.free = samples[i].Value.Float64Histogram()
case "/gc/cycles:gc-cycles":
gc.numGC = samples[i].Value.Uint64()
@@ -154,6 +182,12 @@ func TestReadMetricsConsistency(t *testing.T) {
if totalVirtual.got != totalVirtual.want {
t.Errorf(`"/memory/classes/total:bytes" does not match sum of /memory/classes/**: got %d, want %d`, totalVirtual.got, totalVirtual.want)
}
+ if b, c := len(objects.alloc.Buckets), len(objects.alloc.Counts); b != c+1 {
+ t.Errorf("allocs-by-size has wrong bucket or counts length: %d buckets, %d counts", b, c)
+ }
+ if b, c := len(objects.free.Buckets), len(objects.free.Counts); b != c+1 {
+ t.Errorf("frees-by-size has wrong bucket or counts length: %d buckets, %d counts", b, c)
+ }
if len(objects.alloc.Buckets) != len(objects.free.Buckets) {
t.Error("allocs-by-size and frees-by-size buckets don't match in length")
} else if len(objects.alloc.Counts) != len(objects.free.Counts) {
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index f4dbd77252..7d0313be12 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -7,6 +7,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -219,7 +220,11 @@ func runfinq() {
throw("bad kind in runfinq")
}
fingRunning = true
- reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
+ // Pass a dummy RegArgs for now.
+ //
+ // TODO(mknyszek): Pass arguments in registers.
+ var regs abi.RegArgs
+ reflectcall(nil, unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz), uint32(framesz), &regs)
fingRunning = false
// Drop finalizer queue heap references
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 5a24cdac88..46fae5de72 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -101,8 +101,7 @@ func gcMarkRootPrepare() {
// Gs may be created after this point, but it's okay that we
// ignore them because they begin life without any roots, so
// there's nothing to scan, and any roots they create during
- // the concurrent phase will be scanned during mark
- // termination.
+ // the concurrent phase will be caught by the write barrier.
work.nStackRoots = int(atomic.Loaduintptr(&allglen))
work.markrootNext = 0
@@ -133,7 +132,6 @@ fail:
println("gp", gp, "goid", gp.goid,
"status", readgstatus(gp),
"gcscandone", gp.gcscandone)
- unlock(&allglock) // Avoid self-deadlock with traceback.
throw("scan missed a g")
}
diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go
index 38f09309dc..a7c5bc49b8 100644
--- a/src/runtime/mgcscavenge.go
+++ b/src/runtime/mgcscavenge.go
@@ -562,7 +562,7 @@ func (p *pageAlloc) scavengeUnreserve(r addrRange, gen uint32) {
func (p *pageAlloc) scavengeOne(work addrRange, max uintptr, mayUnlock bool) (uintptr, addrRange) {
assertLockHeld(p.mheapLock)
- // Defensively check if we've recieved an empty address range.
+ // Defensively check if we've received an empty address range.
// If so, just return.
if work.size() == 0 {
// Nothing to do.
diff --git a/src/runtime/mkduff.go b/src/runtime/mkduff.go
index 94ae75fbfe..ef297f073e 100644
--- a/src/runtime/mkduff.go
+++ b/src/runtime/mkduff.go
@@ -62,15 +62,15 @@ func gen(arch string, tags, zero, copy func(io.Writer)) {
func notags(w io.Writer) { fmt.Fprintln(w) }
func zeroAMD64(w io.Writer) {
- // X0: zero
+ // X15: zero
// DI: ptr to memory to be zeroed
// DI is updated as a side effect.
- fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT, $0-0")
+ fmt.Fprintln(w, "TEXT runtime·duffzero<ABIInternal>(SB), NOSPLIT, $0-0")
for i := 0; i < 16; i++ {
- fmt.Fprintln(w, "\tMOVUPS\tX0,(DI)")
- fmt.Fprintln(w, "\tMOVUPS\tX0,16(DI)")
- fmt.Fprintln(w, "\tMOVUPS\tX0,32(DI)")
- fmt.Fprintln(w, "\tMOVUPS\tX0,48(DI)")
+ fmt.Fprintln(w, "\tMOVUPS\tX15,(DI)")
+ fmt.Fprintln(w, "\tMOVUPS\tX15,16(DI)")
+ fmt.Fprintln(w, "\tMOVUPS\tX15,32(DI)")
+ fmt.Fprintln(w, "\tMOVUPS\tX15,48(DI)")
fmt.Fprintln(w, "\tLEAQ\t64(DI),DI") // We use lea instead of add, to avoid clobbering flags
fmt.Fprintln(w)
}
@@ -84,7 +84,7 @@ func copyAMD64(w io.Writer) {
//
// This is equivalent to a sequence of MOVSQ but
// for some reason that is 3.5x slower than this code.
- fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT, $0-0")
+ fmt.Fprintln(w, "TEXT runtime·duffcopy<ABIInternal>(SB), NOSPLIT, $0-0")
for i := 0; i < 64; i++ {
fmt.Fprintln(w, "\tMOVUPS\t(SI), X0")
fmt.Fprintln(w, "\tADDQ\t$16, SI")
diff --git a/src/runtime/mmap.go b/src/runtime/mmap.go
index 9fe31cb416..1b1848b79e 100644
--- a/src/runtime/mmap.go
+++ b/src/runtime/mmap.go
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !aix
+// +build !darwin
+// +build !js
+// +build !linux !amd64
+// +build !linux !arm64
+// +build !openbsd
// +build !plan9
// +build !solaris
// +build !windows
-// +build !linux !amd64
-// +build !linux !arm64
-// +build !js
-// +build !darwin
-// +build !aix
package runtime
diff --git a/src/runtime/msan0.go b/src/runtime/msan0.go
index 117c5e5789..374d13f30b 100644
--- a/src/runtime/msan0.go
+++ b/src/runtime/msan0.go
@@ -16,7 +16,8 @@ const msanenabled = false
// Because msanenabled is false, none of these functions should be called.
-func msanread(addr unsafe.Pointer, sz uintptr) { throw("msan") }
-func msanwrite(addr unsafe.Pointer, sz uintptr) { throw("msan") }
-func msanmalloc(addr unsafe.Pointer, sz uintptr) { throw("msan") }
-func msanfree(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanread(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanwrite(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanmalloc(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanfree(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanmove(dst, src unsafe.Pointer, sz uintptr) { throw("msan") }
diff --git a/src/runtime/os2_aix.go b/src/runtime/os2_aix.go
index 428ff7f225..4d77f0de6d 100644
--- a/src/runtime/os2_aix.go
+++ b/src/runtime/os2_aix.go
@@ -18,11 +18,11 @@ import (
//go:cgo_import_dynamic libc___n_pthreads __n_pthreads "libpthread.a/shr_xpg5_64.o"
//go:cgo_import_dynamic libc___mod_init __mod_init "libc.a/shr_64.o"
-//go:linkname libc___n_pthreads libc___n_pthread
+//go:linkname libc___n_pthreads libc___n_pthreads
//go:linkname libc___mod_init libc___mod_init
var (
- libc___n_pthread,
+ libc___n_pthreads,
libc___mod_init libFunc
)
@@ -527,20 +527,17 @@ func internal_cpu_getsystemcfg(label uint) uint {
func usleep1(us uint32)
//go:nosplit
-func usleep(us uint32) {
- _g_ := getg()
+func usleep_no_g(us uint32) {
+ usleep1(us)
+}
- // Check the validity of m because we might be called in cgo callback
- // path early enough where there isn't a g or a m available yet.
- if _g_ != nil && _g_.m != nil {
- r, err := syscall1(&libc_usleep, uintptr(us))
- if int32(r) == -1 {
- println("syscall usleep failed: ", hex(err))
- throw("syscall usleep")
- }
- return
+//go:nosplit
+func usleep(us uint32) {
+ r, err := syscall1(&libc_usleep, uintptr(us))
+ if int32(r) == -1 {
+ println("syscall usleep failed: ", hex(err))
+ throw("syscall usleep")
}
- usleep1(us)
}
//go:nosplit
@@ -611,20 +608,17 @@ func raiseproc(sig uint32) {
func osyield1()
//go:nosplit
-func osyield() {
- _g_ := getg()
+func osyield_no_g() {
+ osyield1()
+}
- // Check the validity of m because it might be called during a cgo
- // callback early enough where m isn't available yet.
- if _g_ != nil && _g_.m != nil {
- r, err := syscall0(&libc_sched_yield)
- if int32(r) == -1 {
- println("syscall osyield failed: ", hex(err))
- throw("syscall osyield")
- }
- return
+//go:nosplit
+func osyield() {
+ r, err := syscall0(&libc_sched_yield)
+ if int32(r) == -1 {
+ println("syscall osyield failed: ", hex(err))
+ throw("syscall osyield")
}
- osyield1()
}
//go:nosplit
diff --git a/src/runtime/os3_plan9.go b/src/runtime/os3_plan9.go
index 15ca3359d2..b6ee98cab6 100644
--- a/src/runtime/os3_plan9.go
+++ b/src/runtime/os3_plan9.go
@@ -92,10 +92,6 @@ func sighandler(_ureg *ureg, note *byte, gp *g) int {
if usesLR {
c.setlr(pc)
} else {
- if sys.RegSize > sys.PtrSize {
- sp -= sys.PtrSize
- *(*uintptr)(unsafe.Pointer(sp)) = 0
- }
sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = pc
c.setsp(sp)
diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go
index d6e36fbfbb..4b65139eb8 100644
--- a/src/runtime/os3_solaris.go
+++ b/src/runtime/os3_solaris.go
@@ -227,6 +227,11 @@ func unminit() {
unminitSignals()
}
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
func sigtramp()
//go:nosplit
@@ -517,6 +522,11 @@ func sysconf(name int32) int64 {
func usleep1(usec uint32)
//go:nosplit
+func usleep_no_g(µs uint32) {
+ usleep1(µs)
+}
+
+//go:nosplit
func usleep(µs uint32) {
usleep1(µs)
}
@@ -564,18 +574,15 @@ func setNonblock(fd int32) {
func osyield1()
//go:nosplit
-func osyield() {
- _g_ := getg()
-
- // Check the validity of m because we might be called in cgo callback
- // path early enough where there isn't a m available yet.
- if _g_ != nil && _g_.m != nil {
- sysvicall0(&libc_sched_yield)
- return
- }
+func osyield_no_g() {
osyield1()
}
+//go:nosplit
+func osyield() {
+ sysvicall0(&libc_sched_yield)
+}
+
//go:linkname executablePath os.executablePath
var executablePath string
diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go
index 0c501be96a..303f0876de 100644
--- a/src/runtime/os_aix.go
+++ b/src/runtime/os_aix.go
@@ -180,6 +180,11 @@ func unminit() {
unminitSignals()
}
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
// tstart is a function descriptor to _tstart defined in assembly.
var tstart funcDescriptor
diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go
index e0a43c28aa..470698d0a3 100644
--- a/src/runtime/os_darwin.go
+++ b/src/runtime/os_darwin.go
@@ -325,6 +325,16 @@ func unminit() {
}
}
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
+//go:nosplit
+func osyield_no_g() {
+ usleep_no_g(1)
+}
+
//go:nosplit
func osyield() {
usleep(1)
diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go
index 6578fcbeb1..b786c8ab5f 100644
--- a/src/runtime/os_dragonfly.go
+++ b/src/runtime/os_dragonfly.go
@@ -51,6 +51,11 @@ func sys_umtx_wakeup(addr *uint32, val int32) int32
func osyield()
+//go:nosplit
+func osyield_no_g() {
+ osyield()
+}
+
func kqueue() int32
//go:noescape
@@ -203,6 +208,11 @@ func unminit() {
unminitSignals()
}
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
func sigtramp()
type sigactiont struct {
diff --git a/src/runtime/os_freebsd.go b/src/runtime/os_freebsd.go
index 730973a202..09dd50ce59 100644
--- a/src/runtime/os_freebsd.go
+++ b/src/runtime/os_freebsd.go
@@ -36,6 +36,11 @@ func sys_umtx_op(addr *uint32, mode int32, val uint32, uaddr1 uintptr, ut *umtx_
func osyield()
+//go:nosplit
+func osyield_no_g() {
+ osyield()
+}
+
func kqueue() int32
//go:noescape
@@ -166,7 +171,7 @@ func futexsleep1(addr *uint32, val uint32, ns int64) {
utp = &ut
}
ret := sys_umtx_op(addr, _UMTX_OP_WAIT_UINT_PRIVATE, val, unsafe.Sizeof(*utp), utp)
- if ret >= 0 || ret == -_EINTR {
+ if ret >= 0 || ret == -_EINTR || ret == -_ETIMEDOUT {
return
}
print("umtx_wait addr=", addr, " val=", val, " ret=", ret, "\n")
@@ -208,7 +213,6 @@ func newosproc(mp *m) {
var oset sigset
sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- // TODO: Check for error.
ret := thr_new(&param, int32(unsafe.Sizeof(param)))
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret < 0 {
@@ -320,6 +324,11 @@ func unminit() {
unminitSignals()
}
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
func sigtramp()
type sigactiont struct {
diff --git a/src/runtime/os_js.go b/src/runtime/os_js.go
index 94983b358d..5b2c53795a 100644
--- a/src/runtime/os_js.go
+++ b/src/runtime/os_js.go
@@ -30,12 +30,22 @@ func wasmWrite(fd uintptr, p unsafe.Pointer, n int32)
func usleep(usec uint32)
+//go:nosplit
+func usleep_no_g(usec uint32) {
+ usleep(usec)
+}
+
func exitThread(wait *uint32)
type mOS struct{}
func osyield()
+//go:nosplit
+func osyield_no_g() {
+ osyield()
+}
+
const _SIGSEGV = 0xb
func sigpanic() {
@@ -72,7 +82,7 @@ func clearSignalHandlers() {
}
//go:nosplit
-func sigblock() {
+func sigblock(exiting bool) {
}
// Called to initialize a new m (including the bootstrap m).
@@ -84,6 +94,11 @@ func minit() {
func unminit() {
}
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
func osinit() {
ncpu = 1
getg().m.procid = 2
diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go
index 371db73502..21d3ae653e 100644
--- a/src/runtime/os_linux.go
+++ b/src/runtime/os_linux.go
@@ -301,6 +301,24 @@ func getHugePageSize() uintptr {
func osinit() {
ncpu = getproccount()
physHugePageSize = getHugePageSize()
+ if iscgo {
+ // #42494 glibc and musl reserve some signals for
+ // internal use and require they not be blocked by
+ // the rest of a normal C runtime. When the go runtime
+ // blocks...unblocks signals, temporarily, the blocked
+ // interval of time is generally very short. As such,
+ // these expectations of *libc code are mostly met by
+ // the combined go+cgo system of threads. However,
+ // when go causes a thread to exit, via a return from
+ // mstart(), the combined runtime can deadlock if
+ // these signals are blocked. Thus, don't block these
+ // signals when exiting threads.
+ // - glibc: SIGCANCEL (32), SIGSETXID (33)
+ // - musl: SIGTIMER (32), SIGCANCEL (33), SIGSYNCCALL (34)
+ sigdelset(&sigsetAllExiting, 32)
+ sigdelset(&sigsetAllExiting, 33)
+ sigdelset(&sigsetAllExiting, 34)
+ }
osArchInit()
}
@@ -357,6 +375,11 @@ func unminit() {
unminitSignals()
}
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
//#ifdef GOARCH_386
//#define sa_handler k_sa_handler
//#endif
@@ -387,6 +410,11 @@ func raiseproc(sig uint32)
func sched_getaffinity(pid, len uintptr, buf *byte) int32
func osyield()
+//go:nosplit
+func osyield_no_g() {
+ osyield()
+}
+
func pipe() (r, w int32, errno int32)
func pipe2(flags int32) (r, w int32, errno int32)
func setNonblock(fd int32)
diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go
index f7f90cedc1..0328fa57ae 100644
--- a/src/runtime/os_netbsd.go
+++ b/src/runtime/os_netbsd.go
@@ -67,6 +67,11 @@ func lwp_self() int32
func osyield()
+//go:nosplit
+func osyield_no_g() {
+ osyield()
+}
+
func kqueue() int32
//go:noescape
@@ -290,6 +295,11 @@ func unminit() {
unminitSignals()
}
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
func sigtramp()
type sigactiont struct {
diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go
index d7960f4c91..6259b96c22 100644
--- a/src/runtime/os_openbsd.go
+++ b/src/runtime/os_openbsd.go
@@ -6,7 +6,6 @@ package runtime
import (
"runtime/internal/atomic"
- "runtime/internal/sys"
"unsafe"
)
@@ -14,60 +13,6 @@ type mOS struct {
waitsemacount uint32
}
-//go:noescape
-func setitimer(mode int32, new, old *itimerval)
-
-//go:noescape
-func sigaction(sig uint32, new, old *sigactiont)
-
-//go:noescape
-func sigaltstack(new, old *stackt)
-
-//go:noescape
-func obsdsigprocmask(how int32, new sigset) sigset
-
-//go:nosplit
-//go:nowritebarrierrec
-func sigprocmask(how int32, new, old *sigset) {
- n := sigset(0)
- if new != nil {
- n = *new
- }
- r := obsdsigprocmask(how, n)
- if old != nil {
- *old = r
- }
-}
-
-//go:noescape
-func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-
-func raiseproc(sig uint32)
-
-func getthrid() int32
-func thrkill(tid int32, sig int)
-
-//go:noescape
-func tfork(param *tforkt, psize uintptr, mm *m, gg *g, fn uintptr) int32
-
-//go:noescape
-func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32
-
-//go:noescape
-func thrwakeup(ident uintptr, n int32) int32
-
-func osyield()
-
-func kqueue() int32
-
-//go:noescape
-func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
-
-func pipe() (r, w int32, errno int32)
-func pipe2(flags int32) (r, w int32, errno int32)
-func closeonexec(fd int32)
-func setNonblock(fd int32)
-
const (
_ESRCH = 3
_EWOULDBLOCK = _EAGAIN
@@ -183,36 +128,6 @@ func semawakeup(mp *m) {
}
}
-// May run with m.p==nil, so write barriers are not allowed.
-//go:nowritebarrier
-func newosproc(mp *m) {
- stk := unsafe.Pointer(mp.g0.stack.hi)
- if false {
- print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
- }
-
- // Stack pointer must point inside stack area (as marked with MAP_STACK),
- // rather than at the top of it.
- param := tforkt{
- tf_tcb: unsafe.Pointer(&mp.tls[0]),
- tf_tid: nil, // minit will record tid
- tf_stack: uintptr(stk) - sys.PtrSize,
- }
-
- var oset sigset
- sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
- ret := tfork(&param, unsafe.Sizeof(param), mp, mp.g0, funcPC(mstart))
- sigprocmask(_SIG_SETMASK, &oset, nil)
-
- if ret < 0 {
- print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", -ret, ")\n")
- if ret == -_EAGAIN {
- println("runtime: may need to increase max user processes (ulimit -p)")
- }
- throw("runtime.newosproc")
- }
-}
-
func osinit() {
ncpu = getncpu()
physPageSize = getPageSize()
@@ -257,6 +172,11 @@ func unminit() {
unminitSignals()
}
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
func sigtramp()
type sigactiont struct {
diff --git a/src/runtime/os_openbsd_libc.go b/src/runtime/os_openbsd_libc.go
new file mode 100644
index 0000000000..2edb0358b0
--- /dev/null
+++ b/src/runtime/os_openbsd_libc.go
@@ -0,0 +1,58 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build openbsd,amd64 openbsd,arm64
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+var failThreadCreate = []byte("runtime: failed to create new OS thread\n")
+
+// mstart_stub provides glue code to call mstart from pthread_create.
+func mstart_stub()
+
+// May run with m.p==nil, so write barriers are not allowed.
+//go:nowritebarrierrec
+func newosproc(mp *m) {
+ if false {
+ print("newosproc m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
+ }
+
+ // Initialize an attribute object.
+ var attr pthreadattr
+ if err := pthread_attr_init(&attr); err != 0 {
+ write(2, unsafe.Pointer(&failThreadCreate[0]), int32(len(failThreadCreate)))
+ exit(1)
+ }
+
+ // Find out OS stack size for our own stack guard.
+ var stacksize uintptr
+ if pthread_attr_getstacksize(&attr, &stacksize) != 0 {
+ write(2, unsafe.Pointer(&failThreadCreate[0]), int32(len(failThreadCreate)))
+ exit(1)
+ }
+ mp.g0.stack.hi = stacksize // for mstart
+
+ // Tell the pthread library we won't join with this thread.
+ if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
+ write(2, unsafe.Pointer(&failThreadCreate[0]), int32(len(failThreadCreate)))
+ exit(1)
+ }
+
+ // Finally, create the thread. It starts at mstart_stub, which does some low-level
+ // setup and then calls mstart.
+ var oset sigset
+ sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
+ err := pthread_create(&attr, funcPC(mstart_stub), unsafe.Pointer(mp))
+ sigprocmask(_SIG_SETMASK, &oset, nil)
+ if err != 0 {
+ write(2, unsafe.Pointer(&failThreadCreate[0]), int32(len(failThreadCreate)))
+ exit(1)
+ }
+
+ pthread_attr_destroy(&attr)
+}
diff --git a/src/runtime/os_openbsd_syscall.go b/src/runtime/os_openbsd_syscall.go
new file mode 100644
index 0000000000..16ff2b8e25
--- /dev/null
+++ b/src/runtime/os_openbsd_syscall.go
@@ -0,0 +1,46 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build openbsd,!amd64
+// +build openbsd,!arm64
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+//go:noescape
+func tfork(param *tforkt, psize uintptr, mm *m, gg *g, fn uintptr) int32
+
+// May run with m.p==nil, so write barriers are not allowed.
+//go:nowritebarrier
+func newosproc(mp *m) {
+ stk := unsafe.Pointer(mp.g0.stack.hi)
+ if false {
+ print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
+ }
+
+ // Stack pointer must point inside stack area (as marked with MAP_STACK),
+ // rather than at the top of it.
+ param := tforkt{
+ tf_tcb: unsafe.Pointer(&mp.tls[0]),
+ tf_tid: nil, // minit will record tid
+ tf_stack: uintptr(stk) - sys.PtrSize,
+ }
+
+ var oset sigset
+ sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
+ ret := tfork(&param, unsafe.Sizeof(param), mp, mp.g0, funcPC(mstart))
+ sigprocmask(_SIG_SETMASK, &oset, nil)
+
+ if ret < 0 {
+ print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", -ret, ")\n")
+ if ret == -_EAGAIN {
+ println("runtime: may need to increase max user processes (ulimit -p)")
+ }
+ throw("runtime.newosproc")
+ }
+}
diff --git a/src/runtime/os_openbsd_syscall1.go b/src/runtime/os_openbsd_syscall1.go
new file mode 100644
index 0000000000..f37da04194
--- /dev/null
+++ b/src/runtime/os_openbsd_syscall1.go
@@ -0,0 +1,20 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build openbsd,!amd64,!arm64
+
+package runtime
+
+//go:noescape
+func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32
+
+//go:noescape
+func thrwakeup(ident uintptr, n int32) int32
+
+func osyield()
+
+//go:nosplit
+func osyield_no_g() {
+ osyield()
+}
diff --git a/src/runtime/os_openbsd_syscall2.go b/src/runtime/os_openbsd_syscall2.go
new file mode 100644
index 0000000000..81cfb085aa
--- /dev/null
+++ b/src/runtime/os_openbsd_syscall2.go
@@ -0,0 +1,100 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build openbsd,!amd64,!arm64
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+//go:noescape
+func sigaction(sig uint32, new, old *sigactiont)
+
+func kqueue() int32
+
+//go:noescape
+func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
+
+func raiseproc(sig uint32)
+
+func getthrid() int32
+func thrkill(tid int32, sig int)
+
+// read calls the read system call.
+// It returns a non-negative number of bytes written or a negative errno value.
+func read(fd int32, p unsafe.Pointer, n int32) int32
+
+func closefd(fd int32) int32
+
+func exit(code int32)
+func usleep(usec uint32)
+
+//go:nosplit
+func usleep_no_g(usec uint32) {
+ usleep(usec)
+}
+
+// write calls the write system call.
+// It returns a non-negative number of bytes written or a negative errno value.
+//go:noescape
+func write1(fd uintptr, p unsafe.Pointer, n int32) int32
+
+//go:noescape
+func open(name *byte, mode, perm int32) int32
+
+// return value is only set on linux to be used in osinit()
+func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32
+
+// exitThread terminates the current thread, writing *wait = 0 when
+// the stack is safe to reclaim.
+//
+//go:noescape
+func exitThread(wait *uint32)
+
+//go:noescape
+func obsdsigprocmask(how int32, new sigset) sigset
+
+//go:nosplit
+//go:nowritebarrierrec
+func sigprocmask(how int32, new, old *sigset) {
+ n := sigset(0)
+ if new != nil {
+ n = *new
+ }
+ r := obsdsigprocmask(how, n)
+ if old != nil {
+ *old = r
+ }
+}
+
+func pipe() (r, w int32, errno int32)
+func pipe2(flags int32) (r, w int32, errno int32)
+
+//go:noescape
+func setitimer(mode int32, new, old *itimerval)
+
+//go:noescape
+func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
+
+// mmap calls the mmap system call. It is implemented in assembly.
+// We only pass the lower 32 bits of file offset to the
+// assembly routine; the higher bits (if required), should be provided
+// by the assembly routine as 0.
+// The err result is an OS error code such as ENOMEM.
+func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (p unsafe.Pointer, err int)
+
+// munmap calls the munmap system call. It is implemented in assembly.
+func munmap(addr unsafe.Pointer, n uintptr)
+
+func nanotime1() int64
+
+//go:noescape
+func sigaltstack(new, old *stackt)
+
+func closeonexec(fd int32)
+func setNonblock(fd int32)
+
+func walltime1() (sec int64, nsec int32)
diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go
index 62aecea060..77665f461a 100644
--- a/src/runtime/os_plan9.go
+++ b/src/runtime/os_plan9.go
@@ -195,7 +195,7 @@ func msigrestore(sigmask sigset) {
func clearSignalHandlers() {
}
-func sigblock() {
+func sigblock(exiting bool) {
}
// Called to initialize a new m (including the bootstrap m).
@@ -213,6 +213,11 @@ func minit() {
func unminit() {
}
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+func mdestroy(mp *m) {
+}
+
var sysstat = []byte("/dev/sysstat\x00")
func getproccount() int32 {
@@ -335,6 +340,11 @@ func osyield() {
}
//go:nosplit
+func osyield_no_g() {
+ osyield()
+}
+
+//go:nosplit
func usleep(µs uint32) {
ms := int32(µs / 1000)
if ms == 0 {
@@ -344,6 +354,11 @@ func usleep(µs uint32) {
}
//go:nosplit
+func usleep_no_g(usec uint32) {
+ usleep(usec)
+}
+
+//go:nosplit
func nanotime1() int64 {
var scratch int64
ns := nsec(&scratch)
diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go
index ffb087f9db..f4e21a93ed 100644
--- a/src/runtime/os_windows.go
+++ b/src/runtime/os_windows.go
@@ -46,6 +46,7 @@ const (
//go:cgo_import_dynamic runtime._SetThreadPriority SetThreadPriority%2 "kernel32.dll"
//go:cgo_import_dynamic runtime._SetUnhandledExceptionFilter SetUnhandledExceptionFilter%1 "kernel32.dll"
//go:cgo_import_dynamic runtime._SetWaitableTimer SetWaitableTimer%6 "kernel32.dll"
+//go:cgo_import_dynamic runtime._Sleep Sleep%1 "kernel32.dll"
//go:cgo_import_dynamic runtime._SuspendThread SuspendThread%1 "kernel32.dll"
//go:cgo_import_dynamic runtime._SwitchToThread SwitchToThread%0 "kernel32.dll"
//go:cgo_import_dynamic runtime._TlsAlloc TlsAlloc%0 "kernel32.dll"
@@ -97,6 +98,7 @@ var (
_SetThreadPriority,
_SetUnhandledExceptionFilter,
_SetWaitableTimer,
+ _Sleep,
_SuspendThread,
_SwitchToThread,
_TlsAlloc,
@@ -146,6 +148,9 @@ func tstart_stdcall(newm *m)
// Called by OS using stdcall ABI.
func ctrlhandler()
+// Init-time helper
+func wintls()
+
type mOS struct {
threadLock mutex // protects "thread" and prevents closing
thread uintptr // thread handle
@@ -234,6 +239,8 @@ func windowsLoadSystemLib(name []byte) uintptr {
}
}
+const haveCputicksAsm = GOARCH == "386" || GOARCH == "amd64"
+
func loadOptionalSyscalls() {
var kernel32dll = []byte("kernel32.dll\000")
k32 := stdcall1(_LoadLibraryA, uintptr(unsafe.Pointer(&kernel32dll[0])))
@@ -260,7 +267,7 @@ func loadOptionalSyscalls() {
}
_NtWaitForSingleObject = windowsFindfunc(n32, []byte("NtWaitForSingleObject\000"))
- if GOARCH == "arm" {
+ if !haveCputicksAsm {
_QueryPerformanceCounter = windowsFindfunc(k32, []byte("QueryPerformanceCounter\000"))
if _QueryPerformanceCounter == nil {
throw("could not find QPC syscalls")
@@ -377,7 +384,6 @@ const (
// in sys_windows_386.s and sys_windows_amd64.s:
func externalthreadhandler()
func getlasterror() uint32
-func setlasterror(err uint32)
// When loading DLLs, we prefer to use LoadLibraryEx with
// LOAD_LIBRARY_SEARCH_* flags, if available. LoadLibraryEx is not
@@ -451,23 +457,22 @@ func createHighResTimer() uintptr {
_SYNCHRONIZE|_TIMER_QUERY_STATE|_TIMER_MODIFY_STATE)
}
+const highResTimerSupported = GOARCH == "386" || GOARCH == "amd64"
+
func initHighResTimer() {
- if GOARCH == "arm" {
+ if !highResTimerSupported {
// TODO: Not yet implemented.
return
}
h := createHighResTimer()
if h != 0 {
haveHighResTimer = true
- usleep2Addr = unsafe.Pointer(funcPC(usleep2HighRes))
stdcall1(_CloseHandle, h)
}
}
func osinit() {
asmstdcallAddr = unsafe.Pointer(funcPC(asmstdcall))
- usleep2Addr = unsafe.Pointer(funcPC(usleep2))
- switchtothreadAddr = unsafe.Pointer(funcPC(switchtothread))
setBadSignalMsg()
@@ -886,29 +891,30 @@ func clearSignalHandlers() {
}
//go:nosplit
-func sigblock() {
+func sigblock(exiting bool) {
}
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
var thandle uintptr
- stdcall7(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS)
+ if stdcall7(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+ print("runtime.minit: duplicatehandle failed; errno=", getlasterror(), "\n")
+ throw("runtime.minit: duplicatehandle failed")
+ }
+
+ mp := getg().m
+ lock(&mp.threadLock)
+ mp.thread = thandle
// Configure usleep timer, if possible.
- var timer uintptr
- if haveHighResTimer {
- timer = createHighResTimer()
- if timer == 0 {
+ if mp.highResTimer == 0 && haveHighResTimer {
+ mp.highResTimer = createHighResTimer()
+ if mp.highResTimer == 0 {
print("runtime: CreateWaitableTimerEx failed; errno=", getlasterror(), "\n")
throw("CreateWaitableTimerEx when creating timer failed")
}
}
-
- mp := getg().m
- lock(&mp.threadLock)
- mp.thread = thandle
- mp.highResTimer = timer
unlock(&mp.threadLock)
// Query the true stack base from the OS. Currently we're
@@ -944,13 +950,29 @@ func minit() {
func unminit() {
mp := getg().m
lock(&mp.threadLock)
- stdcall1(_CloseHandle, mp.thread)
- mp.thread = 0
+ if mp.thread != 0 {
+ stdcall1(_CloseHandle, mp.thread)
+ mp.thread = 0
+ }
+ unlock(&mp.threadLock)
+}
+
+// Called from exitm, but not from drop, to undo the effect of thread-owned
+// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
+//go:nosplit
+func mdestroy(mp *m) {
if mp.highResTimer != 0 {
stdcall1(_CloseHandle, mp.highResTimer)
mp.highResTimer = 0
}
- unlock(&mp.threadLock)
+ if mp.waitsema != 0 {
+ stdcall1(_CloseHandle, mp.waitsema)
+ mp.waitsema = 0
+ }
+ if mp.resumesema != 0 {
+ stdcall1(_CloseHandle, mp.resumesema)
+ mp.resumesema = 0
+ }
}
// Calling stdcall on os stack.
@@ -1042,26 +1064,39 @@ func stdcall7(fn stdFunction, a0, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
return stdcall(fn)
}
-// In sys_windows_386.s and sys_windows_amd64.s.
-func onosstack(fn unsafe.Pointer, arg uint32)
-
-// These are not callable functions. They should only be called via onosstack.
-func usleep2(usec uint32)
-func usleep2HighRes(usec uint32)
+// These must run on the system stack only.
+func usleep2(dt int32)
+func usleep2HighRes(dt int32)
func switchtothread()
-var usleep2Addr unsafe.Pointer
-var switchtothreadAddr unsafe.Pointer
+//go:nosplit
+func osyield_no_g() {
+ switchtothread()
+}
//go:nosplit
func osyield() {
- onosstack(switchtothreadAddr, 0)
+ systemstack(switchtothread)
+}
+
+//go:nosplit
+func usleep_no_g(us uint32) {
+ dt := -10 * int32(us) // relative sleep (negative), 100ns units
+ usleep2(dt)
}
//go:nosplit
func usleep(us uint32) {
- // Have 1us units; want 100ns units.
- onosstack(usleep2Addr, 10*us)
+ systemstack(func() {
+ dt := -10 * int32(us) // relative sleep (negative), 100ns units
+ // If the high-res timer is available and its handle has been allocated for this m, use it.
+ // Otherwise fall back to the low-res one, which doesn't need a handle.
+ if haveHighResTimer && getg().m.highResTimer != 0 {
+ usleep2HighRes(dt)
+ } else {
+ usleep2(dt)
+ }
+ })
}
func ctrlhandler1(_type uint32) uint32 {
@@ -1077,6 +1112,11 @@ func ctrlhandler1(_type uint32) uint32 {
}
if sigsend(s) {
+ if s == _SIGTERM {
+ // Windows terminates the process after this handler returns.
+ // Block indefinitely to give signal handlers a chance to clean up.
+ stdcall1(_Sleep, uintptr(_INFINITE))
+ }
return 1
}
return 0
@@ -1099,21 +1139,21 @@ func profilem(mp *m, thread uintptr) {
c.contextflags = _CONTEXT_CONTROL
stdcall2(_GetThreadContext, thread, uintptr(unsafe.Pointer(c)))
- gp := gFromTLS(mp)
+ gp := gFromSP(mp, c.sp())
sigprof(c.ip(), c.sp(), c.lr(), gp, mp)
}
-func gFromTLS(mp *m) *g {
- switch GOARCH {
- case "arm":
- tls := &mp.tls[0]
- return **((***g)(unsafe.Pointer(tls)))
- case "386", "amd64":
- tls := &mp.tls[0]
- return *((**g)(unsafe.Pointer(tls)))
+func gFromSP(mp *m, sp uintptr) *g {
+ if gp := mp.g0; gp != nil && gp.stack.lo < sp && sp < gp.stack.hi {
+ return gp
+ }
+ if gp := mp.gsignal; gp != nil && gp.stack.lo < sp && sp < gp.stack.hi {
+ return gp
+ }
+ if gp := mp.curg; gp != nil && gp.stack.lo < sp && sp < gp.stack.hi {
+ return gp
}
- throw("unsupported architecture")
return nil
}
@@ -1134,8 +1174,12 @@ func profileloop1(param uintptr) uint32 {
}
// Acquire our own handle to the thread.
var thread uintptr
- stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS)
+ if stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+ print("runtime.profileloop1: duplicatehandle failed; errno=", getlasterror(), "\n")
+ throw("runtime.profileloop1: duplicatehandle failed")
+ }
unlock(&mp.threadLock)
+
// mp may exit between the DuplicateHandle
// above and the SuspendThread. The handle
// will remain valid, but SuspendThread may
@@ -1180,14 +1224,14 @@ func setThreadCPUProfiler(hz int32) {
atomic.Store((*uint32)(unsafe.Pointer(&getg().m.profilehz)), uint32(hz))
}
-const preemptMSupported = GOARCH != "arm"
+const preemptMSupported = GOARCH == "386" || GOARCH == "amd64"
// suspendLock protects simultaneous SuspendThread operations from
// suspending each other.
var suspendLock mutex
func preemptM(mp *m) {
- if GOARCH == "arm" {
+ if !preemptMSupported {
// TODO: Implement call injection
return
}
@@ -1214,7 +1258,10 @@ func preemptM(mp *m) {
return
}
var thread uintptr
- stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS)
+ if stdcall7(_DuplicateHandle, currentProcess, mp.thread, currentProcess, uintptr(unsafe.Pointer(&thread)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 {
+ print("runtime.preemptM: duplicatehandle failed; errno=", getlasterror(), "\n")
+ throw("runtime.preemptM: duplicatehandle failed")
+ }
unlock(&mp.threadLock)
// Prepare thread context buffer. This must be aligned to 16 bytes.
@@ -1255,7 +1302,7 @@ func preemptM(mp *m) {
unlock(&suspendLock)
// Does it want a preemption and is it safe to preempt?
- gp := gFromTLS(mp)
+ gp := gFromSP(mp, c.sp())
if wantAsyncPreempt(gp) {
if ok, newpc := isAsyncSafePoint(gp, c.ip(), c.sp(), c.lr()); ok {
// Inject call to asyncPreempt
diff --git a/src/runtime/os_windows_arm64.go b/src/runtime/os_windows_arm64.go
new file mode 100644
index 0000000000..7e413445ba
--- /dev/null
+++ b/src/runtime/os_windows_arm64.go
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+//go:nosplit
+func cputicks() int64 {
+ var counter int64
+ stdcall1(_QueryPerformanceCounter, uintptr(unsafe.Pointer(&counter)))
+ return counter
+}
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index aed17d6fc6..e320eaa596 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
@@ -874,7 +875,13 @@ func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32) {
p.pc = getcallerpc()
p.sp = unsafe.Pointer(getcallersp())
}
- reflectcall(nil, fn, arg, argsize, argsize)
+ // Pass a dummy RegArgs for now since no function actually implements
+ // the register-based ABI.
+ //
+ // TODO(mknyszek): Implement this properly, setting up arguments in
+ // registers as necessary in the caller.
+ var regs abi.RegArgs
+ reflectcall(nil, fn, arg, argsize, argsize, argsize, &regs)
if p != nil {
p.pc = 0
p.sp = unsafe.Pointer(nil)
@@ -968,7 +975,9 @@ func gopanic(e interface{}) {
}
} else {
p.argp = unsafe.Pointer(getargp(0))
- reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
+
+ var regs abi.RegArgs
+ reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz), uint32(d.siz), &regs)
}
p.argp = nil
@@ -1000,37 +1009,42 @@ func gopanic(e interface{}) {
}
atomic.Xadd(&runningPanicDefers, -1)
- if done {
- // Remove any remaining non-started, open-coded
- // defer entries after a recover, since the
- // corresponding defers will be executed normally
- // (inline). Any such entry will become stale once
- // we run the corresponding defers inline and exit
- // the associated stack frame.
- d := gp._defer
- var prev *_defer
- for d != nil {
- if d.openDefer {
- if d.started {
- // This defer is started but we
- // are in the middle of a
- // defer-panic-recover inside of
- // it, so don't remove it or any
- // further defer entries
- break
- }
- if prev == nil {
- gp._defer = d.link
- } else {
- prev.link = d.link
- }
- newd := d.link
- freedefer(d)
- d = newd
+ // Remove any remaining non-started, open-coded
+ // defer entries after a recover, since the
+ // corresponding defers will be executed normally
+ // (inline). Any such entry will become stale once
+ // we run the corresponding defers inline and exit
+ // the associated stack frame.
+ d := gp._defer
+ var prev *_defer
+ if !done {
+ // Skip our current frame, if not done. It is
+ // needed to complete any remaining defers in
+ // deferreturn()
+ prev = d
+ d = d.link
+ }
+ for d != nil {
+ if d.started {
+ // This defer is started but we
+ // are in the middle of a
+ // defer-panic-recover inside of
+ // it, so don't remove it or any
+ // further defer entries
+ break
+ }
+ if d.openDefer {
+ if prev == nil {
+ gp._defer = d.link
} else {
- prev = d
- d = d.link
+ prev.link = d.link
}
+ newd := d.link
+ freedefer(d)
+ d = newd
+ } else {
+ prev = d
+ d = d.link
}
}
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index b6ee160e84..d7571953a9 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -279,16 +279,12 @@ func testCPUProfile(t *testing.T, matches matchFunc, need []string, avoid []stri
broken := false
switch runtime.GOOS {
- case "darwin", "ios", "dragonfly", "netbsd", "illumos", "solaris":
+ case "ios", "dragonfly", "netbsd", "illumos", "solaris":
broken = true
case "openbsd":
if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
broken = true
}
- case "windows":
- if runtime.GOARCH == "arm" {
- broken = true // See https://golang.org/issues/42862
- }
}
maxDuration := 5 * time.Second
@@ -514,8 +510,10 @@ func TestGoroutineSwitch(t *testing.T) {
}
StopCPUProfile()
- // Read profile to look for entries for runtime.gogo with an attempt at a traceback.
- // The special entry
+ // Read profile to look for entries for gogo with an attempt at a traceback.
+ // "runtime.gogo" is OK, because that's the part of the context switch
+ // before the actual switch begins. But we should not see "gogo",
+ // aka "gogo<>(SB)", which does the actual switch and is marked SPWRITE.
parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
// An entry with two frames with 'System' in its top frame
// exists to record a PC without a traceback. Those are okay.
@@ -526,13 +524,19 @@ func TestGoroutineSwitch(t *testing.T) {
}
}
- // Otherwise, should not see runtime.gogo.
+ // An entry with just one frame is OK too:
+ // it knew to stop at gogo.
+ if len(stk) == 1 {
+ return
+ }
+
+ // Otherwise, should not see gogo.
// The place we'd see it would be the inner most frame.
name := stk[0].Line[0].Function.Name
- if name == "runtime.gogo" {
+ if name == "gogo" {
var buf bytes.Buffer
fprintStack(&buf, stk)
- t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
+ t.Fatalf("found profile entry for gogo:\n%s", buf.String())
}
})
}
diff --git a/src/runtime/print.go b/src/runtime/print.go
index 64055a34cc..f15296cf02 100644
--- a/src/runtime/print.go
+++ b/src/runtime/print.go
@@ -216,13 +216,15 @@ func printint(v int64) {
printuint(uint64(v))
}
+var minhexdigits = 0 // protected by printlock
+
func printhex(v uint64) {
const dig = "0123456789abcdef"
var buf [100]byte
i := len(buf)
for i--; i > 0; i-- {
buf[i] = dig[v%16]
- if v < 16 {
+ if v < 16 && len(buf)-i >= minhexdigits {
break
}
v /= 16
@@ -265,29 +267,16 @@ func printiface(i iface) {
// and should return a character mark to appear just before that
// word's value. It can return 0 to indicate no mark.
func hexdumpWords(p, end uintptr, mark func(uintptr) byte) {
- p1 := func(x uintptr) {
- var buf [2 * sys.PtrSize]byte
- for i := len(buf) - 1; i >= 0; i-- {
- if x&0xF < 10 {
- buf[i] = byte(x&0xF) + '0'
- } else {
- buf[i] = byte(x&0xF) - 10 + 'a'
- }
- x >>= 4
- }
- gwrite(buf[:])
- }
-
printlock()
var markbuf [1]byte
markbuf[0] = ' '
+ minhexdigits = int(unsafe.Sizeof(uintptr(0)) * 2)
for i := uintptr(0); p+i < end; i += sys.PtrSize {
if i%16 == 0 {
if i != 0 {
println()
}
- p1(p + i)
- print(": ")
+ print(hex(p+i), ": ")
}
if mark != nil {
@@ -298,15 +287,16 @@ func hexdumpWords(p, end uintptr, mark func(uintptr) byte) {
}
gwrite(markbuf[:])
val := *(*uintptr)(unsafe.Pointer(p + i))
- p1(val)
+ print(hex(val))
print(" ")
// Can we symbolize val?
fn := findfunc(val)
if fn.valid() {
- print("<", funcname(fn), "+", val-fn.entry, "> ")
+ print("<", funcname(fn), "+", hex(val-fn.entry), "> ")
}
}
+ minhexdigits = 0
println()
printunlock()
}
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 64e102fb0a..dbb430fd25 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -490,8 +490,29 @@ func lockedOSThread() bool {
}
var (
- allgs []*g
+ // allgs contains all Gs ever created (including dead Gs), and thus
+ // never shrinks.
+ //
+ // Access via the slice is protected by allglock or stop-the-world.
+ // Readers that cannot take the lock may (carefully!) use the atomic
+ // variables below.
allglock mutex
+ allgs []*g
+
+ // allglen and allgptr are atomic variables that contain len(allg) and
+ // &allg[0] respectively. Proper ordering depends on totally-ordered
+ // loads and stores. Writes are protected by allglock.
+ //
+ // allgptr is updated before allglen. Readers should read allglen
+ // before allgptr to ensure that allglen is always <= len(allgptr). New
+ // Gs appended during the race can be missed. For a consistent view of
+ // all Gs, allglock must be held.
+ //
+ // allgptr copies should always be stored as a concrete type or
+ // unsafe.Pointer, not uintptr, to ensure that GC can still reach it
+ // even if it points to a stale array.
+ allglen uintptr
+ allgptr **g
)
func allgadd(gp *g) {
@@ -501,10 +522,25 @@ func allgadd(gp *g) {
lock(&allglock)
allgs = append(allgs, gp)
- allglen = uintptr(len(allgs))
+ if &allgs[0] != allgptr {
+ atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
+ }
+ atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
unlock(&allglock)
}
+// atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
+func atomicAllG() (**g, uintptr) {
+ length := atomic.Loaduintptr(&allglen)
+ ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
+ return ptr, length
+}
+
+// atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
+func atomicAllGIndex(ptr **g, i uintptr) *g {
+ return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize))
+}
+
const (
// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
// 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
@@ -1170,8 +1206,38 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 {
return startTime
}
+// usesLibcall indicates whether this runtime performs system calls
+// via libcall.
+func usesLibcall() bool {
+ switch GOOS {
+ case "aix", "darwin", "illumos", "ios", "solaris", "windows":
+ return true
+ case "openbsd":
+ return GOARCH == "amd64" || GOARCH == "arm64"
+ }
+ return false
+}
+
+// mStackIsSystemAllocated indicates whether this runtime starts on a
+// system-allocated stack.
+func mStackIsSystemAllocated() bool {
+ switch GOOS {
+ case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
+ return true
+ case "openbsd":
+ switch GOARCH {
+ case "amd64", "arm64":
+ return true
+ }
+ }
+ return false
+}
+
// mstart is the entry-point for new Ms.
-//
+// It is written in assembly, marked TOPFRAME, and calls mstart0.
+func mstart()
+
+// mstart0 is the Go entry-point for new Ms.
// This must not split the stack because we may not even have stack
// bounds set up yet.
//
@@ -1180,7 +1246,7 @@ func startTheWorldWithSema(emitTraceEvent bool) int64 {
//
//go:nosplit
//go:nowritebarrierrec
-func mstart() {
+func mstart0() {
_g_ := getg()
osStack := _g_.stack.lo == 0
@@ -1188,6 +1254,11 @@ func mstart() {
// Initialize stack bounds from system stack.
// Cgo may have left stack size in stack.hi.
// minit may update the stack bounds.
+ //
+ // Note: these bounds may not be very accurate.
+ // We set hi to &size, but there are things above
+ // it. The 1024 is supposed to compensate this,
+ // but is somewhat arbitrary.
size := _g_.stack.hi
if size == 0 {
size = 8192 * sys.StackGuardMultiplier
@@ -1204,8 +1275,7 @@ func mstart() {
mstart1()
// Exit this thread.
- switch GOOS {
- case "windows", "solaris", "illumos", "plan9", "darwin", "ios", "aix":
+ if mStackIsSystemAllocated() {
// Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
// the stack, but put it in _g_.stack before mstart,
// so the logic above hasn't set osStack yet.
@@ -1214,6 +1284,9 @@ func mstart() {
mexit(osStack)
}
+// The go:noinline is to guarantee the getcallerpc/getcallersp below are safe,
+// so that we can set up g0.sched to return to the call of mstart1 above.
+//go:noinline
func mstart1() {
_g_ := getg()
@@ -1221,11 +1294,16 @@ func mstart1() {
throw("bad runtime·mstart")
}
- // Record the caller for use as the top of stack in mcall and
- // for terminating the thread.
+ // Set up m.g0.sched as a label returning returning to just
+ // after the mstart1 call in mstart0 above, for use by goexit0 and mcall.
// We're never coming back to mstart1 after we call schedule,
// so other calls can reuse the current frame.
- save(getcallerpc(), getcallersp())
+ // And goexit0 does a gogo that needs to return from mstart1
+ // and let mstart0 exit the thread.
+ _g_.sched.g = guintptr(unsafe.Pointer(_g_))
+ _g_.sched.pc = getcallerpc()
+ _g_.sched.sp = getcallersp()
+
asminit()
minit()
@@ -1313,7 +1391,7 @@ func mexit(osStack bool) {
throw("locked m0 woke up")
}
- sigblock()
+ sigblock(true)
unminit()
// Free the gsignal stack.
@@ -1371,6 +1449,10 @@ found:
}
}
+ // Destroy all allocated resources. After this is called, we may no
+ // longer take any locks.
+ mdestroy(m)
+
if osStack {
// Return from mstart and let the system thread
// library free the g0 stack and terminate the thread.
@@ -1515,6 +1597,7 @@ func syscall_runtime_doAllThreadsSyscall(fn func(bool) bool) {
if netpollinited() {
netpollBreak()
}
+ sigRecvPrepareForFixup()
_g_ := getg()
if raceenabled {
// For m's running without racectx, we loan out the
@@ -1683,7 +1766,7 @@ func allocm(_p_ *p, fn func(), id int64) *m {
// In case of cgo or Solaris or illumos or Darwin, pthread_create will make us a stack.
// Windows and Plan 9 will layout sched stack on OS stack.
- if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" || GOOS == "plan9" || GOOS == "darwin" || GOOS == "ios" {
+ if iscgo || mStackIsSystemAllocated() {
mp.g0 = malg(-1)
} else {
mp.g0 = malg(8192 * sys.StackGuardMultiplier)
@@ -1754,7 +1837,7 @@ func needm() {
// starting a new m to run Go code via newosproc.
var sigmask sigset
sigsave(&sigmask)
- sigblock()
+ sigblock(false)
// Lock extra list, take head, unlock popped list.
// nilokay=false is safe here because of the invariant above,
@@ -1828,7 +1911,7 @@ func oneNewExtraM() {
gp := malg(4096)
gp.sched.pc = funcPC(goexit) + sys.PCQuantum
gp.sched.sp = gp.stack.hi
- gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
+ gp.sched.sp -= 4 * sys.PtrSize // extra space in case of reads slightly beyond frame
gp.sched.lr = 0
gp.sched.g = guintptr(unsafe.Pointer(gp))
gp.syscallpc = gp.sched.pc
@@ -1903,7 +1986,7 @@ func dropm() {
// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
// It's important not to try to handle a signal between those two steps.
sigmask := mp.sigmask
- sigblock()
+ sigblock(false)
unminit()
mnext := lockextra(true)
@@ -1940,7 +2023,7 @@ func lockextra(nilokay bool) *m {
for {
old := atomic.Loaduintptr(&extram)
if old == locked {
- osyield()
+ osyield_no_g()
continue
}
if old == 0 && !nilokay {
@@ -1951,13 +2034,13 @@ func lockextra(nilokay bool) *m {
atomic.Xadd(&extraMWaiters, 1)
incr = true
}
- usleep(1)
+ usleep_no_g(1)
continue
}
if atomic.Casuintptr(&extram, old, locked) {
return (*m)(unsafe.Pointer(old))
}
- osyield()
+ osyield_no_g()
continue
}
}
@@ -2882,7 +2965,9 @@ func wakeNetPoller(when int64) {
} else {
// There are no threads in the network poller, try to get
// one there so it can handle new timers.
- wakep()
+ if GOOS != "plan9" { // Temporary workaround - see issue #42303.
+ wakep()
+ }
}
}
@@ -3371,11 +3456,19 @@ func goexit0(gp *g) {
func save(pc, sp uintptr) {
_g_ := getg()
+ if _g_ == _g_.m.g0 || _g_ == _g_.m.gsignal {
+ // m.g0.sched is special and must describe the context
+ // for exiting the thread. mstart1 writes to it directly.
+ // m.gsignal.sched should not be used at all.
+ // This check makes sure save calls do not accidentally
+ // run in contexts where they'd write to system g's.
+ throw("save on system g not allowed")
+ }
+
_g_.sched.pc = pc
_g_.sched.sp = sp
_g_.sched.lr = 0
_g_.sched.ret = 0
- _g_.sched.g = guintptr(unsafe.Pointer(_g_))
// We need to ensure ctxt is zero, but can't have a write
// barrier here. However, it should always already be zero.
// Assert that.
@@ -3389,7 +3482,7 @@ func save(pc, sp uintptr) {
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
-// Entersyscall cannot split the stack: the gosave must
+// Entersyscall cannot split the stack: the save must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
//
@@ -3774,7 +3867,7 @@ func beforefork() {
// group. See issue #18600.
gp.m.locks++
sigsave(&gp.m.sigmask)
- sigblock()
+ sigblock(false)
// This function is called before fork in syscall package.
// Code between fork and exec must not allocate memory nor even try to grow stack.
@@ -3935,9 +4028,9 @@ func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerp
// We could allocate a larger initial stack if necessary.
// Not worth it: this is almost always an error.
- // 4*sizeof(uintreg): extra space added below
- // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
- if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
+ // 4*PtrSize: extra space added below
+ // PtrSize: caller's LR (arm) or return address (x86, in gostartcall).
+ if siz >= _StackMin-4*sys.PtrSize-sys.PtrSize {
throw("newproc: function arguments too large for new goroutine")
}
@@ -3956,8 +4049,8 @@ func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerp
throw("newproc1: new g is not Gdead")
}
- totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
- totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign
+ totalSize := 4*sys.PtrSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
+ totalSize += -totalSize & (sys.StackAlign - 1) // align to StackAlign
sp := newg.stack.hi - totalSize
spArg := sp
if usesLR {
@@ -4263,7 +4356,7 @@ func badunlockosthread() {
}
func gcount() int32 {
- n := int32(allglen) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
+ n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
for _, _p_ := range allp {
n -= _p_.gFree.n
}
@@ -4330,75 +4423,6 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
// See golang.org/issue/17165.
getg().m.mallocing++
- // Define that a "user g" is a user-created goroutine, and a "system g"
- // is one that is m->g0 or m->gsignal.
- //
- // We might be interrupted for profiling halfway through a
- // goroutine switch. The switch involves updating three (or four) values:
- // g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
- // because once it gets updated the new g is running.
- //
- // When switching from a user g to a system g, LR is not considered live,
- // so the update only affects g, SP, and PC. Since PC must be last, there
- // the possible partial transitions in ordinary execution are (1) g alone is updated,
- // (2) both g and SP are updated, and (3) SP alone is updated.
- // If SP or g alone is updated, we can detect the partial transition by checking
- // whether the SP is within g's stack bounds. (We could also require that SP
- // be changed only after g, but the stack bounds check is needed by other
- // cases, so there is no need to impose an additional requirement.)
- //
- // There is one exceptional transition to a system g, not in ordinary execution.
- // When a signal arrives, the operating system starts the signal handler running
- // with an updated PC and SP. The g is updated last, at the beginning of the
- // handler. There are two reasons this is okay. First, until g is updated the
- // g and SP do not match, so the stack bounds check detects the partial transition.
- // Second, signal handlers currently run with signals disabled, so a profiling
- // signal cannot arrive during the handler.
- //
- // When switching from a system g to a user g, there are three possibilities.
- //
- // First, it may be that the g switch has no PC update, because the SP
- // either corresponds to a user g throughout (as in asmcgocall)
- // or because it has been arranged to look like a user g frame
- // (as in cgocallback). In this case, since the entire
- // transition is a g+SP update, a partial transition updating just one of
- // those will be detected by the stack bounds check.
- //
- // Second, when returning from a signal handler, the PC and SP updates
- // are performed by the operating system in an atomic update, so the g
- // update must be done before them. The stack bounds check detects
- // the partial transition here, and (again) signal handlers run with signals
- // disabled, so a profiling signal cannot arrive then anyway.
- //
- // Third, the common case: it may be that the switch updates g, SP, and PC
- // separately. If the PC is within any of the functions that does this,
- // we don't ask for a traceback. C.F. the function setsSP for more about this.
- //
- // There is another apparently viable approach, recorded here in case
- // the "PC within setsSP function" check turns out not to be usable.
- // It would be possible to delay the update of either g or SP until immediately
- // before the PC update instruction. Then, because of the stack bounds check,
- // the only problematic interrupt point is just before that PC update instruction,
- // and the sigprof handler can detect that instruction and simulate stepping past
- // it in order to reach a consistent state. On ARM, the update of g must be made
- // in two places (in R10 and also in a TLS slot), so the delayed update would
- // need to be the SP update. The sigprof handler must read the instruction at
- // the current PC and if it was the known instruction (for example, JMP BX or
- // MOV R2, PC), use that other register in place of the PC value.
- // The biggest drawback to this solution is that it requires that we can tell
- // whether it's safe to read from the memory pointed at by PC.
- // In a correct program, we can test PC == nil and otherwise read,
- // but if a profiling signal happens at the instant that a program executes
- // a bad jump (before the program manages to handle the resulting fault)
- // the profiling handler could fault trying to read nonexistent memory.
- //
- // To recap, there are no constraints on the assembly being used for the
- // transition. We simply require that g and SP match and that the PC is not
- // in gogo.
- traceback := true
- if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) || (mp != nil && mp.vdsoSP != 0) {
- traceback = false
- }
var stk [maxCPUProfStack]uintptr
n := 0
if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
@@ -4421,7 +4445,7 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
if n > 0 {
n += cgoOff
}
- } else if traceback {
+ } else {
n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
}
@@ -4429,7 +4453,7 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
// Normal traceback is impossible or has failed.
// See if it falls into several common cases.
n = 0
- if (GOOS == "windows" || GOOS == "solaris" || GOOS == "illumos" || GOOS == "darwin" || GOOS == "ios" || GOOS == "aix") && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
+ if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
// Libcall, i.e. runtime syscall on windows.
// Collect Go stack that leads to the call.
n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
@@ -4500,30 +4524,6 @@ func sigprofNonGoPC(pc uintptr) {
}
}
-// Reports whether a function will set the SP
-// to an absolute value. Important that
-// we don't traceback when these are at the bottom
-// of the stack since we can't be sure that we will
-// find the caller.
-//
-// If the function is not on the bottom of the stack
-// we assume that it will have set it up so that traceback will be consistent,
-// either by being a traceback terminating function
-// or putting one on the stack at the right offset.
-func setsSP(pc uintptr) bool {
- f := findfunc(pc)
- if !f.valid() {
- // couldn't find the function for this PC,
- // so assume the worst and stop traceback
- return true
- }
- switch f.funcID {
- case funcID_gogo, funcID_systemstack, funcID_mcall, funcID_morestack:
- return true
- }
- return false
-}
-
// setcpuprofilerate sets the CPU profiling rate to hz times per second.
// If hz <= 0, setcpuprofilerate turns off CPU profiling.
func setcpuprofilerate(hz int32) {
@@ -4967,7 +4967,6 @@ func checkdead() {
case _Grunnable,
_Grunning,
_Gsyscall:
- unlock(&allglock)
print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
throw("checkdead: runnable g")
}
@@ -5128,6 +5127,26 @@ func sysmon() {
}
}
mDoFixup()
+ if GOOS == "netbsd" {
+ // netpoll is responsible for waiting for timer
+ // expiration, so we typically don't have to worry
+ // about starting an M to service timers. (Note that
+ // sleep for timeSleepUntil above simply ensures sysmon
+ // starts running again when that timer expiration may
+ // cause Go code to run again).
+ //
+ // However, netbsd has a kernel bug that sometimes
+ // misses netpollBreak wake-ups, which can lead to
+ // unbounded delays servicing timers. If we detect this
+ // overrun, then startm to get something to handle the
+ // timer.
+ //
+ // See issue 42515 and
+ // https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=50094.
+ if next, _ := timeSleepUntil(); next < now {
+ startm(nil, false)
+ }
+ }
if atomic.Load(&scavenge.sysmonWake) != 0 {
// Kick the scavenger awake if someone requested it.
wakeScavenger()
diff --git a/src/runtime/race/output_test.go b/src/runtime/race/output_test.go
index 986667332f..17dc32013f 100644
--- a/src/runtime/race/output_test.go
+++ b/src/runtime/race/output_test.go
@@ -7,6 +7,7 @@
package race_test
import (
+ "fmt"
"internal/testenv"
"os"
"os/exec"
@@ -71,9 +72,24 @@ func TestOutput(t *testing.T) {
"GORACE="+test.gorace,
)
got, _ := cmd.CombinedOutput()
- if !regexp.MustCompile(test.re).MatchString(string(got)) {
- t.Fatalf("failed test case %v, expect:\n%v\ngot:\n%s",
- test.name, test.re, got)
+ matched := false
+ for _, re := range test.re {
+ if regexp.MustCompile(re).MatchString(string(got)) {
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ exp := fmt.Sprintf("expect:\n%v\n", test.re[0])
+ if len(test.re) > 1 {
+ exp = fmt.Sprintf("expected one of %d patterns:\n",
+ len(test.re))
+ for k, re := range test.re {
+ exp += fmt.Sprintf("pattern %d:\n%v\n", k, re)
+ }
+ }
+ t.Fatalf("failed test case %v, %sgot:\n%s",
+ test.name, exp, got)
}
}
}
@@ -84,7 +100,7 @@ var tests = []struct {
goos string
gorace string
source string
- re string
+ re []string
}{
{"simple", "run", "", "atexit_sleep_ms=0", `
package main
@@ -107,7 +123,7 @@ func racer(x *int, done chan bool) {
store(x, 42)
done <- true
}
-`, `==================
+`, []string{`==================
WARNING: DATA RACE
Write at 0x[0-9,a-f]+ by goroutine [0-9]:
main\.store\(\)
@@ -129,7 +145,7 @@ Goroutine [0-9] \(running\) created at:
==================
Found 1 data race\(s\)
exit status 66
-`},
+`}},
{"exitcode", "run", "", "atexit_sleep_ms=0 exitcode=13", `
package main
@@ -143,7 +159,7 @@ func main() {
x = 43
<-done
}
-`, `exit status 13`},
+`, []string{`exit status 13`}},
{"strip_path_prefix", "run", "", "atexit_sleep_ms=0 strip_path_prefix=/main.", `
package main
@@ -157,9 +173,9 @@ func main() {
x = 43
<-done
}
-`, `
+`, []string{`
go:7 \+0x[0-9,a-f]+
-`},
+`}},
{"halt_on_error", "run", "", "atexit_sleep_ms=0 halt_on_error=1", `
package main
@@ -173,10 +189,10 @@ func main() {
x = 43
<-done
}
-`, `
+`, []string{`
==================
exit status 66
-`},
+`}},
{"test_fails_on_race", "test", "", "atexit_sleep_ms=0", `
package main_test
@@ -193,12 +209,12 @@ func TestFail(t *testing.T) {
<-done
t.Log(t.Failed())
}
-`, `
+`, []string{`
==================
--- FAIL: TestFail \(0...s\)
.*main_test.go:14: true
.*testing.go:.*: race detected during execution of test
-FAIL`},
+FAIL`}},
{"slicebytetostring_pc", "run", "", "atexit_sleep_ms=0", `
package main
@@ -211,11 +227,11 @@ func main() {
data[0] = 1
<-done
}
-`, `
+`, []string{`
runtime\.slicebytetostring\(\)
.*/runtime/string\.go:.*
main\.main\.func1\(\)
- .*/main.go:7`},
+ .*/main.go:7`}},
// Test for https://golang.org/issue/33309
{"midstack_inlining_traceback", "run", "linux", "atexit_sleep_ms=0", `
@@ -241,7 +257,7 @@ func g(c chan int) {
func h(c chan int) {
c <- x
}
-`, `==================
+`, []string{`==================
WARNING: DATA RACE
Read at 0x[0-9,a-f]+ by goroutine [0-9]:
main\.h\(\)
@@ -261,7 +277,7 @@ Goroutine [0-9] \(running\) created at:
==================
Found 1 data race\(s\)
exit status 66
-`},
+`}},
// Test for https://golang.org/issue/17190
{"external_cgo_thread", "run", "linux", "atexit_sleep_ms=0", `
@@ -284,22 +300,23 @@ static inline void startThread(cb* c) {
*/
import "C"
-import "time"
-
+var done chan bool
var racy int
//export goCallback
func goCallback() {
racy++
+ done <- true
}
func main() {
+ done = make(chan bool)
var c C.cb
C.startThread(&c)
- time.Sleep(time.Second)
racy++
+ <- done
}
-`, `==================
+`, []string{`==================
WARNING: DATA RACE
Read at 0x[0-9,a-f]+ by main goroutine:
main\.main\(\)
@@ -310,11 +327,27 @@ Previous write at 0x[0-9,a-f]+ by goroutine [0-9]:
.*/main\.go:27 \+0x[0-9,a-f]+
_cgoexp_[0-9a-z]+_goCallback\(\)
.*_cgo_gotypes\.go:[0-9]+ \+0x[0-9,a-f]+
+ _cgoexp_[0-9a-z]+_goCallback\(\)
+ <autogenerated>:1 \+0x[0-9,a-f]+
+
+Goroutine [0-9] \(running\) created at:
+ runtime\.newextram\(\)
+ .*/runtime/proc.go:[0-9]+ \+0x[0-9,a-f]+
+==================`,
+ `==================
+WARNING: DATA RACE
+Read at 0x[0-9,a-f]+ by .*:
+ main\..*
+ .*/main\.go:[0-9]+ \+0x[0-9,a-f]+(?s).*
+
+Previous write at 0x[0-9,a-f]+ by .*:
+ main\..*
+ .*/main\.go:[0-9]+ \+0x[0-9,a-f]+(?s).*
Goroutine [0-9] \(running\) created at:
runtime\.newextram\(\)
.*/runtime/proc.go:[0-9]+ \+0x[0-9,a-f]+
-==================`},
+==================`}},
{"second_test_passes", "test", "", "atexit_sleep_ms=0", `
package main_test
import "testing"
@@ -332,11 +365,11 @@ func TestFail(t *testing.T) {
func TestPass(t *testing.T) {
}
-`, `
+`, []string{`
==================
--- FAIL: TestFail \(0...s\)
.*testing.go:.*: race detected during execution of test
-FAIL`},
+FAIL`}},
{"mutex", "run", "", "atexit_sleep_ms=0", `
package main
import (
@@ -367,7 +400,7 @@ func main() {
}
wg.Wait()
if (data == iterations*(threads+1)) { fmt.Println("pass") }
-}`, `pass`},
+}`, []string{`pass`}},
// Test for https://github.com/golang/go/issues/37355
{"chanmm", "run", "", "atexit_sleep_ms=0", `
package main
@@ -396,7 +429,7 @@ func main() {
wg.Wait()
_ = data
}
-`, `==================
+`, []string{`==================
WARNING: DATA RACE
Write at 0x[0-9,a-f]+ by goroutine [0-9]:
main\.main\.func2\(\)
@@ -409,5 +442,5 @@ Previous write at 0x[0-9,a-f]+ by main goroutine:
Goroutine [0-9] \(running\) created at:
main\.main\(\)
.*/main.go:[0-9]+ \+0x[0-9,a-f]+
-==================`},
+==================`}},
}
diff --git a/src/runtime/race_amd64.s b/src/runtime/race_amd64.s
index 9818bc6ddf..c3b7bbfbfe 100644
--- a/src/runtime/race_amd64.s
+++ b/src/runtime/race_amd64.s
@@ -146,8 +146,10 @@ TEXT runtime·racewriterangepc1(SB), NOSPLIT, $0-24
// If addr (RARG1) is out of range, do nothing.
// Otherwise, setup goroutine context and invoke racecall. Other arguments already set.
TEXT racecalladdr<>(SB), NOSPLIT, $0-0
+#ifndef GOEXPERIMENT_REGABI
get_tls(R12)
MOVQ g(R12), R14
+#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
CMPQ RARG1, runtime·racearenastart(SB)
@@ -183,8 +185,10 @@ TEXT runtime·racefuncenter(SB), NOSPLIT, $0-8
// R11 = caller's return address
TEXT racefuncenter<>(SB), NOSPLIT, $0-0
MOVQ DX, R15 // save function entry context (for closures)
+#ifndef GOEXPERIMENT_REGABI
get_tls(R12)
MOVQ g(R12), R14
+#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
MOVQ R11, RARG1
// void __tsan_func_enter(ThreadState *thr, void *pc);
@@ -197,8 +201,10 @@ TEXT racefuncenter<>(SB), NOSPLIT, $0-0
// func runtime·racefuncexit()
// Called from instrumented code.
TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
+#ifndef GOEXPERIMENT_REGABI
get_tls(R12)
MOVQ g(R12), R14
+#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
// void __tsan_func_exit(ThreadState *thr);
MOVQ $__tsan_func_exit(SB), AX
@@ -357,8 +363,10 @@ racecallatomic_data:
JAE racecallatomic_ignore
racecallatomic_ok:
// Addr is within the good range, call the atomic function.
+#ifndef GOEXPERIMENT_REGABI
get_tls(R12)
MOVQ g(R12), R14
+#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
MOVQ 8(SP), RARG1 // caller pc
MOVQ (SP), RARG2 // pc
@@ -370,8 +378,10 @@ racecallatomic_ignore:
// An attempt to synchronize on the address would cause crash.
MOVQ AX, R15 // remember the original function
MOVQ $__tsan_go_ignore_sync_begin(SB), AX
+#ifndef GOEXPERIMENT_REGABI
get_tls(R12)
MOVQ g(R12), R14
+#endif
MOVQ g_racectx(R14), RARG0 // goroutine context
CALL racecall<>(SB)
MOVQ R15, AX // restore the original function
@@ -399,8 +409,10 @@ TEXT runtime·racecall(SB), NOSPLIT, $0-0
// Switches SP to g0 stack and calls (AX). Arguments already set.
TEXT racecall<>(SB), NOSPLIT, $0-0
+#ifndef GOEXPERIMENT_REGABI
get_tls(R12)
MOVQ g(R12), R14
+#endif
MOVQ g_m(R14), R13
// Switch to g0 stack.
MOVQ SP, R12 // callee-saved, preserved across the CALL
@@ -412,6 +424,9 @@ call:
ANDQ $~15, SP // alignment for gcc ABI
CALL AX
MOVQ R12, SP
+ // Back to Go world, set special registers.
+ // The g register (R14) is preserved in C.
+ XORPS X15, X15
RET
// C->Go callback thunk that allows to call runtime·racesymbolize from C code.
@@ -419,7 +434,9 @@ call:
// The overall effect of Go->C->Go call chain is similar to that of mcall.
// RARG0 contains command code. RARG1 contains command-specific context.
// See racecallback for command codes.
-TEXT runtime·racecallbackthunk(SB), NOSPLIT, $56-8
+// Defined as ABIInternal so as to avoid introducing a wrapper,
+// because its address is passed to C via funcPC.
+TEXT runtime·racecallbackthunk<ABIInternal>(SB), NOSPLIT, $56-8
// Handle command raceGetProcCmd (0) here.
// First, code below assumes that we are on curg, while raceGetProcCmd
// can be executed on g0. Second, it is called frequently, so will
@@ -447,12 +464,13 @@ rest:
PUSHQ R15
// Set g = g0.
get_tls(R12)
- MOVQ g(R12), R13
- MOVQ g_m(R13), R14
- MOVQ m_g0(R14), R15
+ MOVQ g(R12), R14
+ MOVQ g_m(R14), R13
+ MOVQ m_g0(R13), R15
CMPQ R13, R15
JEQ noswitch // branch if already on g0
MOVQ R15, g(R12) // g = m->g0
+ MOVQ R15, R14 // set g register
PUSHQ RARG1 // func arg
PUSHQ RARG0 // func arg
CALL runtime·racecallback(SB)
diff --git a/src/runtime/rt0_windows_arm64.s b/src/runtime/rt0_windows_arm64.s
new file mode 100644
index 0000000000..1e71a068d3
--- /dev/null
+++ b/src/runtime/rt0_windows_arm64.s
@@ -0,0 +1,12 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "textflag.h"
+
+// This is the entry point for the program from the
+// kernel for an ordinary -buildmode=exe program.
+TEXT _rt0_arm64_windows(SB),NOSPLIT|NOFRAME,$0
+ B ·rt0_go(SB)
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index c9376827da..5bd283d12f 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -327,7 +327,7 @@ type gobuf struct {
pc uintptr
g guintptr
ctxt unsafe.Pointer
- ret sys.Uintreg
+ ret uintptr
lr uintptr
bp uintptr // for framepointer-enabled architectures
}
@@ -833,10 +833,11 @@ type _func struct {
pcfile uint32
pcln uint32
npcdata uint32
- cuOffset uint32 // runtime.cutab offset of this function's CU
- funcID funcID // set for certain special runtime functions
- _ [2]byte // pad
- nfuncdata uint8 // must be last
+ cuOffset uint32 // runtime.cutab offset of this function's CU
+ funcID funcID // set for certain special runtime functions
+ flag funcFlag
+ _ [1]byte // pad
+ nfuncdata uint8 // must be last, must end on a uint32-aligned boundary
}
// Pseudo-Func that is returned for PCs that occur in inlined code.
@@ -853,7 +854,7 @@ type funcinl struct {
// layout of Itab known to compilers
// allocated in non-garbage-collected memory
// Needs to be in sync with
-// ../cmd/compile/internal/gc/reflect.go:/^func.dumptabs.
+// ../cmd/compile/internal/gc/reflect.go:/^func.WriteTabs.
type itab struct {
inter *interfacetype
_type *_type
@@ -1052,7 +1053,6 @@ func (w waitReason) String() string {
}
var (
- allglen uintptr
allm *m
gomaxprocs int32
ncpu int32
@@ -1106,4 +1106,4 @@ var (
)
// Must agree with cmd/internal/objabi.Framepointer_enabled.
-const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64" && (GOOS == "linux" || GOOS == "darwin" || GOOS == "ios")
+const framepointer_enabled = GOARCH == "amd64" || GOARCH == "arm64"
diff --git a/src/runtime/runtime_test.go b/src/runtime/runtime_test.go
index e5d2d97d05..4572a25195 100644
--- a/src/runtime/runtime_test.go
+++ b/src/runtime/runtime_test.go
@@ -266,8 +266,8 @@ func TestTrailingZero(t *testing.T) {
n int64
z struct{}
}
- if unsafe.Sizeof(T2{}) != 8+unsafe.Sizeof(Uintreg(0)) {
- t.Errorf("sizeof(%#v)==%d, want %d", T2{}, unsafe.Sizeof(T2{}), 8+unsafe.Sizeof(Uintreg(0)))
+ if unsafe.Sizeof(T2{}) != 8+unsafe.Sizeof(uintptr(0)) {
+ t.Errorf("sizeof(%#v)==%d, want %d", T2{}, unsafe.Sizeof(T2{}), 8+unsafe.Sizeof(uintptr(0)))
}
type T3 struct {
n byte
diff --git a/src/runtime/signal_amd64.go b/src/runtime/signal_amd64.go
index 6ab1f758c2..3eeb5e044f 100644
--- a/src/runtime/signal_amd64.go
+++ b/src/runtime/signal_amd64.go
@@ -65,11 +65,14 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
pc := uintptr(c.rip())
sp := uintptr(c.rsp())
+ // In case we are panicking from external code, we need to initialize
+ // Go special registers. We inject sigpanic0 (instead of sigpanic),
+ // which takes care of that.
if shouldPushSigpanic(gp, pc, *(*uintptr)(unsafe.Pointer(sp))) {
- c.pushCall(funcPC(sigpanic), pc)
+ c.pushCall(funcPC(sigpanic0), pc)
} else {
// Not safe to push the call. Just clobber the frame.
- c.set_rip(uint64(funcPC(sigpanic)))
+ c.set_rip(uint64(funcPC(sigpanic0)))
}
}
diff --git a/src/runtime/signal_arm64.go b/src/runtime/signal_arm64.go
index 3c20139c99..b559b93938 100644
--- a/src/runtime/signal_arm64.go
+++ b/src/runtime/signal_arm64.go
@@ -63,7 +63,7 @@ func (c *sigctxt) preparePanic(sig uint32, gp *g) {
// functions are correctly handled. This smashes
// the stack frame but we're not going back there
// anyway.
- sp := c.sp() - sys.SpAlign // needs only sizeof uint64, but must align the stack
+ sp := c.sp() - sys.StackAlign // needs only sizeof uint64, but must align the stack
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.lr()
diff --git a/src/runtime/signal_openbsd.go b/src/runtime/signal_openbsd.go
index 99c601ce58..d2c5c5e39a 100644
--- a/src/runtime/signal_openbsd.go
+++ b/src/runtime/signal_openbsd.go
@@ -37,5 +37,5 @@ var sigtable = [...]sigTabT{
/* 29 */ {_SigNotify, "SIGINFO: status request from keyboard"},
/* 30 */ {_SigNotify, "SIGUSR1: user-defined signal 1"},
/* 31 */ {_SigNotify, "SIGUSR2: user-defined signal 2"},
- /* 32 */ {_SigNotify, "SIGTHR: reserved"},
+ /* 32 */ {0, "SIGTHR: reserved"}, // thread AST - cannot be registered.
}
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index e8f39c3321..3f70707ab4 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -475,6 +475,14 @@ func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool {
return false
}
+ var st stackt
+ sigaltstack(nil, &st)
+ stsp := uintptr(unsafe.Pointer(st.ss_sp))
+ if st.ss_flags&_SS_DISABLE == 0 && sp >= stsp && sp < stsp+st.ss_size {
+ setGsignalStack(&st, gsigStack)
+ return true
+ }
+
if sp >= mp.g0.stack.lo && sp < mp.g0.stack.hi {
// The signal was delivered on the g0 stack.
// This can happen when linked with C code
@@ -483,29 +491,25 @@ func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool {
// the signal handler directly when C code,
// including C code called via cgo, calls a
// TSAN-intercepted function such as malloc.
+ //
+ // We check this condition last as g0.stack.lo
+ // may be not very accurate (see mstart).
st := stackt{ss_size: mp.g0.stack.hi - mp.g0.stack.lo}
setSignalstackSP(&st, mp.g0.stack.lo)
setGsignalStack(&st, gsigStack)
return true
}
- var st stackt
- sigaltstack(nil, &st)
+ // sp is not within gsignal stack, g0 stack, or sigaltstack. Bad.
+ setg(nil)
+ needm()
if st.ss_flags&_SS_DISABLE != 0 {
- setg(nil)
- needm()
noSignalStack(sig)
- dropm()
- }
- stsp := uintptr(unsafe.Pointer(st.ss_sp))
- if sp < stsp || sp >= stsp+st.ss_size {
- setg(nil)
- needm()
+ } else {
sigNotOnStack(sig)
- dropm()
}
- setGsignalStack(&st, gsigStack)
- return true
+ dropm()
+ return false
}
// crashing is the number of m's we have waited for when implementing
@@ -1042,15 +1046,26 @@ func msigrestore(sigmask sigset) {
sigprocmask(_SIG_SETMASK, &sigmask, nil)
}
-// sigblock blocks all signals in the current thread's signal mask.
+// sigsetAllExiting is used by sigblock(true) when a thread is
+// exiting. sigset_all is defined in OS specific code, and per GOOS
+// behavior may override this default for sigsetAllExiting: see
+// osinit().
+var sigsetAllExiting = sigset_all
+
+// sigblock blocks signals in the current thread's signal mask.
// This is used to block signals while setting up and tearing down g
-// when a non-Go thread calls a Go function.
-// The OS-specific code is expected to define sigset_all.
+// when a non-Go thread calls a Go function. When a thread is exiting
+// we use the sigsetAllExiting value, otherwise the OS specific
+// definition of sigset_all is used.
// This is nosplit and nowritebarrierrec because it is called by needm
// which may be called on a non-Go thread with no g available.
//go:nosplit
//go:nowritebarrierrec
-func sigblock() {
+func sigblock(exiting bool) {
+ if exiting {
+ sigprocmask(_SIG_SETMASK, &sigsetAllExiting, nil)
+ return
+ }
sigprocmask(_SIG_SETMASK, &sigset_all, nil)
}
diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go
index 3af2e39b08..6215d0ba2d 100644
--- a/src/runtime/signal_windows.go
+++ b/src/runtime/signal_windows.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "runtime/internal/sys"
"unsafe"
)
@@ -43,13 +44,17 @@ func initExceptionHandler() {
//
//go:nosplit
func isAbort(r *context) bool {
- // In the case of an abort, the exception IP is one byte after
- // the INT3 (this differs from UNIX OSes).
- return isAbortPC(r.ip() - 1)
+ pc := r.ip()
+ if GOARCH == "386" || GOARCH == "amd64" {
+ // In the case of an abort, the exception IP is one byte after
+ // the INT3 (this differs from UNIX OSes).
+ pc--
+ }
+ return isAbortPC(pc)
}
// isgoexception reports whether this exception should be translated
-// into a Go panic.
+// into a Go panic or throw.
//
// It is nosplit to avoid growing the stack in case we're aborting
// because of a stack overflow.
@@ -63,11 +68,6 @@ func isgoexception(info *exceptionrecord, r *context) bool {
return false
}
- if isAbort(r) {
- // Never turn abort into a panic.
- return false
- }
-
// Go will only handle some exceptions.
switch info.exceptioncode {
default:
@@ -81,6 +81,7 @@ func isgoexception(info *exceptionrecord, r *context) bool {
case _EXCEPTION_FLT_OVERFLOW:
case _EXCEPTION_FLT_UNDERFLOW:
case _EXCEPTION_BREAKPOINT:
+ case _EXCEPTION_ILLEGAL_INSTRUCTION: // breakpoint arrives this way on arm64
}
return true
}
@@ -99,21 +100,23 @@ func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 {
return _EXCEPTION_CONTINUE_SEARCH
}
- // After this point, it is safe to grow the stack.
-
- if gp.throwsplit {
- // We can't safely sigpanic because it may grow the
- // stack. Let it fall through.
- return _EXCEPTION_CONTINUE_SEARCH
+ if gp.throwsplit || isAbort(r) {
+ // We can't safely sigpanic because it may grow the stack.
+ // Or this is a call to abort.
+ // Don't go through any more of the Windows handler chain.
+ // Crash now.
+ winthrow(info, r, gp)
}
+ // After this point, it is safe to grow the stack.
+
// Make it look like a call to the signal func.
// Have to pass arguments out of band since
// augmenting the stack frame would break
// the unwinding code.
gp.sig = info.exceptioncode
- gp.sigcode0 = uintptr(info.exceptioninformation[0])
- gp.sigcode1 = uintptr(info.exceptioninformation[1])
+ gp.sigcode0 = info.exceptioninformation[0]
+ gp.sigcode1 = info.exceptioninformation[1]
gp.sigpc = r.ip()
// Only push runtime·sigpanic if r.ip() != 0.
@@ -131,16 +134,14 @@ func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32 {
// overwrite the PC. (See issue #35773)
if r.ip() != 0 && r.ip() != funcPC(asyncPreempt) {
sp := unsafe.Pointer(r.sp())
- sp = add(sp, ^(unsafe.Sizeof(uintptr(0)) - 1)) // sp--
+ delta := uintptr(sys.StackAlign)
+ sp = add(sp, -delta)
r.set_sp(uintptr(sp))
- switch GOARCH {
- default:
- panic("unsupported architecture")
- case "386", "amd64":
- *((*uintptr)(sp)) = r.ip()
- case "arm":
+ if usesLR {
*((*uintptr)(sp)) = r.lr()
r.set_lr(r.ip())
+ } else {
+ *((*uintptr)(sp)) = r.ip()
}
}
r.set_ip(funcPC(sigpanic))
@@ -181,6 +182,12 @@ func lastcontinuehandler(info *exceptionrecord, r *context, gp *g) int32 {
return _EXCEPTION_CONTINUE_SEARCH
}
+ winthrow(info, r, gp)
+ return 0 // not reached
+}
+
+//go:nosplit
+func winthrow(info *exceptionrecord, r *context, gp *g) {
_g_ := getg()
if panicking != 0 { // traceback already printed
@@ -206,11 +213,8 @@ func lastcontinuehandler(info *exceptionrecord, r *context, gp *g) int32 {
}
print("\n")
- // TODO(jordanrh1): This may be needed for 386/AMD64 as well.
- if GOARCH == "arm" {
- _g_.m.throwing = 1
- _g_.m.caughtsig.set(gp)
- }
+ _g_.m.throwing = 1
+ _g_.m.caughtsig.set(gp)
level, _, docrash := gotraceback()
if level > 0 {
@@ -224,7 +228,6 @@ func lastcontinuehandler(info *exceptionrecord, r *context, gp *g) int32 {
}
exit(2)
- return 0 // not reached
}
func sigpanic() {
diff --git a/src/runtime/signal_windows_test.go b/src/runtime/signal_windows_test.go
index a5a885c2f7..33a9b92ee7 100644
--- a/src/runtime/signal_windows_test.go
+++ b/src/runtime/signal_windows_test.go
@@ -11,6 +11,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
+ "strconv"
"strings"
"syscall"
"testing"
@@ -79,6 +80,69 @@ func sendCtrlBreak(pid int) error {
return nil
}
+// TestCtrlHandler tests that Go can gracefully handle closing the console window.
+// See https://golang.org/issues/41884.
+func TestCtrlHandler(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ // build go program
+ exe := filepath.Join(t.TempDir(), "test.exe")
+ cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", exe, "testdata/testwinsignal/main.go")
+ out, err := testenv.CleanCmdEnv(cmd).CombinedOutput()
+ if err != nil {
+ t.Fatalf("failed to build go exe: %v\n%s", err, out)
+ }
+
+ // run test program
+ cmd = exec.Command(exe)
+ var stderr bytes.Buffer
+ cmd.Stderr = &stderr
+ outPipe, err := cmd.StdoutPipe()
+ if err != nil {
+ t.Fatalf("Failed to create stdout pipe: %v", err)
+ }
+ outReader := bufio.NewReader(outPipe)
+
+ // in a new command window
+ const _CREATE_NEW_CONSOLE = 0x00000010
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ CreationFlags: _CREATE_NEW_CONSOLE,
+ HideWindow: true,
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("Start failed: %v", err)
+ }
+ defer func() {
+ cmd.Process.Kill()
+ cmd.Wait()
+ }()
+
+ // wait for child to be ready to receive signals
+ if line, err := outReader.ReadString('\n'); err != nil {
+ t.Fatalf("could not read stdout: %v", err)
+ } else if strings.TrimSpace(line) != "ready" {
+ t.Fatalf("unexpected message: %s", line)
+ }
+
+ // gracefully kill pid, this closes the command window
+ if err := exec.Command("taskkill.exe", "/pid", strconv.Itoa(cmd.Process.Pid)).Run(); err != nil {
+ t.Fatalf("failed to kill: %v", err)
+ }
+
+ // check child received, handled SIGTERM
+ if line, err := outReader.ReadString('\n'); err != nil {
+ t.Fatalf("could not read stdout: %v", err)
+ } else if expected, got := syscall.SIGTERM.String(), strings.TrimSpace(line); expected != got {
+ t.Fatalf("Expected '%s' got: %s", expected, got)
+ }
+
+ // check child exited gracefully, did not timeout
+ if err := cmd.Wait(); err != nil {
+ t.Fatalf("Program exited with error: %v\n%s", err, &stderr)
+ }
+}
+
// TestLibraryCtrlHandler tests that Go DLL allows calling program to handle console control events.
// See https://golang.org/issues/35965.
func TestLibraryCtrlHandler(t *testing.T) {
diff --git a/src/runtime/sigqueue.go b/src/runtime/sigqueue.go
index 0605f5da80..28b9e26d0f 100644
--- a/src/runtime/sigqueue.go
+++ b/src/runtime/sigqueue.go
@@ -12,12 +12,16 @@
// sigsend is called by the signal handler to queue a new signal.
// signal_recv is called by the Go program to receive a newly queued signal.
// Synchronization between sigsend and signal_recv is based on the sig.state
-// variable. It can be in 3 states: sigIdle, sigReceiving and sigSending.
+// variable. It can be in 4 states: sigIdle, sigReceiving, sigSending and sigFixup.
// sigReceiving means that signal_recv is blocked on sig.Note and there are no
// new pending signals.
// sigSending means that sig.mask *may* contain new pending signals,
// signal_recv can't be blocked in this state.
// sigIdle means that there are no new pending signals and signal_recv is not blocked.
+// sigFixup is a transient state that can only exist as a short
+// transition from sigReceiving and then on to sigIdle: it is
+// used to ensure the AllThreadsSyscall()'s mDoFixup() operation
+// occurs on the sleeping m, waiting to receive a signal.
// Transitions between states are done atomically with CAS.
// When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask.
// If several sigsends and signal_recv execute concurrently, it can lead to
@@ -59,6 +63,7 @@ const (
sigIdle = iota
sigReceiving
sigSending
+ sigFixup
)
// sigsend delivers a signal from sighandler to the internal signal delivery queue.
@@ -112,6 +117,9 @@ Send:
notewakeup(&sig.note)
break Send
}
+ case sigFixup:
+ // nothing to do - we need to wait for sigIdle.
+ osyield()
}
}
@@ -119,6 +127,19 @@ Send:
return true
}
+// sigRecvPrepareForFixup is used to temporarily wake up the
+// signal_recv() running thread while it is blocked waiting for the
+// arrival of a signal. If it causes the thread to wake up, the
+// sig.state travels through this sequence: sigReceiving -> sigFixup
+// -> sigIdle -> sigReceiving and resumes. (This is only called while
+// GC is disabled.)
+//go:nosplit
+func sigRecvPrepareForFixup() {
+ if atomic.Cas(&sig.state, sigReceiving, sigFixup) {
+ notewakeup(&sig.note)
+ }
+}
+
// Called to receive the next queued signal.
// Must only be called from a single goroutine at a time.
//go:linkname signal_recv os/signal.signal_recv
@@ -146,7 +167,16 @@ func signal_recv() uint32 {
}
notetsleepg(&sig.note, -1)
noteclear(&sig.note)
- break Receive
+ if !atomic.Cas(&sig.state, sigFixup, sigIdle) {
+ break Receive
+ }
+ // Getting here, the code will
+ // loop around again to sleep
+ // in state sigReceiving. This
+ // path is taken when
+ // sigRecvPrepareForFixup()
+ // has been called by another
+ // thread.
}
case sigSending:
if atomic.Cas(&sig.state, sigSending, sigIdle) {
diff --git a/src/runtime/sigqueue_plan9.go b/src/runtime/sigqueue_plan9.go
index d5fe8f8b35..aebd2060e7 100644
--- a/src/runtime/sigqueue_plan9.go
+++ b/src/runtime/sigqueue_plan9.go
@@ -92,6 +92,13 @@ func sendNote(s *byte) bool {
return true
}
+// sigRecvPrepareForFixup is a no-op on plan9. (This would only be
+// called while GC is disabled.)
+//
+//go:nosplit
+func sigRecvPrepareForFixup() {
+}
+
// Called to receive the next queued signal.
// Must only be called from a single goroutine at a time.
//go:linkname signal_recv os/signal.signal_recv
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 7b9dce5393..d971e5e26f 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -651,7 +651,7 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
// Adjust saved base pointer if there is one.
// TODO what about arm64 frame pointer adjustment?
- if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
+ if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.PtrSize {
if stackDebug >= 3 {
print(" saved bp\n")
}
@@ -1089,7 +1089,7 @@ func nilfunc() {
}
// adjust Gobuf as if it executed a call to fn
-// and then did an immediate gosave.
+// and then stopped before the first instruction in fn.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
var fn unsafe.Pointer
if fv != nil {
@@ -1245,7 +1245,7 @@ func getStackMap(frame *stkframe, cache *pcvalueCache, debug bool) (locals, args
var minsize uintptr
switch sys.ArchFamily {
case sys.ARM64:
- minsize = sys.SpAlign
+ minsize = sys.StackAlign
default:
minsize = sys.MinFrameSize
}
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go
index b55c3c0590..b9b313a711 100644
--- a/src/runtime/stubs.go
+++ b/src/runtime/stubs.go
@@ -4,7 +4,10 @@
package runtime
-import "unsafe"
+import (
+ "internal/abi"
+ "unsafe"
+)
// Should be a built-in for unsafe.Pointer?
//go:nosplit
@@ -73,7 +76,15 @@ func badsystemstack() {
// *ptr is uninitialized memory (e.g., memory that's being reused
// for a new allocation) and hence contains only "junk".
//
+// memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
+// is a multiple of the pointer size, then any pointer-aligned,
+// pointer-sized portion is cleared atomically. Despite the function
+// name, this is necessary because this function is the underlying
+// implementation of typedmemclr and memclrHasPointers. See the doc of
+// memmove for more details.
+//
// The (CPU-specific) implementations of this function are in memclr_*.s.
+//
//go:noescape
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
@@ -158,8 +169,8 @@ func noescape(p unsafe.Pointer) unsafe.Pointer {
// This in turn calls cgocallbackg, which is where we'll find
// pointer-declared arguments.
func cgocallback(fn, frame, ctxt uintptr)
+
func gogo(buf *gobuf)
-func gosave(buf *gobuf)
//go:noescape
func jmpdefer(fv *funcval, argp uintptr)
@@ -167,19 +178,50 @@ func asminit()
func setg(gg *g)
func breakpoint()
-// reflectcall calls fn with a copy of the n argument bytes pointed at by arg.
-// After fn returns, reflectcall copies n-retoffset result bytes
-// back into arg+retoffset before returning. If copying result bytes back,
-// the caller should pass the argument frame type as argtype, so that
-// call can execute appropriate write barriers during the copy.
+// reflectcall calls fn with arguments described by stackArgs, stackArgsSize,
+// frameSize, and regArgs.
+//
+// Arguments passed on the stack and space for return values passed on the stack
+// must be laid out at the space pointed to by stackArgs (with total length
+// stackArgsSize) according to the ABI.
+//
+// stackRetOffset must be some value <= stackArgsSize that indicates the
+// offset within stackArgs where the return value space begins.
+//
+// frameSize is the total size of the argument frame at stackArgs and must
+// therefore be >= stackArgsSize. It must include additional space for spilling
+// register arguments for stack growth and preemption.
+//
+// TODO(mknyszek): Once we don't need the additional spill space, remove frameSize,
+// since frameSize will be redundant with stackArgsSize.
//
-// Package reflect always passes a frame type. In package runtime,
-// Windows callbacks are the only use of this that copies results
-// back, and those cannot have pointers in their results, so runtime
-// passes nil for the frame type.
+// Arguments passed in registers must be laid out in regArgs according to the ABI.
+// regArgs will hold any return values passed in registers after the call.
+//
+// reflectcall copies stack arguments from stackArgs to the goroutine stack, and
+// then copies back stackArgsSize-stackRetOffset bytes back to the return space
+// in stackArgs once fn has completed. It also "unspills" argument registers from
+// regArgs before calling fn, and spills them back into regArgs immediately
+// following the call to fn. If there are results being returned on the stack,
+// the caller should pass the argument frame type as stackArgsType so that
+// reflectcall can execute appropriate write barriers during the copy.
+//
+// reflectcall expects regArgs.ReturnIsPtr to be populated indicating which
+// registers on the return path will contain Go pointers. It will then store
+// these pointers in regArgs.Ptrs such that they are visible to the GC.
+//
+// Package reflect passes a frame type. In package runtime, there is only
+// one call that copies results back, in callbackWrap in syscall_windows.go, and it
+// does NOT pass a frame type, meaning there are no write barriers invoked. See that
+// call site for justification.
//
// Package reflect accesses this symbol through a linkname.
-func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)
+//
+// Arguments passed through to reflectcall do not escape. The type is used
+// only in a very limited callee of reflectcall, the stackArgs are copied, and
+// regArgs is only used in the reflectcall frame.
+//go:noescape
+func reflectcall(stackArgsType *_type, fn, stackArgs unsafe.Pointer, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs)
func procyield(cycles uint32)
@@ -349,3 +391,7 @@ func duffcopy()
// Called from linker-generated .initarray; declared for go vet; do NOT call from Go.
func addmoduledata()
+
+// Injected by the signal handler for panicking signals. On many platforms it just
+// jumps to sigpanic.
+func sigpanic0()
diff --git a/src/runtime/stubs2.go b/src/runtime/stubs2.go
index 4a1a5cc3d9..96096d236b 100644
--- a/src/runtime/stubs2.go
+++ b/src/runtime/stubs2.go
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !aix
+// +build !darwin
+// +build !js
+// +build !openbsd
// +build !plan9
// +build !solaris
// +build !windows
-// +build !js
-// +build !darwin
-// +build !aix
package runtime
@@ -22,6 +23,11 @@ func closefd(fd int32) int32
func exit(code int32)
func usleep(usec uint32)
+//go:nosplit
+func usleep_no_g(usec uint32) {
+ usleep(usec)
+}
+
// write calls the write system call.
// It returns a non-negative number of bytes written or a negative errno value.
//go:noescape
diff --git a/src/runtime/stubs3.go b/src/runtime/stubs3.go
index 95eecc7eca..1885d32051 100644
--- a/src/runtime/stubs3.go
+++ b/src/runtime/stubs3.go
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !aix
+// +build !darwin
+// +build !freebsd
+// +build !openbsd
// +build !plan9
// +build !solaris
-// +build !freebsd
-// +build !darwin
-// +build !aix
package runtime
diff --git a/src/runtime/stubs_386.go b/src/runtime/stubs_386.go
index 5108294d83..300f167fff 100644
--- a/src/runtime/stubs_386.go
+++ b/src/runtime/stubs_386.go
@@ -15,3 +15,6 @@ func stackcheck()
// Called from assembly only; declared for go vet.
func setldt(slot uintptr, base unsafe.Pointer, size uintptr)
func emptyfunc()
+
+//go:noescape
+func asmcgocall_no_g(fn, arg unsafe.Pointer)
diff --git a/src/runtime/stubs_amd64.go b/src/runtime/stubs_amd64.go
index 8c14bc2271..bf98493e9d 100644
--- a/src/runtime/stubs_amd64.go
+++ b/src/runtime/stubs_amd64.go
@@ -4,6 +4,8 @@
package runtime
+import "unsafe"
+
// Called from compiled code; declared for vet; do NOT call from Go.
func gcWriteBarrierCX()
func gcWriteBarrierDX()
@@ -35,3 +37,6 @@ func retpolineR12()
func retpolineR13()
func retpolineR14()
func retpolineR15()
+
+//go:noescape
+func asmcgocall_no_g(fn, arg unsafe.Pointer)
diff --git a/src/runtime/stubs_arm.go b/src/runtime/stubs_arm.go
index c13bf16de2..52c32937ae 100644
--- a/src/runtime/stubs_arm.go
+++ b/src/runtime/stubs_arm.go
@@ -4,6 +4,8 @@
package runtime
+import "unsafe"
+
// Called from compiler-generated code; declared for go vet.
func udiv()
func _div()
@@ -18,3 +20,6 @@ func save_g()
func emptyfunc()
func _initcgo()
func read_tls_fallback()
+
+//go:noescape
+func asmcgocall_no_g(fn, arg unsafe.Pointer)
diff --git a/src/runtime/stubs_arm64.go b/src/runtime/stubs_arm64.go
index 44c566e602..f5e3bb4854 100644
--- a/src/runtime/stubs_arm64.go
+++ b/src/runtime/stubs_arm64.go
@@ -4,6 +4,13 @@
package runtime
+import "unsafe"
+
// Called from assembly only; declared for go vet.
func load_g()
func save_g()
+
+//go:noescape
+func asmcgocall_no_g(fn, arg unsafe.Pointer)
+
+func emptyfunc()
diff --git a/src/runtime/stubs_mips64x.go b/src/runtime/stubs_mips64x.go
index 4e62c1ce90..652e7a9e34 100644
--- a/src/runtime/stubs_mips64x.go
+++ b/src/runtime/stubs_mips64x.go
@@ -6,6 +6,11 @@
package runtime
+import "unsafe"
+
// Called from assembly only; declared for go vet.
func load_g()
func save_g()
+
+//go:noescape
+func asmcgocall_no_g(fn, arg unsafe.Pointer)
diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go
index 7667f23f1d..00f802aaa7 100644
--- a/src/runtime/symtab.go
+++ b/src/runtime/symtab.go
@@ -308,30 +308,53 @@ type funcID uint8
const (
funcID_normal funcID = iota // not a special function
- funcID_runtime_main
+ funcID_asmcgocall
+ funcID_asyncPreempt
+ funcID_cgocallback
+ funcID_debugCallV1
+ funcID_externalthreadhandler
+ funcID_gcBgMarkWorker
funcID_goexit
+ funcID_gogo
+ funcID_gopanic
+ funcID_handleAsyncEvent
funcID_jmpdefer
funcID_mcall
funcID_morestack
funcID_mstart
+ funcID_panicwrap
funcID_rt0_go
- funcID_asmcgocall
- funcID_sigpanic
funcID_runfinq
- funcID_gcBgMarkWorker
- funcID_systemstack_switch
+ funcID_runtime_main
+ funcID_sigpanic
funcID_systemstack
- funcID_cgocallback
- funcID_gogo
- funcID_externalthreadhandler
- funcID_debugCallV1
- funcID_gopanic
- funcID_panicwrap
- funcID_handleAsyncEvent
- funcID_asyncPreempt
+ funcID_systemstack_switch
funcID_wrapper // any autogenerated code (hash/eq algorithms, method wrappers, etc.)
)
+// A FuncFlag holds bits about a function.
+// This list must match the list in cmd/internal/objabi/funcid.go.
+type funcFlag uint8
+
+const (
+ // TOPFRAME indicates a function that appears at the top of its stack.
+ // The traceback routine stop at such a function and consider that a
+ // successful, complete traversal of the stack.
+ // Examples of TOPFRAME functions include goexit, which appears
+ // at the top of a user goroutine stack, and mstart, which appears
+ // at the top of a system goroutine stack.
+ funcFlag_TOPFRAME funcFlag = 1 << iota
+
+ // SPWRITE indicates a function that writes an arbitrary value to SP
+ // (any write other than adding or subtracting a constant amount).
+ // The traceback routines cannot encode such changes into the
+ // pcsp tables, so the function traceback cannot safely unwind past
+ // SPWRITE functions. Stopping at an SPWRITE function is considered
+ // to be an incomplete unwinding of the stack. In certain contexts
+ // (in particular garbage collector stack scans) that is a fatal error.
+ funcFlag_SPWRITE
+)
+
// pcHeader holds data used by the pclntab lookups.
type pcHeader struct {
magic uint32 // 0xFFFFFFFA
diff --git a/src/runtime/sys_darwin.go b/src/runtime/sys_darwin.go
index c89ce78012..dacce2ee1a 100644
--- a/src/runtime/sys_darwin.go
+++ b/src/runtime/sys_darwin.go
@@ -6,50 +6,6 @@ package runtime
import "unsafe"
-// Call fn with arg as its argument. Return what fn returns.
-// fn is the raw pc value of the entry point of the desired function.
-// Switches to the system stack, if not already there.
-// Preserves the calling point as the location where a profiler traceback will begin.
-//go:nosplit
-func libcCall(fn, arg unsafe.Pointer) int32 {
- // Leave caller's PC/SP/G around for traceback.
- gp := getg()
- var mp *m
- if gp != nil {
- mp = gp.m
- }
- if mp != nil && mp.libcallsp == 0 {
- mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
- // sp must be the last, because once async cpu profiler finds
- // all three values to be non-zero, it will use them
- mp.libcallsp = getcallersp()
- } else {
- // Make sure we don't reset libcallsp. This makes
- // libcCall reentrant; We remember the g/pc/sp for the
- // first call on an M, until that libcCall instance
- // returns. Reentrance only matters for signals, as
- // libc never calls back into Go. The tricky case is
- // where we call libcX from an M and record g/pc/sp.
- // Before that call returns, a signal arrives on the
- // same M and the signal handling code calls another
- // libc function. We don't want that second libcCall
- // from within the handler to be recorded, and we
- // don't want that call's completion to zero
- // libcallsp.
- // We don't need to set libcall* while we're in a sighandler
- // (even if we're not currently in libc) because we block all
- // signals while we're handling a signal. That includes the
- // profile signal, which is the one that uses the libcall* info.
- mp = nil
- }
- res := asmcgocall(fn, arg)
- if mp != nil {
- mp.libcallsp = 0
- }
- return res
-}
-
// The X versions of syscall expect the libc call to return a 64-bit result.
// Otherwise (the non-X version) expects a 32-bit result.
// This distinction is required because an error is indicated by returning -1,
@@ -273,6 +229,12 @@ func usleep_trampoline()
//go:nosplit
//go:cgo_unsafe_args
+func usleep_no_g(usec uint32) {
+ asmcgocall_no_g(unsafe.Pointer(funcPC(usleep_trampoline)), unsafe.Pointer(&usec))
+}
+
+//go:nosplit
+//go:cgo_unsafe_args
func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
return libcCall(unsafe.Pointer(funcPC(write_trampoline)), unsafe.Pointer(&fd))
}
@@ -467,7 +429,7 @@ func setNonblock(fd int32) {
//go:cgo_import_dynamic libc_pthread_create pthread_create "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_self pthread_self "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_pthread_kill pthread_kill "/usr/lib/libSystem.B.dylib"
-//go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic libc_exit _exit "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_raise raise "/usr/lib/libSystem.B.dylib"
//go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib"
diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s
index 630fb5df64..0fe8c7e172 100644
--- a/src/runtime/sys_darwin_amd64.s
+++ b/src/runtime/sys_darwin_amd64.s
@@ -5,6 +5,8 @@
// System calls and other sys.stuff for AMD64, Darwin
// System calls are implemented in libSystem, this file contains
// trampolines that convert from Go to C calling convention.
+// The trampolines are ABIInternal as they are referenced from
+// Go code with funcPC.
#include "go_asm.h"
#include "go_tls.h"
@@ -13,7 +15,7 @@
#define CLOCK_REALTIME 0
// Exit the entire program (like C exit)
-TEXT runtime·exit_trampoline(SB),NOSPLIT,$0
+TEXT runtime·exit_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVL 0(DI), DI // arg 1 exit status
@@ -22,7 +24,7 @@ TEXT runtime·exit_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·open_trampoline(SB),NOSPLIT,$0
+TEXT runtime·open_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVL 8(DI), SI // arg 2 flags
@@ -33,7 +35,7 @@ TEXT runtime·open_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·close_trampoline(SB),NOSPLIT,$0
+TEXT runtime·close_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVL 0(DI), DI // arg 1 fd
@@ -41,7 +43,7 @@ TEXT runtime·close_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·read_trampoline(SB),NOSPLIT,$0
+TEXT runtime·read_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 buf
@@ -57,7 +59,7 @@ noerr:
POPQ BP
RET
-TEXT runtime·write_trampoline(SB),NOSPLIT,$0
+TEXT runtime·write_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 buf
@@ -73,7 +75,7 @@ noerr:
POPQ BP
RET
-TEXT runtime·pipe_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pipe_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
CALL libc_pipe(SB) // pointer already in DI
@@ -84,7 +86,7 @@ TEXT runtime·pipe_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·setitimer_trampoline(SB),NOSPLIT,$0
+TEXT runtime·setitimer_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 new
@@ -94,7 +96,7 @@ TEXT runtime·setitimer_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·madvise_trampoline(SB), NOSPLIT, $0
+TEXT runtime·madvise_trampoline<ABIInternal>(SB), NOSPLIT, $0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 len
@@ -105,12 +107,12 @@ TEXT runtime·madvise_trampoline(SB), NOSPLIT, $0
POPQ BP
RET
-TEXT runtime·mlock_trampoline(SB), NOSPLIT, $0
+TEXT runtime·mlock_trampoline<ABIInternal>(SB), NOSPLIT, $0
UNDEF // unimplemented
GLOBL timebase<>(SB),NOPTR,$(machTimebaseInfo__size)
-TEXT runtime·nanotime_trampoline(SB),NOSPLIT,$0
+TEXT runtime·nanotime_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ DI, BX
@@ -139,7 +141,7 @@ initialized:
POPQ BP
RET
-TEXT runtime·walltime_trampoline(SB),NOSPLIT,$0
+TEXT runtime·walltime_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP // make a frame; keep stack aligned
MOVQ SP, BP
MOVQ DI, SI // arg 2 timespec
@@ -148,7 +150,7 @@ TEXT runtime·walltime_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·sigaction_trampoline(SB),NOSPLIT,$0
+TEXT runtime·sigaction_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 new
@@ -161,7 +163,7 @@ TEXT runtime·sigaction_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·sigprocmask_trampoline(SB),NOSPLIT,$0
+TEXT runtime·sigprocmask_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 new
@@ -174,7 +176,7 @@ TEXT runtime·sigprocmask_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$0
+TEXT runtime·sigaltstack_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 old
@@ -186,7 +188,7 @@ TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·raiseproc_trampoline(SB),NOSPLIT,$0
+TEXT runtime·raiseproc_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVL 0(DI), BX // signal
@@ -212,7 +214,7 @@ TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
// This is the function registered during sigaction and is invoked when
// a signal is received. It just redirects to the Go function sigtrampgo.
-TEXT runtime·sigtramp(SB),NOSPLIT,$0
+TEXT runtime·sigtramp<ABIInternal>(SB),NOSPLIT,$0
// This runs on the signal stack, so we have lots of stack available.
// We allocate our own stack space, because if we tell the linker
// how much we're using, the NOSPLIT check fails.
@@ -246,7 +248,7 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$0
// Used instead of sigtramp in programs that use cgo.
// Arguments from kernel are in DI, SI, DX.
-TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
+TEXT runtime·cgoSigtramp<ABIInternal>(SB),NOSPLIT,$0
// If no traceback function, do usual sigtramp.
MOVQ runtime·cgoTraceback(SB), AX
TESTQ AX, AX
@@ -289,12 +291,12 @@ TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
// The first three arguments, and the fifth, are already in registers.
// Set the two remaining arguments now.
MOVQ runtime·cgoTraceback(SB), CX
- MOVQ $runtime·sigtramp(SB), R9
+ MOVQ $runtime·sigtramp<ABIInternal>(SB), R9
MOVQ _cgo_callers(SB), AX
JMP AX
sigtramp:
- JMP runtime·sigtramp(SB)
+ JMP runtime·sigtramp<ABIInternal>(SB)
sigtrampnog:
// Signal arrived on a non-Go thread. If this is SIGPROF, get a
@@ -320,7 +322,7 @@ sigtrampnog:
MOVQ _cgo_callers(SB), AX
JMP AX
-TEXT runtime·mmap_trampoline(SB),NOSPLIT,$0
+TEXT runtime·mmap_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP // make a frame; keep stack aligned
MOVQ SP, BP
MOVQ DI, BX
@@ -343,7 +345,7 @@ ok:
POPQ BP
RET
-TEXT runtime·munmap_trampoline(SB),NOSPLIT,$0
+TEXT runtime·munmap_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 len
@@ -355,7 +357,7 @@ TEXT runtime·munmap_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·usleep_trampoline(SB),NOSPLIT,$0
+TEXT runtime·usleep_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVL 0(DI), DI // arg 1 usec
@@ -367,7 +369,7 @@ TEXT runtime·settls(SB),NOSPLIT,$32
// Nothing to do on Darwin, pthread already set thread-local storage up.
RET
-TEXT runtime·sysctl_trampoline(SB),NOSPLIT,$0
+TEXT runtime·sysctl_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVL 8(DI), SI // arg 2 miblen
@@ -380,7 +382,7 @@ TEXT runtime·sysctl_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·sysctlbyname_trampoline(SB),NOSPLIT,$0
+TEXT runtime·sysctlbyname_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 oldp
@@ -392,14 +394,14 @@ TEXT runtime·sysctlbyname_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·kqueue_trampoline(SB),NOSPLIT,$0
+TEXT runtime·kqueue_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
CALL libc_kqueue(SB)
POPQ BP
RET
-TEXT runtime·kevent_trampoline(SB),NOSPLIT,$0
+TEXT runtime·kevent_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 keventt
@@ -418,7 +420,7 @@ ok:
POPQ BP
RET
-TEXT runtime·fcntl_trampoline(SB),NOSPLIT,$0
+TEXT runtime·fcntl_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVL 4(DI), SI // arg 2 cmd
@@ -475,7 +477,7 @@ TEXT runtime·mstart_stub(SB),NOSPLIT,$0
// A pointer to the arguments is passed in DI.
// A single int32 result is returned in AX.
// (For more results, make an args/results structure.)
-TEXT runtime·pthread_attr_init_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_attr_init_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP // make frame, keep stack 16-byte aligned.
MOVQ SP, BP
MOVQ 0(DI), DI // arg 1 attr
@@ -483,7 +485,7 @@ TEXT runtime·pthread_attr_init_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·pthread_attr_getstacksize_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_attr_getstacksize_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 size
@@ -492,7 +494,7 @@ TEXT runtime·pthread_attr_getstacksize_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·pthread_attr_setdetachstate_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_attr_setdetachstate_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 state
@@ -501,7 +503,7 @@ TEXT runtime·pthread_attr_setdetachstate_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·pthread_create_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_create_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
SUBQ $16, SP
@@ -514,7 +516,7 @@ TEXT runtime·pthread_create_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·raise_trampoline(SB),NOSPLIT,$0
+TEXT runtime·raise_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVL 0(DI), DI // arg 1 signal
@@ -522,7 +524,7 @@ TEXT runtime·raise_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·pthread_mutex_init_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_mutex_init_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 attr
@@ -531,7 +533,7 @@ TEXT runtime·pthread_mutex_init_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·pthread_mutex_lock_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_mutex_lock_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 0(DI), DI // arg 1 mutex
@@ -539,7 +541,7 @@ TEXT runtime·pthread_mutex_lock_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·pthread_mutex_unlock_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_mutex_unlock_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 0(DI), DI // arg 1 mutex
@@ -547,7 +549,7 @@ TEXT runtime·pthread_mutex_unlock_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·pthread_cond_init_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_cond_init_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 attr
@@ -556,7 +558,7 @@ TEXT runtime·pthread_cond_init_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·pthread_cond_wait_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_cond_wait_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 mutex
@@ -565,7 +567,7 @@ TEXT runtime·pthread_cond_wait_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·pthread_cond_timedwait_relative_np_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_cond_timedwait_relative_np_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 mutex
@@ -575,7 +577,7 @@ TEXT runtime·pthread_cond_timedwait_relative_np_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_cond_signal_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 0(DI), DI // arg 1 cond
@@ -583,7 +585,7 @@ TEXT runtime·pthread_cond_signal_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·pthread_self_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_self_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ DI, BX // BX is caller-save
@@ -592,7 +594,7 @@ TEXT runtime·pthread_self_trampoline(SB),NOSPLIT,$0
POPQ BP
RET
-TEXT runtime·pthread_kill_trampoline(SB),NOSPLIT,$0
+TEXT runtime·pthread_kill_trampoline<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
MOVQ 8(DI), SI // arg 2 sig
@@ -617,7 +619,7 @@ TEXT runtime·pthread_kill_trampoline(SB),NOSPLIT,$0
//
// syscall expects a 32-bit result and tests for 32-bit -1
// to decide there was an error.
-TEXT runtime·syscall(SB),NOSPLIT,$0
+TEXT runtime·syscall<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
SUBQ $16, SP
@@ -667,7 +669,7 @@ ok:
//
// syscallX is like syscall but expects a 64-bit result
// and tests for 64-bit -1 to decide there was an error.
-TEXT runtime·syscallX(SB),NOSPLIT,$0
+TEXT runtime·syscallX<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
SUBQ $16, SP
@@ -703,7 +705,7 @@ ok:
// syscallPtr is like syscallX except that the libc function reports an
// error by returning NULL and setting errno.
-TEXT runtime·syscallPtr(SB),NOSPLIT,$0
+TEXT runtime·syscallPtr<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
SUBQ $16, SP
@@ -756,7 +758,7 @@ ok:
//
// syscall6 expects a 32-bit result and tests for 32-bit -1
// to decide there was an error.
-TEXT runtime·syscall6(SB),NOSPLIT,$0
+TEXT runtime·syscall6<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
SUBQ $16, SP
@@ -809,7 +811,7 @@ ok:
//
// syscall6X is like syscall6 but expects a 64-bit result
// and tests for 64-bit -1 to decide there was an error.
-TEXT runtime·syscall6X(SB),NOSPLIT,$0
+TEXT runtime·syscall6X<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
SUBQ $16, SP
@@ -845,7 +847,7 @@ ok:
// syscallNoErr is like syscall6 but does not check for errors, and
// only returns one value, for use with standard C ABI library functions.
-TEXT runtime·syscallNoErr(SB),NOSPLIT,$0
+TEXT runtime·syscallNoErr<ABIInternal>(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
SUBQ $16, SP
diff --git a/src/runtime/sys_freebsd_386.s b/src/runtime/sys_freebsd_386.s
index c346e719e1..97e6d9ab36 100644
--- a/src/runtime/sys_freebsd_386.s
+++ b/src/runtime/sys_freebsd_386.s
@@ -13,12 +13,16 @@
TEXT runtime·sys_umtx_op(SB),NOSPLIT,$-4
MOVL $454, AX
INT $0x80
+ JAE 2(PC)
+ NEGL AX
MOVL AX, ret+20(FP)
RET
TEXT runtime·thr_new(SB),NOSPLIT,$-4
MOVL $455, AX
INT $0x80
+ JAE 2(PC)
+ NEGL AX
MOVL AX, ret+8(FP)
RET
@@ -120,6 +124,8 @@ TEXT runtime·pipe2(SB),NOSPLIT,$12-16
MOVL flags+0(FP), BX
MOVL BX, 8(SP)
INT $0x80
+ JAE 2(PC)
+ NEGL AX
MOVL AX, errno+12(FP)
RET
diff --git a/src/runtime/sys_freebsd_amd64.s b/src/runtime/sys_freebsd_amd64.s
index 010b2ec4d4..07734b0d7d 100644
--- a/src/runtime/sys_freebsd_amd64.s
+++ b/src/runtime/sys_freebsd_amd64.s
@@ -18,6 +18,8 @@ TEXT runtime·sys_umtx_op(SB),NOSPLIT,$0
MOVQ ut+24(FP), R8
MOVL $454, AX
SYSCALL
+ JCC 2(PC)
+ NEGQ AX
MOVL AX, ret+32(FP)
RET
@@ -26,6 +28,8 @@ TEXT runtime·thr_new(SB),NOSPLIT,$0
MOVL size+8(FP), SI
MOVL $455, AX
SYSCALL
+ JCC 2(PC)
+ NEGQ AX
MOVL AX, ret+16(FP)
RET
@@ -118,6 +122,8 @@ TEXT runtime·pipe2(SB),NOSPLIT,$0-20
MOVL flags+0(FP), SI
MOVL $542, AX
SYSCALL
+ JCC 2(PC)
+ NEGQ AX
MOVL AX, errno+16(FP)
RET
diff --git a/src/runtime/sys_freebsd_arm.s b/src/runtime/sys_freebsd_arm.s
index 1e12f9cfcb..b12e47c576 100644
--- a/src/runtime/sys_freebsd_arm.s
+++ b/src/runtime/sys_freebsd_arm.s
@@ -51,6 +51,7 @@ TEXT runtime·sys_umtx_op(SB),NOSPLIT,$0
ADD $20, R13 // arg 5 is passed on stack
MOVW $SYS__umtx_op, R7
SWI $0
+ RSB.CS $0, R0
SUB $20, R13
// BCS error
MOVW R0, ret+20(FP)
@@ -61,6 +62,7 @@ TEXT runtime·thr_new(SB),NOSPLIT,$0
MOVW size+4(FP), R1
MOVW $SYS_thr_new, R7
SWI $0
+ RSB.CS $0, R0
MOVW R0, ret+8(FP)
RET
@@ -144,6 +146,7 @@ TEXT runtime·pipe2(SB),NOSPLIT,$0-16
MOVW flags+0(FP), R1
MOVW $SYS_pipe2, R7
SWI $0
+ RSB.CS $0, R0
MOVW R0, errno+12(FP)
RET
diff --git a/src/runtime/sys_freebsd_arm64.s b/src/runtime/sys_freebsd_arm64.s
index 8a4f9b7fa1..1aa09e87ca 100644
--- a/src/runtime/sys_freebsd_arm64.s
+++ b/src/runtime/sys_freebsd_arm64.s
@@ -60,6 +60,9 @@ TEXT runtime·sys_umtx_op(SB),NOSPLIT,$0
MOVD ut+24(FP), R4
MOVD $SYS__umtx_op, R8
SVC
+ BCC ok
+ NEG R0, R0
+ok:
MOVW R0, ret+32(FP)
RET
@@ -69,6 +72,9 @@ TEXT runtime·thr_new(SB),NOSPLIT,$0
MOVW size+8(FP), R1
MOVD $SYS_thr_new, R8
SVC
+ BCC ok
+ NEG R0, R0
+ok:
MOVW R0, ret+16(FP)
RET
diff --git a/src/runtime/sys_libc.go b/src/runtime/sys_libc.go
new file mode 100644
index 0000000000..996c032105
--- /dev/null
+++ b/src/runtime/sys_libc.go
@@ -0,0 +1,53 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin openbsd,amd64 openbsd,arm64
+
+package runtime
+
+import "unsafe"
+
+// Call fn with arg as its argument. Return what fn returns.
+// fn is the raw pc value of the entry point of the desired function.
+// Switches to the system stack, if not already there.
+// Preserves the calling point as the location where a profiler traceback will begin.
+//go:nosplit
+func libcCall(fn, arg unsafe.Pointer) int32 {
+ // Leave caller's PC/SP/G around for traceback.
+ gp := getg()
+ var mp *m
+ if gp != nil {
+ mp = gp.m
+ }
+ if mp != nil && mp.libcallsp == 0 {
+ mp.libcallg.set(gp)
+ mp.libcallpc = getcallerpc()
+ // sp must be the last, because once async cpu profiler finds
+ // all three values to be non-zero, it will use them
+ mp.libcallsp = getcallersp()
+ } else {
+ // Make sure we don't reset libcallsp. This makes
+ // libcCall reentrant; We remember the g/pc/sp for the
+ // first call on an M, until that libcCall instance
+ // returns. Reentrance only matters for signals, as
+ // libc never calls back into Go. The tricky case is
+ // where we call libcX from an M and record g/pc/sp.
+ // Before that call returns, a signal arrives on the
+ // same M and the signal handling code calls another
+ // libc function. We don't want that second libcCall
+ // from within the handler to be recorded, and we
+ // don't want that call's completion to zero
+ // libcallsp.
+ // We don't need to set libcall* while we're in a sighandler
+ // (even if we're not currently in libc) because we block all
+ // signals while we're handling a signal. That includes the
+ // profile signal, which is the one that uses the libcall* info.
+ mp = nil
+ }
+ res := asmcgocall(fn, arg)
+ if mp != nil {
+ mp.libcallsp = 0
+ }
+ return res
+}
diff --git a/src/runtime/sys_linux_amd64.s b/src/runtime/sys_linux_amd64.s
index 37cb8dad03..d48573c2c5 100644
--- a/src/runtime/sys_linux_amd64.s
+++ b/src/runtime/sys_linux_amd64.s
@@ -215,9 +215,13 @@ TEXT runtime·walltime1(SB),NOSPLIT,$16-12
MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
+#ifdef GOEXPERIMENT_REGABI
+ MOVQ g_m(R14), BX // BX unchanged by C code.
+#else
get_tls(CX)
MOVQ g(CX), AX
MOVQ g_m(AX), BX // BX unchanged by C code.
+#endif
// Set vdsoPC and vdsoSP for SIGPROF traceback.
// Save the old values on stack and restore them on exit,
@@ -232,7 +236,11 @@ TEXT runtime·walltime1(SB),NOSPLIT,$16-12
MOVQ CX, m_vdsoPC(BX)
MOVQ DX, m_vdsoSP(BX)
+#ifdef GOEXPERIMENT_REGABI
+ CMPQ R14, m_curg(BX) // Only switch if on curg.
+#else
CMPQ AX, m_curg(BX) // Only switch if on curg.
+#endif
JNE noswitch
MOVQ m_g0(BX), DX
@@ -275,9 +283,13 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$16-8
MOVQ SP, R12 // Save old SP; R12 unchanged by C code.
+#ifdef GOEXPERIMENT_REGABI
+ MOVQ g_m(R14), BX // BX unchanged by C code.
+#else
get_tls(CX)
MOVQ g(CX), AX
MOVQ g_m(AX), BX // BX unchanged by C code.
+#endif
// Set vdsoPC and vdsoSP for SIGPROF traceback.
// Save the old values on stack and restore them on exit,
@@ -292,7 +304,11 @@ TEXT runtime·nanotime1(SB),NOSPLIT,$16-8
MOVQ CX, m_vdsoPC(BX)
MOVQ DX, m_vdsoSP(BX)
+#ifdef GOEXPERIMENT_REGABI
+ CMPQ R14, m_curg(BX) // Only switch if on curg.
+#else
CMPQ AX, m_curg(BX) // Only switch if on curg.
+#endif
JNE noswitch
MOVQ m_g0(BX), DX
@@ -632,6 +648,7 @@ nog1:
get_tls(CX)
MOVQ R13, g_m(R9)
MOVQ R9, g(CX)
+ MOVQ R9, R14 // set g register
CALL runtime·stackcheck(SB)
nog2:
diff --git a/src/runtime/sys_linux_mips64x.s b/src/runtime/sys_linux_mips64x.s
index afad056d06..c3e9f37694 100644
--- a/src/runtime/sys_linux_mips64x.s
+++ b/src/runtime/sys_linux_mips64x.s
@@ -250,6 +250,14 @@ noswitch:
BEQ R25, fallback
JAL (R25)
+ // check on vdso call return for kernel compatibility
+ // see https://golang.org/issues/39046
+ // if we get any error make fallback permanent.
+ BEQ R2, R0, finish
+ MOVV R0, runtime·vdsoClockgettimeSym(SB)
+ MOVW $0, R4 // CLOCK_REALTIME
+ MOVV $0(R29), R5
+ JMP fallback
finish:
MOVV 0(R29), R3 // sec
@@ -311,6 +319,12 @@ noswitch:
BEQ R25, fallback
JAL (R25)
+ // see walltime1 for detail
+ BEQ R2, R0, finish
+ MOVV R0, runtime·vdsoClockgettimeSym(SB)
+ MOVW $1, R4 // CLOCK_MONOTONIC
+ MOVV $0(R29), R5
+ JMP fallback
finish:
MOVV 0(R29), R3 // sec
diff --git a/src/runtime/sys_openbsd.go b/src/runtime/sys_openbsd.go
new file mode 100644
index 0000000000..fcddf4d6a5
--- /dev/null
+++ b/src/runtime/sys_openbsd.go
@@ -0,0 +1,60 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build openbsd,amd64 openbsd,arm64
+
+package runtime
+
+import "unsafe"
+
+// The *_trampoline functions convert from the Go calling convention to the C calling convention
+// and then call the underlying libc function. These are defined in sys_openbsd_$ARCH.s.
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_attr_init(attr *pthreadattr) int32 {
+ return libcCall(unsafe.Pointer(funcPC(pthread_attr_init_trampoline)), unsafe.Pointer(&attr))
+}
+func pthread_attr_init_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_attr_destroy(attr *pthreadattr) int32 {
+ return libcCall(unsafe.Pointer(funcPC(pthread_attr_destroy_trampoline)), unsafe.Pointer(&attr))
+}
+func pthread_attr_destroy_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_attr_getstacksize(attr *pthreadattr, size *uintptr) int32 {
+ return libcCall(unsafe.Pointer(funcPC(pthread_attr_getstacksize_trampoline)), unsafe.Pointer(&attr))
+}
+func pthread_attr_getstacksize_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_attr_setdetachstate(attr *pthreadattr, state int) int32 {
+ return libcCall(unsafe.Pointer(funcPC(pthread_attr_setdetachstate_trampoline)), unsafe.Pointer(&attr))
+}
+func pthread_attr_setdetachstate_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32 {
+ return libcCall(unsafe.Pointer(funcPC(pthread_create_trampoline)), unsafe.Pointer(&attr))
+}
+func pthread_create_trampoline()
+
+// Tell the linker that the libc_* functions are to be found
+// in a system library, with the libc_ prefix missing.
+
+//go:cgo_import_dynamic libc_pthread_attr_init pthread_attr_init "libpthread.so"
+//go:cgo_import_dynamic libc_pthread_attr_destroy pthread_attr_destroy "libpthread.so"
+//go:cgo_import_dynamic libc_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so"
+//go:cgo_import_dynamic libc_pthread_attr_setdetachstate pthread_attr_setdetachstate "libpthread.so"
+//go:cgo_import_dynamic libc_pthread_create pthread_create "libpthread.so"
+//go:cgo_import_dynamic libc_pthread_sigmask pthread_sigmask "libpthread.so"
+
+//go:cgo_import_dynamic _ _ "libpthread.so"
+//go:cgo_import_dynamic _ _ "libc.so"
diff --git a/src/runtime/sys_openbsd1.go b/src/runtime/sys_openbsd1.go
new file mode 100644
index 0000000000..44c7871ceb
--- /dev/null
+++ b/src/runtime/sys_openbsd1.go
@@ -0,0 +1,39 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build openbsd,amd64 openbsd,arm64
+
+package runtime
+
+import "unsafe"
+
+//go:nosplit
+//go:cgo_unsafe_args
+func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32 {
+ return libcCall(unsafe.Pointer(funcPC(thrsleep_trampoline)), unsafe.Pointer(&ident))
+}
+func thrsleep_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func thrwakeup(ident uintptr, n int32) int32 {
+ return libcCall(unsafe.Pointer(funcPC(thrwakeup_trampoline)), unsafe.Pointer(&ident))
+}
+func thrwakeup_trampoline()
+
+func osyield() {
+ libcCall(unsafe.Pointer(funcPC(sched_yield_trampoline)), unsafe.Pointer(nil))
+}
+func sched_yield_trampoline()
+
+//go:nosplit
+func osyield_no_g() {
+ asmcgocall_no_g(unsafe.Pointer(funcPC(sched_yield_trampoline)), unsafe.Pointer(nil))
+}
+
+//go:cgo_import_dynamic libc_thrsleep __thrsleep "libc.so"
+//go:cgo_import_dynamic libc_thrwakeup __thrwakeup "libc.so"
+//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.so"
+
+//go:cgo_import_dynamic _ _ "libc.so"
diff --git a/src/runtime/sys_openbsd2.go b/src/runtime/sys_openbsd2.go
new file mode 100644
index 0000000000..33032596c3
--- /dev/null
+++ b/src/runtime/sys_openbsd2.go
@@ -0,0 +1,256 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build openbsd,amd64 openbsd,arm64
+
+package runtime
+
+import "unsafe"
+
+// This is exported via linkname to assembly in runtime/cgo.
+//go:linkname exit
+//go:nosplit
+//go:cgo_unsafe_args
+func exit(code int32) {
+ libcCall(unsafe.Pointer(funcPC(exit_trampoline)), unsafe.Pointer(&code))
+}
+func exit_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func getthrid() (tid int32) {
+ libcCall(unsafe.Pointer(funcPC(getthrid_trampoline)), unsafe.Pointer(&tid))
+ return
+}
+func getthrid_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func raiseproc(sig uint32) {
+ libcCall(unsafe.Pointer(funcPC(raiseproc_trampoline)), unsafe.Pointer(&sig))
+}
+func raiseproc_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func thrkill(tid int32, sig int) {
+ libcCall(unsafe.Pointer(funcPC(thrkill_trampoline)), unsafe.Pointer(&tid))
+}
+func thrkill_trampoline()
+
+// mmap is used to do low-level memory allocation via mmap. Don't allow stack
+// splits, since this function (used by sysAlloc) is called in a lot of low-level
+// parts of the runtime and callers often assume it won't acquire any locks.
+// go:nosplit
+func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (unsafe.Pointer, int) {
+ args := struct {
+ addr unsafe.Pointer
+ n uintptr
+ prot, flags, fd int32
+ off uint32
+ ret1 unsafe.Pointer
+ ret2 int
+ }{addr, n, prot, flags, fd, off, nil, 0}
+ libcCall(unsafe.Pointer(funcPC(mmap_trampoline)), unsafe.Pointer(&args))
+ return args.ret1, args.ret2
+}
+func mmap_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func munmap(addr unsafe.Pointer, n uintptr) {
+ libcCall(unsafe.Pointer(funcPC(munmap_trampoline)), unsafe.Pointer(&addr))
+}
+func munmap_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func madvise(addr unsafe.Pointer, n uintptr, flags int32) {
+ libcCall(unsafe.Pointer(funcPC(madvise_trampoline)), unsafe.Pointer(&addr))
+}
+func madvise_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func open(name *byte, mode, perm int32) (ret int32) {
+ return libcCall(unsafe.Pointer(funcPC(open_trampoline)), unsafe.Pointer(&name))
+}
+func open_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func closefd(fd int32) int32 {
+ return libcCall(unsafe.Pointer(funcPC(close_trampoline)), unsafe.Pointer(&fd))
+}
+func close_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func read(fd int32, p unsafe.Pointer, n int32) int32 {
+ return libcCall(unsafe.Pointer(funcPC(read_trampoline)), unsafe.Pointer(&fd))
+}
+func read_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
+ return libcCall(unsafe.Pointer(funcPC(write_trampoline)), unsafe.Pointer(&fd))
+}
+func write_trampoline()
+
+func pipe() (r, w int32, errno int32) {
+ return pipe2(0)
+}
+
+func pipe2(flags int32) (r, w int32, errno int32) {
+ var p [2]int32
+ args := struct {
+ p unsafe.Pointer
+ flags int32
+ }{noescape(unsafe.Pointer(&p)), flags}
+ errno = libcCall(unsafe.Pointer(funcPC(pipe2_trampoline)), unsafe.Pointer(&args))
+ return p[0], p[1], errno
+}
+func pipe2_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func setitimer(mode int32, new, old *itimerval) {
+ libcCall(unsafe.Pointer(funcPC(setitimer_trampoline)), unsafe.Pointer(&mode))
+}
+func setitimer_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func usleep(usec uint32) {
+ libcCall(unsafe.Pointer(funcPC(usleep_trampoline)), unsafe.Pointer(&usec))
+}
+func usleep_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func usleep_no_g(usec uint32) {
+ asmcgocall_no_g(unsafe.Pointer(funcPC(usleep_trampoline)), unsafe.Pointer(&usec))
+}
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 {
+ return libcCall(unsafe.Pointer(funcPC(sysctl_trampoline)), unsafe.Pointer(&mib))
+}
+func sysctl_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func fcntl(fd, cmd, arg int32) int32 {
+ return libcCall(unsafe.Pointer(funcPC(fcntl_trampoline)), unsafe.Pointer(&fd))
+}
+func fcntl_trampoline()
+
+//go:nosplit
+func nanotime1() int64 {
+ var ts timespec
+ args := struct {
+ clock_id int32
+ tp unsafe.Pointer
+ }{_CLOCK_MONOTONIC, unsafe.Pointer(&ts)}
+ libcCall(unsafe.Pointer(funcPC(clock_gettime_trampoline)), unsafe.Pointer(&args))
+ return ts.tv_sec*1e9 + int64(ts.tv_nsec)
+}
+func clock_gettime_trampoline()
+
+//go:nosplit
+func walltime1() (int64, int32) {
+ var ts timespec
+ args := struct {
+ clock_id int32
+ tp unsafe.Pointer
+ }{_CLOCK_REALTIME, unsafe.Pointer(&ts)}
+ libcCall(unsafe.Pointer(funcPC(clock_gettime_trampoline)), unsafe.Pointer(&args))
+ return ts.tv_sec, int32(ts.tv_nsec)
+}
+
+//go:nosplit
+//go:cgo_unsafe_args
+func kqueue() int32 {
+ return libcCall(unsafe.Pointer(funcPC(kqueue_trampoline)), nil)
+}
+func kqueue_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 {
+ return libcCall(unsafe.Pointer(funcPC(kevent_trampoline)), unsafe.Pointer(&kq))
+}
+func kevent_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sigaction(sig uint32, new *sigactiont, old *sigactiont) {
+ libcCall(unsafe.Pointer(funcPC(sigaction_trampoline)), unsafe.Pointer(&sig))
+}
+func sigaction_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sigprocmask(how uint32, new *sigset, old *sigset) {
+ libcCall(unsafe.Pointer(funcPC(sigprocmask_trampoline)), unsafe.Pointer(&how))
+}
+func sigprocmask_trampoline()
+
+//go:nosplit
+//go:cgo_unsafe_args
+func sigaltstack(new *stackt, old *stackt) {
+ libcCall(unsafe.Pointer(funcPC(sigaltstack_trampoline)), unsafe.Pointer(&new))
+}
+func sigaltstack_trampoline()
+
+// Not used on OpenBSD, but must be defined.
+func exitThread(wait *uint32) {
+}
+
+//go:nosplit
+func closeonexec(fd int32) {
+ fcntl(fd, _F_SETFD, _FD_CLOEXEC)
+}
+
+//go:nosplit
+func setNonblock(fd int32) {
+ flags := fcntl(fd, _F_GETFL, 0)
+ fcntl(fd, _F_SETFL, flags|_O_NONBLOCK)
+}
+
+// Tell the linker that the libc_* functions are to be found
+// in a system library, with the libc_ prefix missing.
+
+//go:cgo_import_dynamic libc_errno __errno "libc.so"
+//go:cgo_import_dynamic libc_exit exit "libc.so"
+//go:cgo_import_dynamic libc_getthrid getthrid "libc.so"
+//go:cgo_import_dynamic libc_sched_yield sched_yield "libc.so"
+//go:cgo_import_dynamic libc_thrkill thrkill "libc.so"
+
+//go:cgo_import_dynamic libc_mmap mmap "libc.so"
+//go:cgo_import_dynamic libc_munmap munmap "libc.so"
+//go:cgo_import_dynamic libc_madvise madvise "libc.so"
+
+//go:cgo_import_dynamic libc_open open "libc.so"
+//go:cgo_import_dynamic libc_close close "libc.so"
+//go:cgo_import_dynamic libc_read read "libc.so"
+//go:cgo_import_dynamic libc_write write "libc.so"
+//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so"
+
+//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so"
+//go:cgo_import_dynamic libc_setitimer setitimer "libc.so"
+//go:cgo_import_dynamic libc_usleep usleep "libc.so"
+//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
+//go:cgo_import_dynamic libc_fcntl fcntl "libc.so"
+//go:cgo_import_dynamic libc_getpid getpid "libc.so"
+//go:cgo_import_dynamic libc_kill kill "libc.so"
+//go:cgo_import_dynamic libc_kqueue kqueue "libc.so"
+//go:cgo_import_dynamic libc_kevent kevent "libc.so"
+
+//go:cgo_import_dynamic libc_sigaction sigaction "libc.so"
+//go:cgo_import_dynamic libc_sigaltstack sigaltstack "libc.so"
+
+//go:cgo_import_dynamic _ _ "libc.so"
diff --git a/src/runtime/sys_openbsd3.go b/src/runtime/sys_openbsd3.go
new file mode 100644
index 0000000000..4d4c88e3ac
--- /dev/null
+++ b/src/runtime/sys_openbsd3.go
@@ -0,0 +1,113 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build openbsd,amd64 openbsd,arm64
+
+package runtime
+
+import "unsafe"
+
+// The X versions of syscall expect the libc call to return a 64-bit result.
+// Otherwise (the non-X version) expects a 32-bit result.
+// This distinction is required because an error is indicated by returning -1,
+// and we need to know whether to check 32 or 64 bits of the result.
+// (Some libc functions that return 32 bits put junk in the upper 32 bits of AX.)
+
+//go:linkname syscall_syscall syscall.syscall
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ libcCall(unsafe.Pointer(funcPC(syscall)), unsafe.Pointer(&fn))
+ exitsyscall()
+ return
+}
+func syscall()
+
+//go:linkname syscall_syscallX syscall.syscallX
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_syscallX(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ libcCall(unsafe.Pointer(funcPC(syscallX)), unsafe.Pointer(&fn))
+ exitsyscall()
+ return
+}
+func syscallX()
+
+//go:linkname syscall_syscall6 syscall.syscall6
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ libcCall(unsafe.Pointer(funcPC(syscall6)), unsafe.Pointer(&fn))
+ exitsyscall()
+ return
+}
+func syscall6()
+
+//go:linkname syscall_syscall6X syscall.syscall6X
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ libcCall(unsafe.Pointer(funcPC(syscall6X)), unsafe.Pointer(&fn))
+ exitsyscall()
+ return
+}
+func syscall6X()
+
+//go:linkname syscall_syscall10 syscall.syscall10
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ libcCall(unsafe.Pointer(funcPC(syscall10)), unsafe.Pointer(&fn))
+ exitsyscall()
+ return
+}
+func syscall10()
+
+//go:linkname syscall_syscall10X syscall.syscall10X
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_syscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
+ entersyscall()
+ libcCall(unsafe.Pointer(funcPC(syscall10X)), unsafe.Pointer(&fn))
+ exitsyscall()
+ return
+}
+func syscall10X()
+
+//go:linkname syscall_rawSyscall syscall.rawSyscall
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
+ libcCall(unsafe.Pointer(funcPC(syscall)), unsafe.Pointer(&fn))
+ return
+}
+
+//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ libcCall(unsafe.Pointer(funcPC(syscall6)), unsafe.Pointer(&fn))
+ return
+}
+
+//go:linkname syscall_rawSyscall6X syscall.rawSyscall6X
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_rawSyscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
+ libcCall(unsafe.Pointer(funcPC(syscall6X)), unsafe.Pointer(&fn))
+ return
+}
+
+//go:linkname syscall_rawSyscall10X syscall.rawSyscall10X
+//go:nosplit
+//go:cgo_unsafe_args
+func syscall_rawSyscall10X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2, err uintptr) {
+ libcCall(unsafe.Pointer(funcPC(syscall10X)), unsafe.Pointer(&fn))
+ return
+}
diff --git a/src/runtime/sys_openbsd_amd64.s b/src/runtime/sys_openbsd_amd64.s
index 37d70ab9aa..b3a76b57a3 100644
--- a/src/runtime/sys_openbsd_amd64.s
+++ b/src/runtime/sys_openbsd_amd64.s
@@ -2,8 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
-// System calls and other sys.stuff for AMD64, OpenBSD
-// /usr/src/sys/kern/syscalls.master for syscall numbers.
+// System calls and other sys.stuff for AMD64, OpenBSD.
+// System calls are implemented in libc/libpthread, this file
+// contains trampolines that convert from Go to C calling convention.
+// Some direct system call implementations currently remain.
//
#include "go_asm.h"
@@ -12,405 +14,775 @@
#define CLOCK_MONOTONIC $3
-// int32 tfork(void *param, uintptr psize, M *mp, G *gp, void (*fn)(void));
-TEXT runtime·tfork(SB),NOSPLIT,$32
-
- // Copy mp, gp and fn off parent stack for use by child.
- MOVQ mm+16(FP), R8
- MOVQ gg+24(FP), R9
- MOVQ fn+32(FP), R12
+TEXT runtime·settls(SB),NOSPLIT,$0
+ // Nothing to do, pthread already set thread-local storage up.
+ RET
- MOVQ param+0(FP), DI
- MOVQ psize+8(FP), SI
- MOVL $8, AX // sys___tfork
- SYSCALL
+// mstart_stub is the first function executed on a new thread started by pthread_create.
+// It just does some low-level setup and then calls mstart.
+// Note: called with the C calling convention.
+TEXT runtime·mstart_stub(SB),NOSPLIT,$0
+ // DI points to the m.
+ // We are already on m's g0 stack.
- // Return if tfork syscall failed.
- JCC 4(PC)
- NEGQ AX
- MOVL AX, ret+40(FP)
- RET
+ // Save callee-save registers.
+ SUBQ $48, SP
+ MOVQ BX, 0(SP)
+ MOVQ BP, 8(SP)
+ MOVQ R12, 16(SP)
+ MOVQ R13, 24(SP)
+ MOVQ R14, 32(SP)
+ MOVQ R15, 40(SP)
- // In parent, return.
- CMPL AX, $0
- JEQ 3(PC)
- MOVL AX, ret+40(FP)
- RET
+ // Load g and save to TLS entry.
+ // See cmd/link/internal/ld/sym.go:computeTLSOffset.
+ MOVQ m_g0(DI), DX // g
+ MOVQ DX, -8(FS)
- // Set FS to point at m->tls.
- LEAQ m_tls(R8), DI
- CALL runtime·settls(SB)
+ // Someday the convention will be D is always cleared.
+ CLD
- // In child, set up new stack.
- get_tls(CX)
- MOVQ R8, g_m(R9)
- MOVQ R9, g(CX)
- CALL runtime·stackcheck(SB)
+ CALL runtime·mstart(SB)
- // Call fn
- CALL R12
+ // Restore callee-save registers.
+ MOVQ 0(SP), BX
+ MOVQ 8(SP), BP
+ MOVQ 16(SP), R12
+ MOVQ 24(SP), R13
+ MOVQ 32(SP), R14
+ MOVQ 40(SP), R15
- // It shouldn't return. If it does, exit
- MOVQ $0, DI // arg 1 - notdead
- MOVL $302, AX // sys___threxit
- SYSCALL
- JMP -3(PC) // keep exiting
+ // Go is all done with this OS thread.
+ // Tell pthread everything is ok (we never join with this thread, so
+ // the value here doesn't really matter).
+ XORL AX, AX
-TEXT runtime·osyield(SB),NOSPLIT,$0
- MOVL $298, AX // sys_sched_yield
- SYSCALL
+ ADDQ $48, SP
RET
-TEXT runtime·thrsleep(SB),NOSPLIT,$0
- MOVQ ident+0(FP), DI // arg 1 - ident
- MOVL clock_id+8(FP), SI // arg 2 - clock_id
- MOVQ tsp+16(FP), DX // arg 3 - tp
- MOVQ lock+24(FP), R10 // arg 4 - lock
- MOVQ abort+32(FP), R8 // arg 5 - abort
- MOVL $94, AX // sys___thrsleep
- SYSCALL
- MOVL AX, ret+40(FP)
+TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
+ MOVQ fn+0(FP), AX
+ MOVL sig+8(FP), DI
+ MOVQ info+16(FP), SI
+ MOVQ ctx+24(FP), DX
+ PUSHQ BP
+ MOVQ SP, BP
+ ANDQ $~15, SP // alignment for x86_64 ABI
+ CALL AX
+ MOVQ BP, SP
+ POPQ BP
RET
-TEXT runtime·thrwakeup(SB),NOSPLIT,$0
- MOVQ ident+0(FP), DI // arg 1 - ident
- MOVL n+8(FP), SI // arg 2 - n
- MOVL $301, AX // sys___thrwakeup
- SYSCALL
- MOVL AX, ret+16(FP)
- RET
+TEXT runtime·sigtramp(SB),NOSPLIT,$72
+ // Save callee-saved C registers, since the caller may be a C signal handler.
+ MOVQ BX, bx-8(SP)
+ MOVQ BP, bp-16(SP) // save in case GOEXPERIMENT=noframepointer is set
+ MOVQ R12, r12-24(SP)
+ MOVQ R13, r13-32(SP)
+ MOVQ R14, r14-40(SP)
+ MOVQ R15, r15-48(SP)
+ // We don't save mxcsr or the x87 control word because sigtrampgo doesn't
+ // modify them.
-// Exit the entire program (like C exit)
-TEXT runtime·exit(SB),NOSPLIT,$-8
- MOVL code+0(FP), DI // arg 1 - exit status
- MOVL $1, AX // sys_exit
- SYSCALL
- MOVL $0xf1, 0xf1 // crash
+ MOVQ DX, ctx-56(SP)
+ MOVQ SI, info-64(SP)
+ MOVQ DI, signum-72(SP)
+ CALL runtime·sigtrampgo(SB)
+
+ MOVQ r15-48(SP), R15
+ MOVQ r14-40(SP), R14
+ MOVQ r13-32(SP), R13
+ MOVQ r12-24(SP), R12
+ MOVQ bp-16(SP), BP
+ MOVQ bx-8(SP), BX
RET
-// func exitThread(wait *uint32)
-TEXT runtime·exitThread(SB),NOSPLIT,$0-8
- MOVQ wait+0(FP), DI // arg 1 - notdead
- MOVL $302, AX // sys___threxit
- SYSCALL
- MOVL $0xf1, 0xf1 // crash
- JMP 0(PC)
+//
+// These trampolines help convert from Go calling convention to C calling convention.
+// They should be called with asmcgocall.
+// A pointer to the arguments is passed in DI.
+// A single int32 result is returned in AX.
+// (For more results, make an args/results structure.)
+TEXT runtime·pthread_attr_init_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 0(DI), DI // arg 1 - attr
+ CALL libc_pthread_attr_init(SB)
+ POPQ BP
+ RET
-TEXT runtime·open(SB),NOSPLIT,$-8
- MOVQ name+0(FP), DI // arg 1 pathname
- MOVL mode+8(FP), SI // arg 2 flags
- MOVL perm+12(FP), DX // arg 3 mode
- MOVL $5, AX
- SYSCALL
- JCC 2(PC)
- MOVL $-1, AX
- MOVL AX, ret+16(FP)
+TEXT runtime·pthread_attr_destroy_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 0(DI), DI // arg 1 - attr
+ CALL libc_pthread_attr_destroy(SB)
+ POPQ BP
RET
-TEXT runtime·closefd(SB),NOSPLIT,$-8
- MOVL fd+0(FP), DI // arg 1 fd
- MOVL $6, AX
- SYSCALL
- JCC 2(PC)
- MOVL $-1, AX
- MOVL AX, ret+8(FP)
+TEXT runtime·pthread_attr_getstacksize_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 - stacksize
+ MOVQ 0(DI), DI // arg 1 - attr
+ CALL libc_pthread_attr_getstacksize(SB)
+ POPQ BP
RET
-TEXT runtime·read(SB),NOSPLIT,$-8
- MOVL fd+0(FP), DI // arg 1 fd
- MOVQ p+8(FP), SI // arg 2 buf
- MOVL n+16(FP), DX // arg 3 count
- MOVL $3, AX
- SYSCALL
- JCC 2(PC)
- NEGQ AX // caller expects negative errno
- MOVL AX, ret+24(FP)
+TEXT runtime·pthread_attr_setdetachstate_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 - detachstate
+ MOVQ 0(DI), DI // arg 1 - attr
+ CALL libc_pthread_attr_setdetachstate(SB)
+ POPQ BP
RET
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT,$0-12
- LEAQ r+0(FP), DI
- MOVL $263, AX
- SYSCALL
- MOVL AX, errno+8(FP)
+TEXT runtime·pthread_create_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $16, SP
+ MOVQ 0(DI), SI // arg 2 - attr
+ MOVQ 8(DI), DX // arg 3 - start
+ MOVQ 16(DI), CX // arg 4 - arg
+ MOVQ SP, DI // arg 1 - &thread (discarded)
+ CALL libc_pthread_create(SB)
+ MOVQ BP, SP
+ POPQ BP
RET
-// func pipe2(flags int32) (r, w int32, errno int32)
-TEXT runtime·pipe2(SB),NOSPLIT,$0-20
- LEAQ r+8(FP), DI
- MOVL flags+0(FP), SI
- MOVL $101, AX
- SYSCALL
- MOVL AX, errno+16(FP)
+TEXT runtime·thrkill_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 8(DI), SI // arg 2 - signal
+ MOVQ $0, DX // arg 3 - tcb
+ MOVL 0(DI), DI // arg 1 - tid
+ CALL libc_thrkill(SB)
+ POPQ BP
RET
-TEXT runtime·write1(SB),NOSPLIT,$-8
- MOVQ fd+0(FP), DI // arg 1 - fd
- MOVQ p+8(FP), SI // arg 2 - buf
- MOVL n+16(FP), DX // arg 3 - nbyte
- MOVL $4, AX // sys_write
- SYSCALL
- JCC 2(PC)
- NEGQ AX // caller expects negative errno
- MOVL AX, ret+24(FP)
+TEXT runtime·thrsleep_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 8(DI), SI // arg 2 - clock_id
+ MOVQ 16(DI), DX // arg 3 - abstime
+ MOVQ 24(DI), CX // arg 4 - lock
+ MOVQ 32(DI), R8 // arg 5 - abort
+ MOVQ 0(DI), DI // arg 1 - id
+ CALL libc_thrsleep(SB)
+ POPQ BP
RET
-TEXT runtime·usleep(SB),NOSPLIT,$16
- MOVL $0, DX
- MOVL usec+0(FP), AX
- MOVL $1000000, CX
- DIVL CX
- MOVQ AX, 0(SP) // tv_sec
- MOVL $1000, AX
- MULL DX
- MOVQ AX, 8(SP) // tv_nsec
+TEXT runtime·thrwakeup_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 8(DI), SI // arg 2 - count
+ MOVQ 0(DI), DI // arg 1 - id
+ CALL libc_thrwakeup(SB)
+ POPQ BP
+ RET
- MOVQ SP, DI // arg 1 - rqtp
- MOVQ $0, SI // arg 2 - rmtp
- MOVL $91, AX // sys_nanosleep
- SYSCALL
+TEXT runtime·exit_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 0(DI), DI // arg 1 exit status
+ CALL libc_exit(SB)
+ MOVL $0xf1, 0xf1 // crash
+ POPQ BP
RET
-TEXT runtime·getthrid(SB),NOSPLIT,$0-4
- MOVL $299, AX // sys_getthrid
- SYSCALL
- MOVL AX, ret+0(FP)
+TEXT runtime·getthrid_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ DI, BX // BX is callee-save; preserved across libc_getthrid
+ CALL libc_getthrid(SB)
+ MOVL AX, 0(BX) // return value
+ POPQ BP
RET
-TEXT runtime·thrkill(SB),NOSPLIT,$0-16
- MOVL tid+0(FP), DI // arg 1 - tid
- MOVQ sig+8(FP), SI // arg 2 - signum
- MOVQ $0, DX // arg 3 - tcb
- MOVL $119, AX // sys_thrkill
- SYSCALL
+TEXT runtime·raiseproc_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 0(DI), BX // signal
+ CALL libc_getpid(SB)
+ MOVL AX, DI // arg 1 pid
+ MOVL BX, SI // arg 2 signal
+ CALL libc_kill(SB)
+ POPQ BP
RET
-TEXT runtime·raiseproc(SB),NOSPLIT,$16
- MOVL $20, AX // sys_getpid
- SYSCALL
- MOVQ AX, DI // arg 1 - pid
- MOVL sig+0(FP), SI // arg 2 - signum
- MOVL $122, AX // sys_kill
- SYSCALL
+TEXT runtime·sched_yield_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ CALL libc_sched_yield(SB)
+ POPQ BP
RET
-TEXT runtime·setitimer(SB),NOSPLIT,$-8
- MOVL mode+0(FP), DI // arg 1 - which
- MOVQ new+8(FP), SI // arg 2 - itv
- MOVQ old+16(FP), DX // arg 3 - oitv
- MOVL $69, AX // sys_setitimer
- SYSCALL
+TEXT runtime·mmap_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP // make a frame; keep stack aligned
+ MOVQ SP, BP
+ MOVQ DI, BX
+ MOVQ 0(BX), DI // arg 1 addr
+ MOVQ 8(BX), SI // arg 2 len
+ MOVL 16(BX), DX // arg 3 prot
+ MOVL 20(BX), CX // arg 4 flags
+ MOVL 24(BX), R8 // arg 5 fd
+ MOVL 28(BX), R9 // arg 6 offset
+ CALL libc_mmap(SB)
+ XORL DX, DX
+ CMPQ AX, $-1
+ JNE ok
+ CALL libc_errno(SB)
+ MOVLQSX (AX), DX // errno
+ XORQ AX, AX
+ok:
+ MOVQ AX, 32(BX)
+ MOVQ DX, 40(BX)
+ POPQ BP
RET
-// func walltime1() (sec int64, nsec int32)
-TEXT runtime·walltime1(SB), NOSPLIT, $32
- MOVQ $0, DI // arg 1 - clock_id
- LEAQ 8(SP), SI // arg 2 - tp
- MOVL $87, AX // sys_clock_gettime
- SYSCALL
- MOVQ 8(SP), AX // sec
- MOVQ 16(SP), DX // nsec
+TEXT runtime·munmap_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 len
+ MOVQ 0(DI), DI // arg 1 addr
+ CALL libc_munmap(SB)
+ TESTQ AX, AX
+ JEQ 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ POPQ BP
+ RET
- // sec is in AX, nsec in DX
- MOVQ AX, sec+0(FP)
- MOVL DX, nsec+8(FP)
+TEXT runtime·madvise_trampoline(SB), NOSPLIT, $0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 len
+ MOVL 16(DI), DX // arg 3 advice
+ MOVQ 0(DI), DI // arg 1 addr
+ CALL libc_madvise(SB)
+ // ignore failure - maybe pages are locked
+ POPQ BP
RET
-TEXT runtime·nanotime1(SB),NOSPLIT,$24
- MOVQ CLOCK_MONOTONIC, DI // arg 1 - clock_id
- LEAQ 8(SP), SI // arg 2 - tp
- MOVL $87, AX // sys_clock_gettime
- SYSCALL
- MOVQ 8(SP), AX // sec
- MOVQ 16(SP), DX // nsec
+TEXT runtime·open_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 8(DI), SI // arg 2 - flags
+ MOVL 12(DI), DX // arg 3 - mode
+ MOVQ 0(DI), DI // arg 1 - path
+ XORL AX, AX // vararg: say "no float args"
+ CALL libc_open(SB)
+ POPQ BP
+ RET
- // sec is in AX, nsec in DX
- // return nsec in AX
- IMULQ $1000000000, AX
- ADDQ DX, AX
- MOVQ AX, ret+0(FP)
+TEXT runtime·close_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 0(DI), DI // arg 1 - fd
+ CALL libc_close(SB)
+ POPQ BP
RET
-TEXT runtime·sigaction(SB),NOSPLIT,$-8
- MOVL sig+0(FP), DI // arg 1 - signum
- MOVQ new+8(FP), SI // arg 2 - nsa
- MOVQ old+16(FP), DX // arg 3 - osa
- MOVL $46, AX
- SYSCALL
- JCC 2(PC)
- MOVL $0xf1, 0xf1 // crash
+TEXT runtime·read_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 - buf
+ MOVL 16(DI), DX // arg 3 - count
+ MOVL 0(DI), DI // arg 1 - fd
+ CALL libc_read(SB)
+ TESTL AX, AX
+ JGE noerr
+ CALL libc_errno(SB)
+ MOVL (AX), AX // errno
+ NEGL AX // caller expects negative errno value
+noerr:
+ POPQ BP
RET
-TEXT runtime·obsdsigprocmask(SB),NOSPLIT,$0
- MOVL how+0(FP), DI // arg 1 - how
- MOVL new+4(FP), SI // arg 2 - set
- MOVL $48, AX // sys_sigprocmask
- SYSCALL
- JCC 2(PC)
- MOVL $0xf1, 0xf1 // crash
- MOVL AX, ret+8(FP)
+TEXT runtime·write_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 buf
+ MOVL 16(DI), DX // arg 3 count
+ MOVL 0(DI), DI // arg 1 fd
+ CALL libc_write(SB)
+ TESTL AX, AX
+ JGE noerr
+ CALL libc_errno(SB)
+ MOVL (AX), AX // errno
+ NEGL AX // caller expects negative errno value
+noerr:
+ POPQ BP
RET
-TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
- MOVQ fn+0(FP), AX
- MOVL sig+8(FP), DI
- MOVQ info+16(FP), SI
- MOVQ ctx+24(FP), DX
+TEXT runtime·pipe2_trampoline(SB),NOSPLIT,$0
PUSHQ BP
MOVQ SP, BP
- ANDQ $~15, SP // alignment for x86_64 ABI
- CALL AX
- MOVQ BP, SP
+ MOVL 8(DI), SI // arg 2 flags
+ MOVQ 0(DI), DI // arg 1 filedes
+ CALL libc_pipe2(SB)
+ TESTL AX, AX
+ JEQ 3(PC)
+ CALL libc_errno(SB)
+ MOVL (AX), AX // errno
+ NEGL AX // caller expects negative errno value
POPQ BP
RET
-TEXT runtime·sigtramp(SB),NOSPLIT,$72
- // Save callee-saved C registers, since the caller may be a C signal handler.
- MOVQ BX, bx-8(SP)
- MOVQ BP, bp-16(SP) // save in case GOEXPERIMENT=noframepointer is set
- MOVQ R12, r12-24(SP)
- MOVQ R13, r13-32(SP)
- MOVQ R14, r14-40(SP)
- MOVQ R15, r15-48(SP)
- // We don't save mxcsr or the x87 control word because sigtrampgo doesn't
- // modify them.
+TEXT runtime·setitimer_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 new
+ MOVQ 16(DI), DX // arg 3 old
+ MOVL 0(DI), DI // arg 1 which
+ CALL libc_setitimer(SB)
+ POPQ BP
+ RET
- MOVQ DX, ctx-56(SP)
- MOVQ SI, info-64(SP)
- MOVQ DI, signum-72(SP)
- CALL runtime·sigtrampgo(SB)
+TEXT runtime·usleep_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 0(DI), DI // arg 1 usec
+ CALL libc_usleep(SB)
+ POPQ BP
+ RET
- MOVQ r15-48(SP), R15
- MOVQ r14-40(SP), R14
- MOVQ r13-32(SP), R13
- MOVQ r12-24(SP), R12
- MOVQ bp-16(SP), BP
- MOVQ bx-8(SP), BX
+TEXT runtime·sysctl_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 8(DI), SI // arg 2 miblen
+ MOVQ 16(DI), DX // arg 3 out
+ MOVQ 24(DI), CX // arg 4 size
+ MOVQ 32(DI), R8 // arg 5 dst
+ MOVQ 40(DI), R9 // arg 6 ndst
+ MOVQ 0(DI), DI // arg 1 mib
+ CALL libc_sysctl(SB)
+ POPQ BP
RET
-TEXT runtime·mmap(SB),NOSPLIT,$0
- MOVQ addr+0(FP), DI // arg 1 - addr
- MOVQ n+8(FP), SI // arg 2 - len
- MOVL prot+16(FP), DX // arg 3 - prot
- MOVL flags+20(FP), R10 // arg 4 - flags
- MOVL fd+24(FP), R8 // arg 5 - fd
- MOVL off+28(FP), R9
- SUBQ $16, SP
- MOVQ R9, 8(SP) // arg 7 - offset (passed on stack)
- MOVQ $0, R9 // arg 6 - pad
- MOVL $197, AX
- SYSCALL
- JCC ok
- ADDQ $16, SP
- MOVQ $0, p+32(FP)
- MOVQ AX, err+40(FP)
+TEXT runtime·kqueue_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ CALL libc_kqueue(SB)
+ POPQ BP
RET
+
+TEXT runtime·kevent_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 ch
+ MOVL 16(DI), DX // arg 3 nch
+ MOVQ 24(DI), CX // arg 4 ev
+ MOVL 32(DI), R8 // arg 5 nev
+ MOVQ 40(DI), R9 // arg 6 ts
+ MOVL 0(DI), DI // arg 1 kq
+ CALL libc_kevent(SB)
+ CMPL AX, $-1
+ JNE ok
+ CALL libc_errno(SB)
+ MOVL (AX), AX // errno
+ NEGL AX // caller expects negative errno value
ok:
- ADDQ $16, SP
- MOVQ AX, p+32(FP)
- MOVQ $0, err+40(FP)
+ POPQ BP
RET
-TEXT runtime·munmap(SB),NOSPLIT,$0
- MOVQ addr+0(FP), DI // arg 1 - addr
- MOVQ n+8(FP), SI // arg 2 - len
- MOVL $73, AX // sys_munmap
- SYSCALL
- JCC 2(PC)
- MOVL $0xf1, 0xf1 // crash
+TEXT runtime·clock_gettime_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP // make a frame; keep stack aligned
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 tp
+ MOVL 0(DI), DI // arg 1 clock_id
+ CALL libc_clock_gettime(SB)
+ TESTL AX, AX
+ JEQ 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ POPQ BP
RET
-TEXT runtime·madvise(SB),NOSPLIT,$0
- MOVQ addr+0(FP), DI // arg 1 - addr
- MOVQ n+8(FP), SI // arg 2 - len
- MOVL flags+16(FP), DX // arg 3 - behav
- MOVQ $75, AX // sys_madvise
- SYSCALL
- JCC 2(PC)
- MOVL $-1, AX
- MOVL AX, ret+24(FP)
+TEXT runtime·fcntl_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVL 4(DI), SI // arg 2 cmd
+ MOVL 8(DI), DX // arg 3 arg
+ MOVL 0(DI), DI // arg 1 fd
+ XORL AX, AX // vararg: say "no float args"
+ CALL libc_fcntl(SB)
+ POPQ BP
RET
-TEXT runtime·sigaltstack(SB),NOSPLIT,$-8
- MOVQ new+0(FP), DI // arg 1 - nss
- MOVQ old+8(FP), SI // arg 2 - oss
- MOVQ $288, AX // sys_sigaltstack
- SYSCALL
- JCC 2(PC)
- MOVL $0xf1, 0xf1 // crash
+TEXT runtime·sigaction_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 new
+ MOVQ 16(DI), DX // arg 3 old
+ MOVL 0(DI), DI // arg 1 sig
+ CALL libc_sigaction(SB)
+ TESTL AX, AX
+ JEQ 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ POPQ BP
RET
-// set tls base to DI
-TEXT runtime·settls(SB),NOSPLIT,$0
- // adjust for ELF: wants to use -8(FS) for g
- ADDQ $8, DI
- MOVQ $329, AX // sys___settcb
- SYSCALL
- JCC 2(PC)
- MOVL $0xf1, 0xf1 // crash
+TEXT runtime·sigprocmask_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 new
+ MOVQ 16(DI), DX // arg 3 old
+ MOVL 0(DI), DI // arg 1 how
+ CALL libc_pthread_sigmask(SB)
+ TESTL AX, AX
+ JEQ 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ POPQ BP
RET
-TEXT runtime·sysctl(SB),NOSPLIT,$0
- MOVQ mib+0(FP), DI // arg 1 - name
- MOVL miblen+8(FP), SI // arg 2 - namelen
- MOVQ out+16(FP), DX // arg 3 - oldp
- MOVQ size+24(FP), R10 // arg 4 - oldlenp
- MOVQ dst+32(FP), R8 // arg 5 - newp
- MOVQ ndst+40(FP), R9 // arg 6 - newlen
- MOVQ $202, AX // sys___sysctl
- SYSCALL
- JCC 4(PC)
- NEGQ AX
- MOVL AX, ret+48(FP)
+TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ MOVQ 8(DI), SI // arg 2 old
+ MOVQ 0(DI), DI // arg 1 new
+ CALL libc_sigaltstack(SB)
+ TESTQ AX, AX
+ JEQ 2(PC)
+ MOVL $0xf1, 0xf1 // crash
+ POPQ BP
RET
- MOVL $0, AX
- MOVL AX, ret+48(FP)
+
+// syscall calls a function in libc on behalf of the syscall package.
+// syscall takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall expects a 32-bit result and tests for 32-bit -1
+// to decide there was an error.
+TEXT runtime·syscall(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $16, SP
+ MOVQ (0*8)(DI), CX // fn
+ MOVQ (2*8)(DI), SI // a2
+ MOVQ (3*8)(DI), DX // a3
+ MOVQ DI, (SP)
+ MOVQ (1*8)(DI), DI // a1
+ XORL AX, AX // vararg: say "no float args"
+
+ CALL CX
+
+ MOVQ (SP), DI
+ MOVQ AX, (4*8)(DI) // r1
+ MOVQ DX, (5*8)(DI) // r2
+
+ // Standard libc functions return -1 on error
+ // and set errno.
+ CMPL AX, $-1 // Note: high 32 bits are junk
+ JNE ok
+
+ // Get error code from libc.
+ CALL libc_errno(SB)
+ MOVLQSX (AX), AX
+ MOVQ (SP), DI
+ MOVQ AX, (6*8)(DI) // err
+
+ok:
+ XORL AX, AX // no error (it's ignored anyway)
+ MOVQ BP, SP
+ POPQ BP
RET
-// int32 runtime·kqueue(void);
-TEXT runtime·kqueue(SB),NOSPLIT,$0
- MOVL $269, AX
- SYSCALL
- JCC 2(PC)
- NEGQ AX
- MOVL AX, ret+0(FP)
+// syscallX calls a function in libc on behalf of the syscall package.
+// syscallX takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscallX must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscallX is like syscall but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscallX(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $16, SP
+ MOVQ (0*8)(DI), CX // fn
+ MOVQ (2*8)(DI), SI // a2
+ MOVQ (3*8)(DI), DX // a3
+ MOVQ DI, (SP)
+ MOVQ (1*8)(DI), DI // a1
+ XORL AX, AX // vararg: say "no float args"
+
+ CALL CX
+
+ MOVQ (SP), DI
+ MOVQ AX, (4*8)(DI) // r1
+ MOVQ DX, (5*8)(DI) // r2
+
+ // Standard libc functions return -1 on error
+ // and set errno.
+ CMPQ AX, $-1
+ JNE ok
+
+ // Get error code from libc.
+ CALL libc_errno(SB)
+ MOVLQSX (AX), AX
+ MOVQ (SP), DI
+ MOVQ AX, (6*8)(DI) // err
+
+ok:
+ XORL AX, AX // no error (it's ignored anyway)
+ MOVQ BP, SP
+ POPQ BP
+ RET
+
+// syscall6 calls a function in libc on behalf of the syscall package.
+// syscall6 takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall6 must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall6 expects a 32-bit result and tests for 32-bit -1
+// to decide there was an error.
+TEXT runtime·syscall6(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $16, SP
+ MOVQ (0*8)(DI), R11 // fn
+ MOVQ (2*8)(DI), SI // a2
+ MOVQ (3*8)(DI), DX // a3
+ MOVQ (4*8)(DI), CX // a4
+ MOVQ (5*8)(DI), R8 // a5
+ MOVQ (6*8)(DI), R9 // a6
+ MOVQ DI, (SP)
+ MOVQ (1*8)(DI), DI // a1
+ XORL AX, AX // vararg: say "no float args"
+
+ CALL R11
+
+ MOVQ (SP), DI
+ MOVQ AX, (7*8)(DI) // r1
+ MOVQ DX, (8*8)(DI) // r2
+
+ CMPL AX, $-1
+ JNE ok
+
+ CALL libc_errno(SB)
+ MOVLQSX (AX), AX
+ MOVQ (SP), DI
+ MOVQ AX, (9*8)(DI) // err
+
+ok:
+ XORL AX, AX // no error (it's ignored anyway)
+ MOVQ BP, SP
+ POPQ BP
RET
-// int32 runtime·kevent(int kq, Kevent *changelist, int nchanges, Kevent *eventlist, int nevents, Timespec *timeout);
-TEXT runtime·kevent(SB),NOSPLIT,$0
- MOVL kq+0(FP), DI
- MOVQ ch+8(FP), SI
- MOVL nch+16(FP), DX
- MOVQ ev+24(FP), R10
- MOVL nev+32(FP), R8
- MOVQ ts+40(FP), R9
- MOVL $72, AX
- SYSCALL
- JCC 2(PC)
- NEGQ AX
- MOVL AX, ret+48(FP)
+// syscall6X calls a function in libc on behalf of the syscall package.
+// syscall6X takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall6X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall6X is like syscall6 but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscall6X(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $16, SP
+ MOVQ (0*8)(DI), R11 // fn
+ MOVQ (2*8)(DI), SI // a2
+ MOVQ (3*8)(DI), DX // a3
+ MOVQ (4*8)(DI), CX // a4
+ MOVQ (5*8)(DI), R8 // a5
+ MOVQ (6*8)(DI), R9 // a6
+ MOVQ DI, (SP)
+ MOVQ (1*8)(DI), DI // a1
+ XORL AX, AX // vararg: say "no float args"
+
+ CALL R11
+
+ MOVQ (SP), DI
+ MOVQ AX, (7*8)(DI) // r1
+ MOVQ DX, (8*8)(DI) // r2
+
+ CMPQ AX, $-1
+ JNE ok
+
+ CALL libc_errno(SB)
+ MOVLQSX (AX), AX
+ MOVQ (SP), DI
+ MOVQ AX, (9*8)(DI) // err
+
+ok:
+ XORL AX, AX // no error (it's ignored anyway)
+ MOVQ BP, SP
+ POPQ BP
RET
-// void runtime·closeonexec(int32 fd);
-TEXT runtime·closeonexec(SB),NOSPLIT,$0
- MOVL fd+0(FP), DI // fd
- MOVQ $2, SI // F_SETFD
- MOVQ $1, DX // FD_CLOEXEC
- MOVL $92, AX // fcntl
- SYSCALL
+// syscall10 calls a function in libc on behalf of the syscall package.
+// syscall10 takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// a7 uintptr
+// a8 uintptr
+// a9 uintptr
+// a10 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall10 must be called on the g0 stack with the
+// C calling convention (use libcCall).
+TEXT runtime·syscall10(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $48, SP
+
+ // Arguments a1 to a6 get passed in registers, with a7 onwards being
+ // passed via the stack per the x86-64 System V ABI
+ // (https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf).
+ MOVQ (7*8)(DI), R10 // a7
+ MOVQ (8*8)(DI), R11 // a8
+ MOVQ (9*8)(DI), R12 // a9
+ MOVQ (10*8)(DI), R13 // a10
+ MOVQ R10, (0*8)(SP) // a7
+ MOVQ R11, (1*8)(SP) // a8
+ MOVQ R12, (2*8)(SP) // a9
+ MOVQ R13, (3*8)(SP) // a10
+ MOVQ (0*8)(DI), R11 // fn
+ MOVQ (2*8)(DI), SI // a2
+ MOVQ (3*8)(DI), DX // a3
+ MOVQ (4*8)(DI), CX // a4
+ MOVQ (5*8)(DI), R8 // a5
+ MOVQ (6*8)(DI), R9 // a6
+ MOVQ DI, (4*8)(SP)
+ MOVQ (1*8)(DI), DI // a1
+ XORL AX, AX // vararg: say "no float args"
+
+ CALL R11
+
+ MOVQ (4*8)(SP), DI
+ MOVQ AX, (11*8)(DI) // r1
+ MOVQ DX, (12*8)(DI) // r2
+
+ CMPL AX, $-1
+ JNE ok
+
+ CALL libc_errno(SB)
+ MOVLQSX (AX), AX
+ MOVQ (4*8)(SP), DI
+ MOVQ AX, (13*8)(DI) // err
+
+ok:
+ XORL AX, AX // no error (it's ignored anyway)
+ MOVQ BP, SP
+ POPQ BP
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
- MOVL fd+0(FP), DI // fd
- MOVQ $3, SI // F_GETFL
- MOVQ $0, DX
- MOVL $92, AX // fcntl
- SYSCALL
- MOVL fd+0(FP), DI // fd
- MOVQ $4, SI // F_SETFL
- MOVQ $4, DX // O_NONBLOCK
- ORL AX, DX
- MOVL $92, AX // fcntl
- SYSCALL
+// syscall10X calls a function in libc on behalf of the syscall package.
+// syscall10X takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// a7 uintptr
+// a8 uintptr
+// a9 uintptr
+// a10 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall10X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall10X is like syscall10 but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscall10X(SB),NOSPLIT,$0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $48, SP
+
+ // Arguments a1 to a6 get passed in registers, with a7 onwards being
+ // passed via the stack per the x86-64 System V ABI
+ // (https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf).
+ MOVQ (7*8)(DI), R10 // a7
+ MOVQ (8*8)(DI), R11 // a8
+ MOVQ (9*8)(DI), R12 // a9
+ MOVQ (10*8)(DI), R13 // a10
+ MOVQ R10, (0*8)(SP) // a7
+ MOVQ R11, (1*8)(SP) // a8
+ MOVQ R12, (2*8)(SP) // a9
+ MOVQ R13, (3*8)(SP) // a10
+ MOVQ (0*8)(DI), R11 // fn
+ MOVQ (2*8)(DI), SI // a2
+ MOVQ (3*8)(DI), DX // a3
+ MOVQ (4*8)(DI), CX // a4
+ MOVQ (5*8)(DI), R8 // a5
+ MOVQ (6*8)(DI), R9 // a6
+ MOVQ DI, (4*8)(SP)
+ MOVQ (1*8)(DI), DI // a1
+ XORL AX, AX // vararg: say "no float args"
+
+ CALL R11
+
+ MOVQ (4*8)(SP), DI
+ MOVQ AX, (11*8)(DI) // r1
+ MOVQ DX, (12*8)(DI) // r2
+
+ CMPQ AX, $-1
+ JNE ok
+
+ CALL libc_errno(SB)
+ MOVLQSX (AX), AX
+ MOVQ (4*8)(SP), DI
+ MOVQ AX, (13*8)(DI) // err
+
+ok:
+ XORL AX, AX // no error (it's ignored anyway)
+ MOVQ BP, SP
+ POPQ BP
RET
diff --git a/src/runtime/sys_openbsd_arm64.s b/src/runtime/sys_openbsd_arm64.s
index 621b1b1a42..9b4acc90a5 100644
--- a/src/runtime/sys_openbsd_arm64.s
+++ b/src/runtime/sys_openbsd_arm64.s
@@ -3,7 +3,9 @@
// license that can be found in the LICENSE file.
//
// System calls and other sys.stuff for arm64, OpenBSD
-// /usr/src/sys/kern/syscalls.master for syscall numbers.
+// System calls are implemented in libc/libpthread, this file
+// contains trampolines that convert from Go to C calling convention.
+// Some direct system call implementations currently remain.
//
#include "go_asm.h"
@@ -13,239 +15,65 @@
#define CLOCK_REALTIME $0
#define CLOCK_MONOTONIC $3
-// With OpenBSD 6.7 onwards, an arm64 syscall returns two instructions
-// after the SVC instruction, to allow for a speculative execution
-// barrier to be placed after the SVC without impacting performance.
-// For now use hardware no-ops as this works with both older and newer
-// kernels. After OpenBSD 6.8 is released this should be changed to
-// speculation barriers.
-#define INVOKE_SYSCALL \
- SVC; \
- NOOP; \
- NOOP
+// mstart_stub is the first function executed on a new thread started by pthread_create.
+// It just does some low-level setup and then calls mstart.
+// Note: called with the C calling convention.
+TEXT runtime·mstart_stub(SB),NOSPLIT,$160
+ // R0 points to the m.
+ // We are already on m's g0 stack.
-// Exit the entire program (like C exit)
-TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0
- MOVW code+0(FP), R0 // arg 1 - status
- MOVD $1, R8 // sys_exit
- INVOKE_SYSCALL
- BCC 3(PC)
- MOVD $0, R0 // crash on syscall failure
- MOVD R0, (R0)
- RET
-
-// func exitThread(wait *uint32)
-TEXT runtime·exitThread(SB),NOSPLIT,$0
- MOVD wait+0(FP), R0 // arg 1 - notdead
- MOVD $302, R8 // sys___threxit
- INVOKE_SYSCALL
- MOVD $0, R0 // crash on syscall failure
- MOVD R0, (R0)
- JMP 0(PC)
-
-TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0
- MOVD name+0(FP), R0 // arg 1 - path
- MOVW mode+8(FP), R1 // arg 2 - mode
- MOVW perm+12(FP), R2 // arg 3 - perm
- MOVD $5, R8 // sys_open
- INVOKE_SYSCALL
- BCC 2(PC)
- MOVW $-1, R0
- MOVW R0, ret+16(FP)
- RET
-
-TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0
- MOVW fd+0(FP), R0 // arg 1 - fd
- MOVD $6, R8 // sys_close
- INVOKE_SYSCALL
- BCC 2(PC)
- MOVW $-1, R0
- MOVW R0, ret+8(FP)
- RET
-
-TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0
- MOVW fd+0(FP), R0 // arg 1 - fd
- MOVD p+8(FP), R1 // arg 2 - buf
- MOVW n+16(FP), R2 // arg 3 - nbyte
- MOVD $3, R8 // sys_read
- INVOKE_SYSCALL
- BCC 2(PC)
- NEG R0, R0
- MOVW R0, ret+24(FP)
- RET
-
-// func pipe() (r, w int32, errno int32)
-TEXT runtime·pipe(SB),NOSPLIT|NOFRAME,$0-12
- MOVD $r+0(FP), R0
- MOVW $0, R1
- MOVD $101, R8 // sys_pipe2
- INVOKE_SYSCALL
- BCC 2(PC)
- NEG R0, R0
- MOVW R0, errno+8(FP)
- RET
-
-// func pipe2(flags int32) (r, w int32, errno int32)
-TEXT runtime·pipe2(SB),NOSPLIT|NOFRAME,$0-20
- MOVD $r+8(FP), R0
- MOVW flags+0(FP), R1
- MOVD $101, R8 // sys_pipe2
- INVOKE_SYSCALL
- BCC 2(PC)
- NEG R0, R0
- MOVW R0, errno+16(FP)
- RET
-
-TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0
- MOVD fd+0(FP), R0 // arg 1 - fd
- MOVD p+8(FP), R1 // arg 2 - buf
- MOVW n+16(FP), R2 // arg 3 - nbyte
- MOVD $4, R8 // sys_write
- INVOKE_SYSCALL
- BCC 2(PC)
- NEG R0, R0
- MOVW R0, ret+24(FP)
- RET
+ // Save callee-save registers.
+ MOVD R19, 8(RSP)
+ MOVD R20, 16(RSP)
+ MOVD R21, 24(RSP)
+ MOVD R22, 32(RSP)
+ MOVD R23, 40(RSP)
+ MOVD R24, 48(RSP)
+ MOVD R25, 56(RSP)
+ MOVD R26, 64(RSP)
+ MOVD R27, 72(RSP)
+ MOVD g, 80(RSP)
+ MOVD R29, 88(RSP)
+ FMOVD F8, 96(RSP)
+ FMOVD F9, 104(RSP)
+ FMOVD F10, 112(RSP)
+ FMOVD F11, 120(RSP)
+ FMOVD F12, 128(RSP)
+ FMOVD F13, 136(RSP)
+ FMOVD F14, 144(RSP)
+ FMOVD F15, 152(RSP)
-TEXT runtime·usleep(SB),NOSPLIT,$24-4
- MOVWU usec+0(FP), R3
- MOVD R3, R5
- MOVW $1000000, R4
- UDIV R4, R3
- MOVD R3, 8(RSP) // tv_sec
- MUL R3, R4
- SUB R4, R5
- MOVW $1000, R4
- MUL R4, R5
- MOVD R5, 16(RSP) // tv_nsec
+ MOVD m_g0(R0), g
+ BL runtime·save_g(SB)
- ADD $8, RSP, R0 // arg 1 - rqtp
- MOVD $0, R1 // arg 2 - rmtp
- MOVD $91, R8 // sys_nanosleep
- INVOKE_SYSCALL
- RET
-
-TEXT runtime·getthrid(SB),NOSPLIT,$0-4
- MOVD $299, R8 // sys_getthrid
- INVOKE_SYSCALL
- MOVW R0, ret+0(FP)
- RET
-
-TEXT runtime·thrkill(SB),NOSPLIT,$0-16
- MOVW tid+0(FP), R0 // arg 1 - tid
- MOVD sig+8(FP), R1 // arg 2 - signum
- MOVW $0, R2 // arg 3 - tcb
- MOVD $119, R8 // sys_thrkill
- INVOKE_SYSCALL
- RET
+ BL runtime·mstart(SB)
-TEXT runtime·raiseproc(SB),NOSPLIT,$0
- MOVD $20, R8 // sys_getpid
- INVOKE_SYSCALL
- // arg 1 - pid, already in R0
- MOVW sig+0(FP), R1 // arg 2 - signum
- MOVD $122, R8 // sys_kill
- INVOKE_SYSCALL
- RET
+ // Restore callee-save registers.
+ MOVD 8(RSP), R19
+ MOVD 16(RSP), R20
+ MOVD 24(RSP), R21
+ MOVD 32(RSP), R22
+ MOVD 40(RSP), R23
+ MOVD 48(RSP), R24
+ MOVD 56(RSP), R25
+ MOVD 64(RSP), R26
+ MOVD 72(RSP), R27
+ MOVD 80(RSP), g
+ MOVD 88(RSP), R29
+ FMOVD 96(RSP), F8
+ FMOVD 104(RSP), F9
+ FMOVD 112(RSP), F10
+ FMOVD 120(RSP), F11
+ FMOVD 128(RSP), F12
+ FMOVD 136(RSP), F13
+ FMOVD 144(RSP), F14
+ FMOVD 152(RSP), F15
-TEXT runtime·mmap(SB),NOSPLIT,$0
- MOVD addr+0(FP), R0 // arg 1 - addr
- MOVD n+8(FP), R1 // arg 2 - len
- MOVW prot+16(FP), R2 // arg 3 - prot
- MOVW flags+20(FP), R3 // arg 4 - flags
- MOVW fd+24(FP), R4 // arg 5 - fd
- MOVW $0, R5 // arg 6 - pad
- MOVW off+28(FP), R6 // arg 7 - offset
- MOVD $197, R8 // sys_mmap
- INVOKE_SYSCALL
- MOVD $0, R1
- BCC 3(PC)
- MOVD R0, R1 // if error, move to R1
+ // Go is all done with this OS thread.
+ // Tell pthread everything is ok (we never join with this thread, so
+ // the value here doesn't really matter).
MOVD $0, R0
- MOVD R0, p+32(FP)
- MOVD R1, err+40(FP)
- RET
-TEXT runtime·munmap(SB),NOSPLIT,$0
- MOVD addr+0(FP), R0 // arg 1 - addr
- MOVD n+8(FP), R1 // arg 2 - len
- MOVD $73, R8 // sys_munmap
- INVOKE_SYSCALL
- BCC 3(PC)
- MOVD $0, R0 // crash on syscall failure
- MOVD R0, (R0)
- RET
-
-TEXT runtime·madvise(SB),NOSPLIT,$0
- MOVD addr+0(FP), R0 // arg 1 - addr
- MOVD n+8(FP), R1 // arg 2 - len
- MOVW flags+16(FP), R2 // arg 2 - flags
- MOVD $75, R8 // sys_madvise
- INVOKE_SYSCALL
- BCC 2(PC)
- MOVW $-1, R0
- MOVW R0, ret+24(FP)
- RET
-
-TEXT runtime·setitimer(SB),NOSPLIT,$0
- MOVW mode+0(FP), R0 // arg 1 - mode
- MOVD new+8(FP), R1 // arg 2 - new value
- MOVD old+16(FP), R2 // arg 3 - old value
- MOVD $69, R8 // sys_setitimer
- INVOKE_SYSCALL
- RET
-
-// func walltime1() (sec int64, nsec int32)
-TEXT runtime·walltime1(SB), NOSPLIT, $32
- MOVW CLOCK_REALTIME, R0 // arg 1 - clock_id
- MOVD $8(RSP), R1 // arg 2 - tp
- MOVD $87, R8 // sys_clock_gettime
- INVOKE_SYSCALL
-
- MOVD 8(RSP), R0 // sec
- MOVD 16(RSP), R1 // nsec
- MOVD R0, sec+0(FP)
- MOVW R1, nsec+8(FP)
-
- RET
-
-// int64 nanotime1(void) so really
-// void nanotime1(int64 *nsec)
-TEXT runtime·nanotime1(SB),NOSPLIT,$32
- MOVW CLOCK_MONOTONIC, R0 // arg 1 - clock_id
- MOVD $8(RSP), R1 // arg 2 - tp
- MOVD $87, R8 // sys_clock_gettime
- INVOKE_SYSCALL
-
- MOVW 8(RSP), R3 // sec
- MOVW 16(RSP), R5 // nsec
-
- MOVD $1000000000, R4
- MUL R4, R3
- ADD R5, R3
- MOVD R3, ret+0(FP)
- RET
-
-TEXT runtime·sigaction(SB),NOSPLIT,$0
- MOVW sig+0(FP), R0 // arg 1 - signum
- MOVD new+8(FP), R1 // arg 2 - new sigaction
- MOVD old+16(FP), R2 // arg 3 - old sigaction
- MOVD $46, R8 // sys_sigaction
- INVOKE_SYSCALL
- BCC 3(PC)
- MOVD $3, R0 // crash on syscall failure
- MOVD R0, (R0)
- RET
-
-TEXT runtime·obsdsigprocmask(SB),NOSPLIT,$0
- MOVW how+0(FP), R0 // arg 1 - mode
- MOVW new+4(FP), R1 // arg 2 - new
- MOVD $48, R8 // sys_sigprocmask
- INVOKE_SYSCALL
- BCC 3(PC)
- MOVD $3, R8 // crash on syscall failure
- MOVD R8, (R8)
- MOVW R0, ret+8(FP)
RET
TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
@@ -282,9 +110,6 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$192
// If called from an external code context, g will not be set.
// Save R0, since runtime·load_g will clobber it.
MOVW R0, 8(RSP) // signum
- MOVB runtime·iscgo(SB), R0
- CMP $0, R0
- BEQ 2(PC)
BL runtime·load_g(SB)
MOVD R1, 16(RSP)
@@ -314,135 +139,562 @@ TEXT runtime·sigtramp(SB),NOSPLIT,$192
RET
-// int32 tfork(void *param, uintptr psize, M *mp, G *gp, void (*fn)(void));
-TEXT runtime·tfork(SB),NOSPLIT,$0
+//
+// These trampolines help convert from Go calling convention to C calling convention.
+// They should be called with asmcgocall.
+// A pointer to the arguments is passed in R0.
+// A single int32 result is returned in R0.
+// (For more results, make an args/results structure.)
+TEXT runtime·pthread_attr_init_trampoline(SB),NOSPLIT,$0
+ MOVD 0(R0), R0 // arg 1 - attr
+ CALL libc_pthread_attr_init(SB)
+ RET
+
+TEXT runtime·pthread_attr_destroy_trampoline(SB),NOSPLIT,$0
+ MOVD 0(R0), R0 // arg 1 - attr
+ CALL libc_pthread_attr_destroy(SB)
+ RET
+
+TEXT runtime·pthread_attr_getstacksize_trampoline(SB),NOSPLIT,$0
+ MOVD 8(R0), R1 // arg 2 - size
+ MOVD 0(R0), R0 // arg 1 - attr
+ CALL libc_pthread_attr_getstacksize(SB)
+ RET
+
+TEXT runtime·pthread_attr_setdetachstate_trampoline(SB),NOSPLIT,$0
+ MOVD 8(R0), R1 // arg 2 - state
+ MOVD 0(R0), R0 // arg 1 - attr
+ CALL libc_pthread_attr_setdetachstate(SB)
+ RET
+
+TEXT runtime·pthread_create_trampoline(SB),NOSPLIT,$0
+ MOVD 0(R0), R1 // arg 2 - attr
+ MOVD 8(R0), R2 // arg 3 - start
+ MOVD 16(R0), R3 // arg 4 - arg
+ SUB $16, RSP
+ MOVD RSP, R0 // arg 1 - &threadid (discard)
+ CALL libc_pthread_create(SB)
+ ADD $16, RSP
+ RET
+
+TEXT runtime·thrkill_trampoline(SB),NOSPLIT,$0
+ MOVW 8(R0), R1 // arg 2 - signal
+ MOVD $0, R2 // arg 3 - tcb
+ MOVW 0(R0), R0 // arg 1 - tid
+ CALL libc_thrkill(SB)
+ RET
+
+TEXT runtime·thrsleep_trampoline(SB),NOSPLIT,$0
+ MOVW 8(R0), R1 // arg 2 - clock_id
+ MOVD 16(R0), R2 // arg 3 - abstime
+ MOVD 24(R0), R3 // arg 4 - lock
+ MOVD 32(R0), R4 // arg 5 - abort
+ MOVD 0(R0), R0 // arg 1 - id
+ CALL libc_thrsleep(SB)
+ RET
+
+TEXT runtime·thrwakeup_trampoline(SB),NOSPLIT,$0
+ MOVW 8(R0), R1 // arg 2 - count
+ MOVD 0(R0), R0 // arg 1 - id
+ CALL libc_thrwakeup(SB)
+ RET
+
+TEXT runtime·exit_trampoline(SB),NOSPLIT,$0
+ MOVW 0(R0), R0 // arg 1 - status
+ CALL libc_exit(SB)
+ MOVD $0, R0 // crash on failure
+ MOVD R0, (R0)
+ RET
+
+TEXT runtime·getthrid_trampoline(SB),NOSPLIT,$0
+ MOVD R0, R19 // pointer to args
+ CALL libc_getthrid(SB)
+ MOVW R0, 0(R19) // return value
+ RET
+
+TEXT runtime·raiseproc_trampoline(SB),NOSPLIT,$0
+ MOVD R0, R19 // pointer to args
+ CALL libc_getpid(SB) // arg 1 - pid
+ MOVW 0(R19), R1 // arg 2 - signal
+ CALL libc_kill(SB)
+ RET
+
+TEXT runtime·sched_yield_trampoline(SB),NOSPLIT,$0
+ CALL libc_sched_yield(SB)
+ RET
+
+TEXT runtime·mmap_trampoline(SB),NOSPLIT,$0
+ MOVD R0, R19 // pointer to args
+ MOVD 0(R19), R0 // arg 1 - addr
+ MOVD 8(R19), R1 // arg 2 - len
+ MOVW 16(R19), R2 // arg 3 - prot
+ MOVW 20(R19), R3 // arg 4 - flags
+ MOVW 24(R19), R4 // arg 5 - fid
+ MOVW 28(R19), R5 // arg 6 - offset
+ CALL libc_mmap(SB)
+ MOVD $0, R1
+ CMP $-1, R0
+ BNE noerr
+ CALL libc_errno(SB)
+ MOVW (R0), R1 // errno
+ MOVD $0, R0
+noerr:
+ MOVD R0, 32(R19)
+ MOVD R1, 40(R19)
+ RET
+
+TEXT runtime·munmap_trampoline(SB),NOSPLIT,$0
+ MOVD 8(R0), R1 // arg 2 - len
+ MOVD 0(R0), R0 // arg 1 - addr
+ CALL libc_munmap(SB)
+ CMP $-1, R0
+ BNE 3(PC)
+ MOVD $0, R0 // crash on failure
+ MOVD R0, (R0)
+ RET
+
+TEXT runtime·madvise_trampoline(SB), NOSPLIT, $0
+ MOVD 8(R0), R1 // arg 2 - len
+ MOVW 16(R0), R2 // arg 3 - advice
+ MOVD 0(R0), R0 // arg 1 - addr
+ CALL libc_madvise(SB)
+ // ignore failure - maybe pages are locked
+ RET
+
+TEXT runtime·open_trampoline(SB),NOSPLIT,$0
+ MOVW 8(R0), R1 // arg 2 - flags
+ MOVW 12(R0), R2 // arg 3 - mode
+ MOVD 0(R0), R0 // arg 1 - path
+ MOVD $0, R3 // varargs
+ CALL libc_open(SB)
+ RET
- // Copy mp, gp and fn off parent stack for use by child.
- MOVD mm+16(FP), R4
- MOVD gg+24(FP), R5
- MOVD fn+32(FP), R6
+TEXT runtime·close_trampoline(SB),NOSPLIT,$0
+ MOVD 0(R0), R0 // arg 1 - fd
+ CALL libc_close(SB)
+ RET
- MOVD param+0(FP), R0 // arg 1 - param
- MOVD psize+8(FP), R1 // arg 2 - psize
- MOVD $8, R8 // sys___tfork
- INVOKE_SYSCALL
+TEXT runtime·read_trampoline(SB),NOSPLIT,$0
+ MOVD 8(R0), R1 // arg 2 - buf
+ MOVW 16(R0), R2 // arg 3 - count
+ MOVW 0(R0), R0 // arg 1 - fd
+ CALL libc_read(SB)
+ CMP $-1, R0
+ BNE noerr
+ CALL libc_errno(SB)
+ MOVW (R0), R0 // errno
+ NEG R0, R0 // caller expects negative errno value
+noerr:
+ RET
- // Return if syscall failed.
- BCC 4(PC)
- NEG R0, R0
- MOVW R0, ret+40(FP)
+TEXT runtime·write_trampoline(SB),NOSPLIT,$0
+ MOVD 8(R0), R1 // arg 2 - buf
+ MOVW 16(R0), R2 // arg 3 - count
+ MOVW 0(R0), R0 // arg 1 - fd
+ CALL libc_write(SB)
+ CMP $-1, R0
+ BNE noerr
+ CALL libc_errno(SB)
+ MOVW (R0), R0 // errno
+ NEG R0, R0 // caller expects negative errno value
+noerr:
RET
- // In parent, return.
- CMP $0, R0
- BEQ 3(PC)
- MOVW R0, ret+40(FP)
+TEXT runtime·pipe2_trampoline(SB),NOSPLIT,$0
+ MOVW 8(R0), R1 // arg 2 - flags
+ MOVD 0(R0), R0 // arg 1 - filedes
+ CALL libc_pipe2(SB)
+ CMP $-1, R0
+ BNE noerr
+ CALL libc_errno(SB)
+ MOVW (R0), R0 // errno
+ NEG R0, R0 // caller expects negative errno value
+noerr:
RET
- // Initialise m, g.
- MOVD R5, g
- MOVD R4, g_m(g)
+TEXT runtime·setitimer_trampoline(SB),NOSPLIT,$0
+ MOVD 8(R0), R1 // arg 2 - new
+ MOVD 16(R0), R2 // arg 3 - old
+ MOVW 0(R0), R0 // arg 1 - which
+ CALL libc_setitimer(SB)
+ RET
- // Call fn.
- BL (R6)
+TEXT runtime·usleep_trampoline(SB),NOSPLIT,$0
+ MOVD 0(R0), R0 // arg 1 - usec
+ CALL libc_usleep(SB)
+ RET
- // fn should never return.
- MOVD $2, R8 // crash if reached
- MOVD R8, (R8)
+TEXT runtime·sysctl_trampoline(SB),NOSPLIT,$0
+ MOVW 8(R0), R1 // arg 2 - miblen
+ MOVD 16(R0), R2 // arg 3 - out
+ MOVD 24(R0), R3 // arg 4 - size
+ MOVD 32(R0), R4 // arg 5 - dst
+ MOVD 40(R0), R5 // arg 6 - ndst
+ MOVD 0(R0), R0 // arg 1 - mib
+ CALL libc_sysctl(SB)
RET
-TEXT runtime·sigaltstack(SB),NOSPLIT,$0
- MOVD new+0(FP), R0 // arg 1 - new sigaltstack
- MOVD old+8(FP), R1 // arg 2 - old sigaltstack
- MOVD $288, R8 // sys_sigaltstack
- INVOKE_SYSCALL
- BCC 3(PC)
- MOVD $0, R8 // crash on syscall failure
- MOVD R8, (R8)
+TEXT runtime·kqueue_trampoline(SB),NOSPLIT,$0
+ CALL libc_kqueue(SB)
RET
-TEXT runtime·osyield(SB),NOSPLIT,$0
- MOVD $298, R8 // sys_sched_yield
- INVOKE_SYSCALL
+TEXT runtime·kevent_trampoline(SB),NOSPLIT,$0
+ MOVD 8(R0), R1 // arg 2 - keventt
+ MOVW 16(R0), R2 // arg 3 - nch
+ MOVD 24(R0), R3 // arg 4 - ev
+ MOVW 32(R0), R4 // arg 5 - nev
+ MOVD 40(R0), R5 // arg 6 - ts
+ MOVW 0(R0), R0 // arg 1 - kq
+ CALL libc_kevent(SB)
+ CMP $-1, R0
+ BNE noerr
+ CALL libc_errno(SB)
+ MOVW (R0), R0 // errno
+ NEG R0, R0 // caller expects negative errno value
+noerr:
RET
-TEXT runtime·thrsleep(SB),NOSPLIT,$0
- MOVD ident+0(FP), R0 // arg 1 - ident
- MOVW clock_id+8(FP), R1 // arg 2 - clock_id
- MOVD tsp+16(FP), R2 // arg 3 - tsp
- MOVD lock+24(FP), R3 // arg 4 - lock
- MOVD abort+32(FP), R4 // arg 5 - abort
- MOVD $94, R8 // sys___thrsleep
- INVOKE_SYSCALL
- MOVW R0, ret+40(FP)
+TEXT runtime·clock_gettime_trampoline(SB),NOSPLIT,$0
+ MOVD 8(R0), R1 // arg 2 - tp
+ MOVD 0(R0), R0 // arg 1 - clock_id
+ CALL libc_clock_gettime(SB)
+ CMP $-1, R0
+ BNE 3(PC)
+ MOVD $0, R0 // crash on failure
+ MOVD R0, (R0)
RET
-TEXT runtime·thrwakeup(SB),NOSPLIT,$0
- MOVD ident+0(FP), R0 // arg 1 - ident
- MOVW n+8(FP), R1 // arg 2 - n
- MOVD $301, R8 // sys___thrwakeup
- INVOKE_SYSCALL
- MOVW R0, ret+16(FP)
+TEXT runtime·fcntl_trampoline(SB),NOSPLIT,$0
+ MOVW 4(R0), R1 // arg 2 - cmd
+ MOVW 8(R0), R2 // arg 3 - arg
+ MOVW 0(R0), R0 // arg 1 - fd
+ MOVD $0, R3 // vararg
+ CALL libc_fcntl(SB)
RET
-TEXT runtime·sysctl(SB),NOSPLIT,$0
- MOVD mib+0(FP), R0 // arg 1 - mib
- MOVW miblen+8(FP), R1 // arg 2 - miblen
- MOVD out+16(FP), R2 // arg 3 - out
- MOVD size+24(FP), R3 // arg 4 - size
- MOVD dst+32(FP), R4 // arg 5 - dest
- MOVD ndst+40(FP), R5 // arg 6 - newlen
- MOVD $202, R8 // sys___sysctl
- INVOKE_SYSCALL
- BCC 2(PC)
- NEG R0, R0
- MOVW R0, ret+48(FP)
+TEXT runtime·sigaction_trampoline(SB),NOSPLIT,$0
+ MOVD 8(R0), R1 // arg 2 - new
+ MOVD 16(R0), R2 // arg 3 - old
+ MOVW 0(R0), R0 // arg 1 - sig
+ CALL libc_sigaction(SB)
+ CMP $-1, R0
+ BNE 3(PC)
+ MOVD $0, R0 // crash on syscall failure
+ MOVD R0, (R0)
RET
-// int32 runtime·kqueue(void);
-TEXT runtime·kqueue(SB),NOSPLIT,$0
- MOVD $269, R8 // sys_kqueue
- INVOKE_SYSCALL
- BCC 2(PC)
- NEG R0, R0
- MOVW R0, ret+0(FP)
+TEXT runtime·sigprocmask_trampoline(SB),NOSPLIT,$0
+ MOVD 8(R0), R1 // arg 2 - new
+ MOVD 16(R0), R2 // arg 3 - old
+ MOVW 0(R0), R0 // arg 1 - how
+ CALL libc_pthread_sigmask(SB)
+ CMP $-1, R0
+ BNE 3(PC)
+ MOVD $0, R0 // crash on syscall failure
+ MOVD R0, (R0)
RET
-// int32 runtime·kevent(int kq, Kevent *changelist, int nchanges, Kevent *eventlist, int nevents, Timespec *timeout);
-TEXT runtime·kevent(SB),NOSPLIT,$0
- MOVW kq+0(FP), R0 // arg 1 - kq
- MOVD ch+8(FP), R1 // arg 2 - changelist
- MOVW nch+16(FP), R2 // arg 3 - nchanges
- MOVD ev+24(FP), R3 // arg 4 - eventlist
- MOVW nev+32(FP), R4 // arg 5 - nevents
- MOVD ts+40(FP), R5 // arg 6 - timeout
- MOVD $72, R8 // sys_kevent
- INVOKE_SYSCALL
- BCC 2(PC)
- NEG R0, R0
- MOVW R0, ret+48(FP)
+TEXT runtime·sigaltstack_trampoline(SB),NOSPLIT,$0
+ MOVD 8(R0), R1 // arg 2 - old
+ MOVD 0(R0), R0 // arg 1 - new
+ CALL libc_sigaltstack(SB)
+ CMP $-1, R0
+ BNE 3(PC)
+ MOVD $0, R0 // crash on syscall failure
+ MOVD R0, (R0)
RET
-// func closeonexec(fd int32)
-TEXT runtime·closeonexec(SB),NOSPLIT,$0
- MOVW fd+0(FP), R0 // arg 1 - fd
- MOVD $2, R1 // arg 2 - cmd (F_SETFD)
- MOVD $1, R2 // arg 3 - arg (FD_CLOEXEC)
- MOVD $92, R8 // sys_fcntl
- INVOKE_SYSCALL
+// syscall calls a function in libc on behalf of the syscall package.
+// syscall takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall expects a 32-bit result and tests for 32-bit -1
+// to decide there was an error.
+TEXT runtime·syscall(SB),NOSPLIT,$0
+ MOVD R0, R19 // pointer to args
+
+ MOVD (0*8)(R19), R11 // fn
+ MOVD (1*8)(R19), R0 // a1
+ MOVD (2*8)(R19), R1 // a2
+ MOVD (3*8)(R19), R2 // a3
+ MOVD $0, R3 // vararg
+
+ CALL R11
+
+ MOVD R0, (4*8)(R19) // r1
+ MOVD R1, (5*8)(R19) // r2
+
+ // Standard libc functions return -1 on error
+ // and set errno.
+ CMPW $-1, R0
+ BNE ok
+
+ // Get error code from libc.
+ CALL libc_errno(SB)
+ MOVW (R0), R0
+ MOVD R0, (6*8)(R19) // err
+
+ok:
RET
-// func runtime·setNonblock(int32 fd)
-TEXT runtime·setNonblock(SB),NOSPLIT|NOFRAME,$0-4
- MOVW fd+0(FP), R0 // arg 1 - fd
- MOVD $3, R1 // arg 2 - cmd (F_GETFL)
- MOVD $0, R2 // arg 3
- MOVD $92, R8 // sys_fcntl
- INVOKE_SYSCALL
- MOVD $4, R2 // O_NONBLOCK
- ORR R0, R2 // arg 3 - flags
- MOVW fd+0(FP), R0 // arg 1 - fd
- MOVD $4, R1 // arg 2 - cmd (F_SETFL)
- MOVD $92, R8 // sys_fcntl
- INVOKE_SYSCALL
+// syscallX calls a function in libc on behalf of the syscall package.
+// syscallX takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscallX must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscallX is like syscall but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscallX(SB),NOSPLIT,$0
+ MOVD R0, R19 // pointer to args
+
+ MOVD (0*8)(R19), R11 // fn
+ MOVD (1*8)(R19), R0 // a1
+ MOVD (2*8)(R19), R1 // a2
+ MOVD (3*8)(R19), R2 // a3
+ MOVD $0, R3 // vararg
+
+ CALL R11
+
+ MOVD R0, (4*8)(R19) // r1
+ MOVD R1, (5*8)(R19) // r2
+
+ // Standard libc functions return -1 on error
+ // and set errno.
+ CMP $-1, R0
+ BNE ok
+
+ // Get error code from libc.
+ CALL libc_errno(SB)
+ MOVW (R0), R0
+ MOVD R0, (6*8)(R19) // err
+
+ok:
+ RET
+
+// syscall6 calls a function in libc on behalf of the syscall package.
+// syscall6 takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall6 must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall6 expects a 32-bit result and tests for 32-bit -1
+// to decide there was an error.
+TEXT runtime·syscall6(SB),NOSPLIT,$0
+ MOVD R0, R19 // pointer to args
+
+ MOVD (0*8)(R19), R11 // fn
+ MOVD (1*8)(R19), R0 // a1
+ MOVD (2*8)(R19), R1 // a2
+ MOVD (3*8)(R19), R2 // a3
+ MOVD (4*8)(R19), R3 // a4
+ MOVD (5*8)(R19), R4 // a5
+ MOVD (6*8)(R19), R5 // a6
+ MOVD $0, R6 // vararg
+
+ CALL R11
+
+ MOVD R0, (7*8)(R19) // r1
+ MOVD R1, (8*8)(R19) // r2
+
+ // Standard libc functions return -1 on error
+ // and set errno.
+ CMPW $-1, R0
+ BNE ok
+
+ // Get error code from libc.
+ CALL libc_errno(SB)
+ MOVW (R0), R0
+ MOVD R0, (9*8)(R19) // err
+
+ok:
+ RET
+
+// syscall6X calls a function in libc on behalf of the syscall package.
+// syscall6X takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall6X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall6X is like syscall6 but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscall6X(SB),NOSPLIT,$0
+ MOVD R0, R19 // pointer to args
+
+ MOVD (0*8)(R19), R11 // fn
+ MOVD (1*8)(R19), R0 // a1
+ MOVD (2*8)(R19), R1 // a2
+ MOVD (3*8)(R19), R2 // a3
+ MOVD (4*8)(R19), R3 // a4
+ MOVD (5*8)(R19), R4 // a5
+ MOVD (6*8)(R19), R5 // a6
+ MOVD $0, R6 // vararg
+
+ CALL R11
+
+ MOVD R0, (7*8)(R19) // r1
+ MOVD R1, (8*8)(R19) // r2
+
+ // Standard libc functions return -1 on error
+ // and set errno.
+ CMP $-1, R0
+ BNE ok
+
+ // Get error code from libc.
+ CALL libc_errno(SB)
+ MOVW (R0), R0
+ MOVD R0, (9*8)(R19) // err
+
+ok:
+ RET
+
+// syscall10 calls a function in libc on behalf of the syscall package.
+// syscall10 takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// a7 uintptr
+// a8 uintptr
+// a9 uintptr
+// a10 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall10 must be called on the g0 stack with the
+// C calling convention (use libcCall).
+TEXT runtime·syscall10(SB),NOSPLIT,$0
+ MOVD R0, R19 // pointer to args
+
+ MOVD (0*8)(R19), R11 // fn
+ MOVD (1*8)(R19), R0 // a1
+ MOVD (2*8)(R19), R1 // a2
+ MOVD (3*8)(R19), R2 // a3
+ MOVD (4*8)(R19), R3 // a4
+ MOVD (5*8)(R19), R4 // a5
+ MOVD (6*8)(R19), R5 // a6
+ MOVD (7*8)(R19), R6 // a7
+ MOVD (8*8)(R19), R7 // a8
+ MOVD (9*8)(R19), R8 // a9
+ MOVD (10*8)(R19), R9 // a10
+ MOVD $0, R10 // vararg
+
+ CALL R11
+
+ MOVD R0, (11*8)(R19) // r1
+ MOVD R1, (12*8)(R19) // r2
+
+ // Standard libc functions return -1 on error
+ // and set errno.
+ CMPW $-1, R0
+ BNE ok
+
+ // Get error code from libc.
+ CALL libc_errno(SB)
+ MOVW (R0), R0
+ MOVD R0, (13*8)(R19) // err
+
+ok:
+ RET
+
+// syscall10X calls a function in libc on behalf of the syscall package.
+// syscall10X takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// a7 uintptr
+// a8 uintptr
+// a9 uintptr
+// a10 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall10X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+//
+// syscall10X is like syscall10 but expects a 64-bit result
+// and tests for 64-bit -1 to decide there was an error.
+TEXT runtime·syscall10X(SB),NOSPLIT,$0
+ MOVD R0, R19 // pointer to args
+
+ MOVD (0*8)(R19), R11 // fn
+ MOVD (1*8)(R19), R0 // a1
+ MOVD (2*8)(R19), R1 // a2
+ MOVD (3*8)(R19), R2 // a3
+ MOVD (4*8)(R19), R3 // a4
+ MOVD (5*8)(R19), R4 // a5
+ MOVD (6*8)(R19), R5 // a6
+ MOVD (7*8)(R19), R6 // a7
+ MOVD (8*8)(R19), R7 // a8
+ MOVD (9*8)(R19), R8 // a9
+ MOVD (10*8)(R19), R9 // a10
+ MOVD $0, R10 // vararg
+
+ CALL R11
+
+ MOVD R0, (11*8)(R19) // r1
+ MOVD R1, (12*8)(R19) // r2
+
+ // Standard libc functions return -1 on error
+ // and set errno.
+ CMP $-1, R0
+ BNE ok
+
+ // Get error code from libc.
+ CALL libc_errno(SB)
+ MOVW (R0), R0
+ MOVD R0, (13*8)(R19) // err
+
+ok:
RET
diff --git a/src/runtime/sys_wasm.go b/src/runtime/sys_wasm.go
index 9bf710ba0e..057ed4ccd9 100644
--- a/src/runtime/sys_wasm.go
+++ b/src/runtime/sys_wasm.go
@@ -27,13 +27,9 @@ func wasmTruncU()
func wasmExit(code int32)
// adjust Gobuf as it if executed a call to fn with context ctxt
-// and then did an immediate gosave.
+// and then stopped before the first instruction in fn.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
sp := buf.sp
- if sys.RegSize > sys.PtrSize {
- sp -= sys.PtrSize
- *(*uintptr)(unsafe.Pointer(sp)) = 0
- }
sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = buf.pc
buf.sp = sp
diff --git a/src/runtime/sys_windows_386.s b/src/runtime/sys_windows_386.s
index ef8a3dd3c2..4f00c58c16 100644
--- a/src/runtime/sys_windows_386.s
+++ b/src/runtime/sys_windows_386.s
@@ -66,11 +66,6 @@ TEXT runtime·getlasterror(SB),NOSPLIT,$0
MOVL AX, ret+0(FP)
RET
-TEXT runtime·setlasterror(SB),NOSPLIT,$0
- MOVL err+0(FP), AX
- MOVL AX, 0x34(FS)
- RET
-
// Called by Windows as a Vectored Exception Handler (VEH).
// First argument is pointer to struct containing
// exception record and context pointers.
@@ -99,7 +94,7 @@ TEXT sigtramp<>(SB),NOSPLIT,$0-0
JNE 2(PC)
CALL runtime·badsignal2(SB)
- // save g and SP in case of stack switch
+ // save g in case of stack switch
MOVL DX, 32(SP) // g
MOVL SP, 36(SP)
@@ -113,13 +108,9 @@ TEXT sigtramp<>(SB),NOSPLIT,$0-0
get_tls(BP)
MOVL BX, g(BP)
MOVL (g_sched+gobuf_sp)(BX), DI
- // make it look like mstart called us on g0, to stop traceback
- SUBL $4, DI
- MOVL $runtime·mstart(SB), 0(DI)
- // traceback will think that we've done SUBL
- // on this stack, so subtract them here to match.
- // (we need room for sighandler arguments anyway).
+ // make room for sighandler arguments
// and re-save old SP for restoring later.
+ // (note that the 36(DI) here must match the 36(SP) above.)
SUBL $40, DI
MOVL SP, 36(DI)
MOVL DI, SP
@@ -137,7 +128,7 @@ g0:
// switch back to original stack and g
// no-op if we never left.
MOVL 36(SP), SP
- MOVL 32(SP), DX
+ MOVL 32(SP), DX // note: different SP
get_tls(BP)
MOVL DX, g(BP)
@@ -183,7 +174,7 @@ TEXT runtime·profileloop(SB),NOSPLIT,$0
ADDL $12, SP
JMP CX
-TEXT runtime·externalthreadhandler(SB),NOSPLIT,$0
+TEXT runtime·externalthreadhandler(SB),NOSPLIT|TOPFRAME,$0
PUSHL BP
MOVL SP, BP
PUSHL BX
@@ -347,60 +338,11 @@ TEXT runtime·setldt(SB),NOSPLIT,$0
MOVL CX, 0x14(FS)
RET
-// onosstack calls fn on OS stack.
-// func onosstack(fn unsafe.Pointer, arg uint32)
-TEXT runtime·onosstack(SB),NOSPLIT,$0
- MOVL fn+0(FP), AX // to hide from 8l
- MOVL arg+4(FP), BX
-
- // Execute call on m->g0 stack, in case we are not actually
- // calling a system call wrapper, like when running under WINE.
- get_tls(CX)
- CMPL CX, $0
- JNE 3(PC)
- // Not a Go-managed thread. Do not switch stack.
- CALL AX
- RET
-
- MOVL g(CX), BP
- MOVL g_m(BP), BP
-
- // leave pc/sp for cpu profiler
- MOVL (SP), SI
- MOVL SI, m_libcallpc(BP)
- MOVL g(CX), SI
- MOVL SI, m_libcallg(BP)
- // sp must be the last, because once async cpu profiler finds
- // all three values to be non-zero, it will use them
- LEAL fn+0(FP), SI
- MOVL SI, m_libcallsp(BP)
-
- MOVL m_g0(BP), SI
- CMPL g(CX), SI
- JNE switch
- // executing on m->g0 already
- CALL AX
- JMP ret
-
-switch:
- // Switch to m->g0 stack and back.
- MOVL (g_sched+gobuf_sp)(SI), SI
- MOVL SP, -4(SI)
- LEAL -4(SI), SP
- CALL AX
- MOVL 0(SP), SP
-
-ret:
- get_tls(CX)
- MOVL g(CX), BP
- MOVL g_m(BP), BP
- MOVL $0, m_libcallsp(BP)
- RET
-
-// Runs on OS stack. duration (in 100ns units) is in BX.
-TEXT runtime·usleep2(SB),NOSPLIT,$20
- // Want negative 100ns units.
- NEGL BX
+// Runs on OS stack.
+// duration (in -100ns units) is in dt+0(FP).
+// g may be nil.
+TEXT runtime·usleep2(SB),NOSPLIT,$20-4
+ MOVL dt+0(FP), BX
MOVL $-1, hi-4(SP)
MOVL BX, lo-8(SP)
LEAL lo-8(SP), BX
@@ -413,17 +355,15 @@ TEXT runtime·usleep2(SB),NOSPLIT,$20
MOVL BP, SP
RET
-// Runs on OS stack. duration (in 100ns units) is in BX.
-TEXT runtime·usleep2HighRes(SB),NOSPLIT,$36
- get_tls(CX)
- CMPL CX, $0
- JE gisnotset
-
- // Want negative 100ns units.
- NEGL BX
+// Runs on OS stack.
+// duration (in -100ns units) is in dt+0(FP).
+// g is valid.
+TEXT runtime·usleep2HighRes(SB),NOSPLIT,$36-4
+ MOVL dt+0(FP), BX
MOVL $-1, hi-4(SP)
MOVL BX, lo-8(SP)
+ get_tls(CX)
MOVL g(CX), CX
MOVL g_m(CX), CX
MOVL (m_mOS+mOS_highResTimer)(CX), CX
@@ -452,12 +392,6 @@ TEXT runtime·usleep2HighRes(SB),NOSPLIT,$36
RET
-gisnotset:
- // TLS is not configured. Call usleep2 instead.
- MOVL $runtime·usleep2(SB), AX
- CALL AX
- RET
-
// Runs on OS stack.
TEXT runtime·switchtothread(SB),NOSPLIT,$0
MOVL SP, BP
diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s
index d1690cad58..aba2811e59 100644
--- a/src/runtime/sys_windows_amd64.s
+++ b/src/runtime/sys_windows_amd64.s
@@ -103,12 +103,6 @@ TEXT runtime·getlasterror(SB),NOSPLIT,$0
MOVL AX, ret+0(FP)
RET
-TEXT runtime·setlasterror(SB),NOSPLIT,$0
- MOVL err+0(FP), AX
- MOVQ 0x30(GS), CX
- MOVL AX, 0x68(CX)
- RET
-
// Called by Windows as a Vectored Exception Handler (VEH).
// First argument is pointer to struct containing
// exception record and context pointers.
@@ -157,16 +151,10 @@ TEXT sigtramp<>(SB),NOSPLIT|NOFRAME,$0-0
get_tls(BP)
MOVQ BX, g(BP)
MOVQ (g_sched+gobuf_sp)(BX), DI
- // make it look like mstart called us on g0, to stop traceback
- SUBQ $8, DI
- MOVQ $runtime·mstart(SB), SI
- MOVQ SI, 0(DI)
- // traceback will think that we've done PUSHFQ and SUBQ
- // on this stack, so subtract them here to match.
- // (we need room for sighandler arguments anyway).
+ // make room for sighandler arguments
// and re-save old SP for restoring later.
- SUBQ $(112+8), DI
- // save g, save old stack pointer.
+ // (note that the 104(DI) here must match the 104(SP) above.)
+ SUBQ $120, DI
MOVQ SP, 104(DI)
MOVQ DI, SP
@@ -227,7 +215,7 @@ TEXT runtime·profileloop(SB),NOSPLIT|NOFRAME,$8
CALL runtime·externalthreadhandler(SB)
RET
-TEXT runtime·externalthreadhandler(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·externalthreadhandler(SB),NOSPLIT|NOFRAME|TOPFRAME,$0
PUSHQ BP
MOVQ SP, BP
PUSHQ BX
@@ -388,61 +376,16 @@ TEXT runtime·settls(SB),NOSPLIT,$0
MOVQ DI, 0x28(GS)
RET
-// func onosstack(fn unsafe.Pointer, arg uint32)
-TEXT runtime·onosstack(SB),NOSPLIT,$0
- MOVQ fn+0(FP), AX // to hide from 6l
- MOVL arg+8(FP), BX
-
- // Execute call on m->g0 stack, in case we are not actually
- // calling a system call wrapper, like when running under WINE.
- get_tls(R15)
- CMPQ R15, $0
- JNE 3(PC)
- // Not a Go-managed thread. Do not switch stack.
- CALL AX
- RET
-
- MOVQ g(R15), R13
- MOVQ g_m(R13), R13
-
- // leave pc/sp for cpu profiler
- MOVQ (SP), R12
- MOVQ R12, m_libcallpc(R13)
- MOVQ g(R15), R12
- MOVQ R12, m_libcallg(R13)
- // sp must be the last, because once async cpu profiler finds
- // all three values to be non-zero, it will use them
- LEAQ fn+0(FP), R12
- MOVQ R12, m_libcallsp(R13)
-
- MOVQ m_g0(R13), R14
- CMPQ g(R15), R14
- JNE switch
- // executing on m->g0 already
- CALL AX
- JMP ret
-
-switch:
- // Switch to m->g0 stack and back.
- MOVQ (g_sched+gobuf_sp)(R14), R14
- MOVQ SP, -8(R14)
- LEAQ -8(R14), SP
- CALL AX
- MOVQ 0(SP), SP
-
-ret:
- MOVQ $0, m_libcallsp(R13)
- RET
-
-// Runs on OS stack. duration (in 100ns units) is in BX.
+// Runs on OS stack.
+// duration (in -100ns units) is in dt+0(FP).
+// g may be nil.
// The function leaves room for 4 syscall parameters
// (as per windows amd64 calling convention).
-TEXT runtime·usleep2(SB),NOSPLIT|NOFRAME,$48
+TEXT runtime·usleep2(SB),NOSPLIT|NOFRAME,$48-4
+ MOVLQSX dt+0(FP), BX
MOVQ SP, AX
ANDQ $~15, SP // alignment as per Windows requirement
MOVQ AX, 40(SP)
- // Want negative 100ns units.
- NEGQ BX
LEAQ 32(SP), R8 // ptime
MOVQ BX, (R8)
MOVQ $-1, CX // handle
@@ -452,11 +395,11 @@ TEXT runtime·usleep2(SB),NOSPLIT|NOFRAME,$48
MOVQ 40(SP), SP
RET
-// Runs on OS stack. duration (in 100ns units) is in BX.
-TEXT runtime·usleep2HighRes(SB),NOSPLIT|NOFRAME,$72
+// Runs on OS stack. duration (in -100ns units) is in dt+0(FP).
+// g is valid.
+TEXT runtime·usleep2HighRes(SB),NOSPLIT|NOFRAME,$72-4
+ MOVLQSX dt+0(FP), BX
get_tls(CX)
- CMPQ CX, $0
- JE gisnotset
MOVQ SP, AX
ANDQ $~15, SP // alignment as per Windows requirement
@@ -466,8 +409,6 @@ TEXT runtime·usleep2HighRes(SB),NOSPLIT|NOFRAME,$72
MOVQ g_m(CX), CX
MOVQ (m_mOS+mOS_highResTimer)(CX), CX // hTimer
MOVQ CX, 48(SP) // save hTimer for later
- // Want negative 100ns units.
- NEGQ BX
LEAQ 56(SP), DX // lpDueTime
MOVQ BX, (DX)
MOVQ $0, R8 // lPeriod
@@ -487,12 +428,6 @@ TEXT runtime·usleep2HighRes(SB),NOSPLIT|NOFRAME,$72
MOVQ 64(SP), SP
RET
-gisnotset:
- // TLS is not configured. Call usleep2 instead.
- MOVQ $runtime·usleep2(SB), AX
- CALL AX
- RET
-
// Runs on OS stack.
TEXT runtime·switchtothread(SB),NOSPLIT|NOFRAME,$0
MOVQ SP, AX
diff --git a/src/runtime/sys_windows_arm.s b/src/runtime/sys_windows_arm.s
index fe267080cc..cd230ccffd 100644
--- a/src/runtime/sys_windows_arm.s
+++ b/src/runtime/sys_windows_arm.s
@@ -6,6 +6,8 @@
#include "go_tls.h"
#include "textflag.h"
+// Note: For system ABI, R0-R3 are args, R4-R11 are callee-save.
+
// void runtime·asmstdcall(void *c);
TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
MOVM.DB.W [R4, R5, R14], (R13) // push {r4, r5, lr}
@@ -103,11 +105,6 @@ TEXT runtime·getlasterror(SB),NOSPLIT,$0
MOVW R0, ret+0(FP)
RET
-TEXT runtime·setlasterror(SB),NOSPLIT|NOFRAME,$0
- MRC 15, 0, R1, C13, C0, 2
- MOVW R0, 0x34(R1)
- RET
-
// Called by Windows as a Vectored Exception Handler (VEH).
// First argument is pointer to struct containing
// exception record and context pointers.
@@ -144,11 +141,10 @@ TEXT sigtramp<>(SB),NOSPLIT|NOFRAME,$0
MOVW (g_sched+gobuf_sp)(g), R3 // R3 = g->gobuf.sp
BL runtime·save_g(SB)
- // traceback will think that we've done PUSH and SUB
- // on this stack, so subtract them here to match.
- // (we need room for sighandler arguments anyway).
+ // make room for sighandler arguments
// and re-save old SP for restoring later.
- SUB $(40+8+20), R3
+ // (note that the 24(R3) here must match the 24(R13) above.)
+ SUB $40, R3
MOVW R13, 24(R3) // save old stack pointer
MOVW R3, R13 // switch stack
@@ -156,22 +152,14 @@ g0:
MOVW 0(R6), R2 // R2 = ExceptionPointers->ExceptionRecord
MOVW 4(R6), R3 // R3 = ExceptionPointers->ContextRecord
- // make it look like mstart called us on g0, to stop traceback
- MOVW $runtime·mstart(SB), R4
-
- MOVW R4, 0(R13) // Save link register for traceback
+ MOVW $0, R4
+ MOVW R4, 0(R13) // No saved link register.
MOVW R2, 4(R13) // Move arg0 (ExceptionRecord) into position
MOVW R3, 8(R13) // Move arg1 (ContextRecord) into position
MOVW R5, 12(R13) // Move arg2 (original g) into position
BL (R7) // Call the go routine
MOVW 16(R13), R4 // Fetch return value from stack
- // Compute the value of the g0 stack pointer after deallocating
- // this frame, then allocating 8 bytes. We may need to store
- // the resume SP and PC on the g0 stack to work around
- // control flow guard when we resume from the exception.
- ADD $(40+20), R13, R12
-
// switch back to original stack and g
MOVW 24(R13), R13
MOVW 20(R13), g
@@ -188,42 +176,45 @@ done:
BEQ return
// Check if we need to set up the control flow guard workaround.
- // On Windows/ARM, the stack pointer must lie within system
- // stack limits when we resume from exception.
+ // On Windows, the stack pointer in the context must lie within
+ // system stack limits when we resume from exception.
// Store the resume SP and PC on the g0 stack,
- // and return to returntramp on the g0 stack. returntramp
+ // and return to sigresume on the g0 stack. sigresume
// pops the saved PC and SP from the g0 stack, resuming execution
// at the desired location.
- // If returntramp has already been set up by a previous exception
+ // If sigresume has already been set up by a previous exception
// handler, don't clobber the stored SP and PC on the stack.
MOVW 4(R3), R3 // PEXCEPTION_POINTERS->Context
- MOVW 0x40(R3), R2 // load PC from context record
- MOVW $returntramp<>(SB), R1
+ MOVW context_pc(R3), R2 // load PC from context record
+ MOVW $sigresume<>(SB), R1
CMP R1, R2
B.EQ return // do not clobber saved SP/PC
- // Save resume SP and PC on g0 stack
- MOVW 0x38(R3), R2 // load SP from context record
- MOVW R2, 0(R12) // Store resume SP on g0 stack
- MOVW 0x40(R3), R2 // load PC from context record
- MOVW R2, 4(R12) // Store resume PC on g0 stack
+ // Save resume SP and PC into R0, R1.
+ MOVW context_spr(R3), R2
+ MOVW R2, context_r0(R3)
+ MOVW context_pc(R3), R2
+ MOVW R2, context_r1(R3)
- // Set up context record to return to returntramp on g0 stack
- MOVW R12, 0x38(R3) // save g0 stack pointer
- // in context record
- MOVW $returntramp<>(SB), R2 // save resume address
- MOVW R2, 0x40(R3) // in context record
+ // Set up context record to return to sigresume on g0 stack
+ MOVW R12, context_spr(R3)
+ MOVW $sigresume<>(SB), R2
+ MOVW R2, context_pc(R3)
return:
B (R14) // return
-//
// Trampoline to resume execution from exception handler.
// This is part of the control flow guard workaround.
// It switches stacks and jumps to the continuation address.
-//
-TEXT returntramp<>(SB),NOSPLIT|NOFRAME,$0
- MOVM.IA (R13), [R13, R15] // ldm sp, [sp, pc]
+// R0 and R1 are set above at the end of sigtramp<>
+// in the context that starts executing at sigresume<>.
+TEXT sigresume<>(SB),NOSPLIT|NOFRAME,$0
+ // Important: do not smash LR,
+ // which is set to a live value when handling
+ // a signal by pushing a call to sigpanic onto the stack.
+ MOVW R0, R13
+ B (R1)
TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0
MOVW $runtime·exceptionhandler(SB), R1
@@ -259,16 +250,17 @@ TEXT runtime·profileloop(SB),NOSPLIT|NOFRAME,$0
// +----------------+
// 12| argument (r0) |
//---+----------------+
-// 8 | param1 |
+// 8 | param1 | (also return value for called Go function)
// +----------------+
// 4 | param0 |
// +----------------+
-// 0 | retval |
+// 0 | slot for LR |
// +----------------+
//
-TEXT runtime·externalthreadhandler(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·externalthreadhandler(SB),NOSPLIT|NOFRAME|TOPFRAME,$0
MOVM.DB.W [R4-R11, R14], (R13) // push {r4-r11, lr}
SUB $(m__size + g__size + 20), R13 // space for locals
+ MOVW R14, 0(R13) // push LR again for anything unwinding the stack
MOVW R0, 12(R13)
MOVW R1, 16(R13)
@@ -307,7 +299,7 @@ TEXT runtime·externalthreadhandler(SB),NOSPLIT|NOFRAME,$0
MOVW $0, g
BL runtime·save_g(SB)
- MOVW 0(R13), R0 // load return value
+ MOVW 8(R13), R0 // load return value
ADD $(m__size + g__size + 20), R13 // free locals
MOVM.IA.W (R13), [R4-R11, R15] // pop {r4-r11, pc}
@@ -359,9 +351,6 @@ TEXT runtime·tstart_stdcall(SB),NOSPLIT|NOFRAME,$0
MOVW R0, g_m(g)
BL runtime·save_g(SB)
- // do per-thread TLS initialization
- BL init_thread_tls<>(SB)
-
// Layout new m scheduler stack on os stack.
MOVW R13, R0
MOVW R0, g_stack+stack_hi(g)
@@ -377,79 +366,11 @@ TEXT runtime·tstart_stdcall(SB),NOSPLIT|NOFRAME,$0
MOVW $0, R0
MOVM.IA.W (R13), [R4-R11, R15] // pop {r4-r11, pc}
-// onosstack calls fn on OS stack.
-// adapted from asm_arm.s : systemstack
-// func onosstack(fn unsafe.Pointer, arg uint32)
-TEXT runtime·onosstack(SB),NOSPLIT,$0
- MOVW fn+0(FP), R5 // R5 = fn
- MOVW arg+4(FP), R6 // R6 = arg
-
- // This function can be called when there is no g,
- // for example, when we are handling a callback on a non-go thread.
- // In this case we're already on the system stack.
- CMP $0, g
- BEQ noswitch
-
- MOVW g_m(g), R1 // R1 = m
-
- MOVW m_gsignal(R1), R2 // R2 = gsignal
- CMP g, R2
- B.EQ noswitch
-
- MOVW m_g0(R1), R2 // R2 = g0
- CMP g, R2
- B.EQ noswitch
-
- MOVW m_curg(R1), R3
- CMP g, R3
- B.EQ switch
-
- // Bad: g is not gsignal, not g0, not curg. What is it?
- // Hide call from linker nosplit analysis.
- MOVW $runtime·badsystemstack(SB), R0
- BL (R0)
- B runtime·abort(SB)
-
-switch:
- // save our state in g->sched. Pretend to
- // be systemstack_switch if the G stack is scanned.
- MOVW $runtime·systemstack_switch(SB), R3
- ADD $4, R3, R3 // get past push {lr}
- MOVW R3, (g_sched+gobuf_pc)(g)
- MOVW R13, (g_sched+gobuf_sp)(g)
- MOVW LR, (g_sched+gobuf_lr)(g)
- MOVW g, (g_sched+gobuf_g)(g)
-
- // switch to g0
- MOVW R2, g
- MOVW (g_sched+gobuf_sp)(R2), R3
- // make it look like mstart called systemstack on g0, to stop traceback
- SUB $4, R3, R3
- MOVW $runtime·mstart(SB), R4
- MOVW R4, 0(R3)
- MOVW R3, R13
-
- // call target function
- MOVW R6, R0 // arg
- BL (R5)
-
- // switch back to g
- MOVW g_m(g), R1
- MOVW m_curg(R1), g
- MOVW (g_sched+gobuf_sp)(g), R13
- MOVW $0, R3
- MOVW R3, (g_sched+gobuf_sp)(g)
- RET
-
-noswitch:
- // Using a tail call here cleans up tracebacks since we won't stop
- // at an intermediate systemstack.
- MOVW.P 4(R13), R14 // restore LR
- MOVW R6, R0 // arg
- B (R5)
-
-// Runs on OS stack. Duration (in 100ns units) is in R0.
-TEXT runtime·usleep2(SB),NOSPLIT|NOFRAME,$0
+// Runs on OS stack.
+// duration (in -100ns units) is in dt+0(FP).
+// g may be nil.
+TEXT runtime·usleep2(SB),NOSPLIT|NOFRAME,$0-4
+ MOVW dt+0(FP), R0
MOVM.DB.W [R4, R14], (R13) // push {r4, lr}
MOVW R13, R4 // Save SP
SUB $8, R13 // R13 = R13 - 8
@@ -465,9 +386,11 @@ TEXT runtime·usleep2(SB),NOSPLIT|NOFRAME,$0
MOVW R4, R13 // Restore SP
MOVM.IA.W (R13), [R4, R15] // pop {R4, pc}
-// Runs on OS stack. Duration (in 100ns units) is in R0.
+// Runs on OS stack.
+// duration (in -100ns units) is in dt+0(FP).
+// g is valid.
// TODO: neeeds to be implemented properly.
-TEXT runtime·usleep2HighRes(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·usleep2HighRes(SB),NOSPLIT|NOFRAME,$0-4
B runtime·abort(SB)
// Runs on OS stack.
@@ -497,7 +420,7 @@ TEXT runtime·read_tls_fallback(SB),NOSPLIT|NOFRAME,$0
#define time_hi1 4
#define time_hi2 8
-TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
+TEXT runtime·nanotime1(SB),NOSPLIT|NOFRAME,$0-8
MOVW $0, R0
MOVB runtime·useQPCTime(SB), R0
CMP $0, R0
@@ -521,9 +444,8 @@ loop:
RET
useQPC:
B runtime·nanotimeQPC(SB) // tail call
- RET
-TEXT time·now(SB),NOSPLIT,$0-20
+TEXT time·now(SB),NOSPLIT|NOFRAME,$0-20
MOVW $0, R0
MOVB runtime·useQPCTime(SB), R0
CMP $0, R0
@@ -597,8 +519,7 @@ wall:
MOVW R1,nsec+8(FP)
RET
useQPC:
- B runtime·nanotimeQPC(SB) // tail call
- RET
+ B runtime·nowQPC(SB) // tail call
// save_g saves the g register (R10) into thread local memory
// so that we can call externally compiled
@@ -656,39 +577,8 @@ TEXT runtime·_initcgo(SB),NOSPLIT|NOFRAME,$0
MOVW $runtime·tls_g(SB), R1
MOVW R0, (R1)
- BL init_thread_tls<>(SB)
-
MOVW R4, R13
MOVM.IA.W (R13), [R4, R15] // pop {r4, pc}
-// void init_thread_tls()
-//
-// Does per-thread TLS initialization. Saves a pointer to the TLS slot
-// holding G, in the current m.
-//
-// g->m->tls[0] = &_TEB->TlsSlots[tls_g]
-//
-// The purpose of this is to enable the profiling handler to get the
-// current g associated with the thread. We cannot use m->curg because curg
-// only holds the current user g. If the thread is executing system code or
-// external code, m->curg will be NULL. The thread's TLS slot always holds
-// the current g, so save a reference to this location so the profiling
-// handler can get the real g from the thread's m.
-//
-// Clobbers R0-R3
-TEXT init_thread_tls<>(SB),NOSPLIT|NOFRAME,$0
- // compute &_TEB->TlsSlots[tls_g]
- MRC 15, 0, R0, C13, C0, 2
- ADD $0xe10, R0
- MOVW $runtime·tls_g(SB), R1
- MOVW (R1), R1
- MOVW R1<<2, R1
- ADD R1, R0
-
- // save in g->m->tls[0]
- MOVW g_m(g), R1
- MOVW R0, m_tls(R1)
- RET
-
// Holds the TLS Slot, which was allocated by TlsAlloc()
GLOBL runtime·tls_g+0(SB), NOPTR, $4
diff --git a/src/runtime/sys_windows_arm64.s b/src/runtime/sys_windows_arm64.s
new file mode 100644
index 0000000000..53960488f9
--- /dev/null
+++ b/src/runtime/sys_windows_arm64.s
@@ -0,0 +1,579 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "textflag.h"
+#include "funcdata.h"
+
+// Offsets into Thread Environment Block (pointer in R18)
+#define TEB_error 0x68
+#define TEB_TlsSlots 0x1480
+
+// Note: R0-R7 are args, R8 is indirect return value address,
+// R9-R15 are caller-save, R19-R29 are callee-save.
+//
+// load_g and save_g (in tls_arm64.s) clobber R27 (REGTMP) and R0.
+
+// void runtime·asmstdcall(void *c);
+TEXT runtime·asmstdcall(SB),NOSPLIT|NOFRAME,$0
+ STP.W (R29, R30), -32(RSP) // allocate C ABI stack frame
+ STP (R19, R20), 16(RSP) // save old R19, R20
+ MOVD R0, R19 // save libcall pointer
+ MOVD RSP, R20 // save stack pointer
+
+ // SetLastError(0)
+ MOVD $0, TEB_error(R18_PLATFORM)
+ MOVD libcall_args(R19), R12 // libcall->args
+
+ // Do we have more than 8 arguments?
+ MOVD libcall_n(R19), R0
+ CMP $0, R0; BEQ _0args
+ CMP $1, R0; BEQ _1args
+ CMP $2, R0; BEQ _2args
+ CMP $3, R0; BEQ _3args
+ CMP $4, R0; BEQ _4args
+ CMP $5, R0; BEQ _5args
+ CMP $6, R0; BEQ _6args
+ CMP $7, R0; BEQ _7args
+ CMP $8, R0; BEQ _8args
+
+ // Reserve stack space for remaining args
+ SUB $8, R0, R2
+ ADD $1, R2, R3 // make even number of words for stack alignment
+ AND $~1, R3
+ LSL $3, R3
+ SUB R3, RSP
+
+ // R4: size of stack arguments (n-8)*8
+ // R5: &args[8]
+ // R6: loop counter, from 0 to (n-8)*8
+ // R7: scratch
+ // R8: copy of RSP - (R2)(RSP) assembles as (R2)(ZR)
+ SUB $8, R0, R4
+ LSL $3, R4
+ ADD $(8*8), R12, R5
+ MOVD $0, R6
+ MOVD RSP, R8
+stackargs:
+ MOVD (R6)(R5), R7
+ MOVD R7, (R6)(R8)
+ ADD $8, R6
+ CMP R6, R4
+ BNE stackargs
+
+_8args:
+ MOVD (7*8)(R12), R7
+_7args:
+ MOVD (6*8)(R12), R6
+_6args:
+ MOVD (5*8)(R12), R5
+_5args:
+ MOVD (4*8)(R12), R4
+_4args:
+ MOVD (3*8)(R12), R3
+_3args:
+ MOVD (2*8)(R12), R2
+_2args:
+ MOVD (1*8)(R12), R1
+_1args:
+ MOVD (0*8)(R12), R0
+_0args:
+
+ MOVD libcall_fn(R19), R12 // branch to libcall->fn
+ BL (R12)
+
+ MOVD R20, RSP // free stack space
+ MOVD R0, libcall_r1(R19) // save return value to libcall->r1
+ // TODO(rsc) floating point like amd64 in libcall->r2?
+
+ // GetLastError
+ MOVD TEB_error(R18_PLATFORM), R0
+ MOVD R0, libcall_err(R19)
+
+ // Restore callee-saved registers.
+ LDP 16(RSP), (R19, R20)
+ LDP.P 32(RSP), (R29, R30)
+ RET
+
+TEXT runtime·badsignal2(SB),NOSPLIT,$16-0
+ NO_LOCAL_POINTERS
+
+ // stderr
+ MOVD runtime·_GetStdHandle(SB), R1
+ MOVD $-12, R0
+ SUB $16, RSP // skip over saved frame pointer below RSP
+ BL (R1)
+ ADD $16, RSP
+
+ // handle in R0 already
+ MOVD $runtime·badsignalmsg(SB), R1 // lpBuffer
+ MOVD $runtime·badsignallen(SB), R2 // lpNumberOfBytesToWrite
+ MOVD (R2), R2
+ MOVD R13, R3 // lpNumberOfBytesWritten
+ MOVD $0, R4 // lpOverlapped
+ MOVD runtime·_WriteFile(SB), R12
+ SUB $16, RSP // skip over saved frame pointer below RSP
+ BL (R12)
+ ADD $16, RSP
+
+ RET
+
+TEXT runtime·getlasterror(SB),NOSPLIT|NOFRAME,$0
+ MOVD TEB_error(R18_PLATFORM), R0
+ MOVD R0, ret+0(FP)
+ RET
+
+#define SAVE_R19_TO_R28(offset) \
+ MOVD R19, savedR19+((offset)+0*8)(SP); \
+ MOVD R20, savedR20+((offset)+1*8)(SP); \
+ MOVD R21, savedR21+((offset)+2*8)(SP); \
+ MOVD R22, savedR22+((offset)+3*8)(SP); \
+ MOVD R23, savedR23+((offset)+4*8)(SP); \
+ MOVD R24, savedR24+((offset)+5*8)(SP); \
+ MOVD R25, savedR25+((offset)+6*8)(SP); \
+ MOVD R26, savedR26+((offset)+7*8)(SP); \
+ MOVD R27, savedR27+((offset)+8*8)(SP); \
+ MOVD g, savedR28+((offset)+9*8)(SP);
+
+#define RESTORE_R19_TO_R28(offset) \
+ MOVD savedR19+((offset)+0*8)(SP), R19; \
+ MOVD savedR20+((offset)+1*8)(SP), R20; \
+ MOVD savedR21+((offset)+2*8)(SP), R21; \
+ MOVD savedR22+((offset)+3*8)(SP), R22; \
+ MOVD savedR23+((offset)+4*8)(SP), R23; \
+ MOVD savedR24+((offset)+5*8)(SP), R24; \
+ MOVD savedR25+((offset)+6*8)(SP), R25; \
+ MOVD savedR26+((offset)+7*8)(SP), R26; \
+ MOVD savedR27+((offset)+8*8)(SP), R27; \
+ MOVD savedR28+((offset)+9*8)(SP), g; /* R28 */
+
+// Called by Windows as a Vectored Exception Handler (VEH).
+// First argument is pointer to struct containing
+// exception record and context pointers.
+// Handler function is stored in R1
+// Return 0 for 'not handled', -1 for handled.
+// int32_t sigtramp(
+// PEXCEPTION_POINTERS ExceptionInfo,
+// func *GoExceptionHandler);
+TEXT sigtramp<>(SB),NOSPLIT|NOFRAME,$0
+ // Save R0, R1 (args) as well as LR, R27, R28 (callee-save).
+ MOVD R0, R5
+ MOVD R1, R6
+ MOVD LR, R7
+ MOVD R27, R16 // saved R27 (callee-save)
+ MOVD g, R17 // saved R28 (callee-save from Windows, not really g)
+
+ BL runtime·load_g(SB) // smashes R0, R27, R28 (g)
+ CMP $0, g // is there a current g?
+ BNE 2(PC)
+ BL runtime·badsignal2(SB)
+
+ // Do we need to switch to the g0 stack?
+ MOVD g, R3 // R3 = oldg (for sigtramp_g0)
+ MOVD g_m(g), R2 // R2 = m
+ MOVD m_g0(R2), R2 // R2 = g0
+ CMP g, R2 // if curg == g0
+ BNE switch
+
+ // No: on g0 stack already, tail call to sigtramp_g0.
+ // Restore all the callee-saves so sigtramp_g0 can return to our caller.
+ // We also pass R2 = g0, R3 = oldg, both set above.
+ MOVD R5, R0
+ MOVD R6, R1
+ MOVD R7, LR
+ MOVD R16, R27 // restore R27
+ MOVD R17, g // restore R28
+ B sigtramp_g0<>(SB)
+
+switch:
+ // switch to g0 stack (but do not update g - that's sigtramp_g0's job)
+ MOVD RSP, R8
+ MOVD (g_sched+gobuf_sp)(R2), R4 // R4 = g->gobuf.sp
+ SUB $(6*8), R4 // alloc space for saves - 2 words below SP for frame pointer, 3 for us to use, 1 for alignment
+ MOVD R4, RSP // switch to g0 stack
+
+ MOVD $0, (0*8)(RSP) // fake saved LR
+ MOVD R7, (1*8)(RSP) // saved LR
+ MOVD R8, (2*8)(RSP) // saved SP
+
+ MOVD R5, R0 // original args
+ MOVD R6, R1 // original args
+ MOVD R16, R27
+ MOVD R17, g // R28
+ BL sigtramp_g0<>(SB)
+
+ // switch back to original stack; g already updated
+ MOVD (1*8)(RSP), R7 // saved LR
+ MOVD (2*8)(RSP), R8 // saved SP
+ MOVD R7, LR
+ MOVD R8, RSP
+ RET
+
+// sigtramp_g0 is running on the g0 stack, with R2 = g0, R3 = oldg.
+// But g itself is not set - that's R28, a callee-save register,
+// and it still holds the value from the Windows DLL caller.
+TEXT sigtramp_g0<>(SB),NOSPLIT,$128
+ NO_LOCAL_POINTERS
+
+ // Push C callee-save registers R19-R28. LR, FP already saved.
+ SAVE_R19_TO_R28(-10*8)
+
+ MOVD 0(R0), R5 // R5 = ExceptionPointers->ExceptionRecord
+ MOVD 8(R0), R6 // R6 = ExceptionPointers->ContextRecord
+ MOVD R6, context-(11*8)(SP)
+
+ MOVD R2, g // g0
+ BL runtime·save_g(SB) // smashes R0
+
+ MOVD R5, (1*8)(RSP) // arg0 (ExceptionRecord)
+ MOVD R6, (2*8)(RSP) // arg1 (ContextRecord)
+ MOVD R3, (3*8)(RSP) // arg2 (original g)
+ MOVD R3, oldg-(12*8)(SP)
+ BL (R1)
+ MOVD oldg-(12*8)(SP), g
+ BL runtime·save_g(SB) // smashes R0
+ MOVW (4*8)(RSP), R0 // return value (0 or -1)
+
+ // if return value is CONTINUE_SEARCH, do not set up control
+ // flow guard workaround
+ CMP $0, R0
+ BEQ return
+
+ // Check if we need to set up the control flow guard workaround.
+ // On Windows, the stack pointer in the context must lie within
+ // system stack limits when we resume from exception.
+ // Store the resume SP and PC in alternate registers
+ // and return to sigresume on the g0 stack.
+ // sigresume makes no use of the stack at all,
+ // loading SP from R0 and jumping to R1.
+ // Note that smashing R0 and R1 is only safe because we know sigpanic
+ // will not actually return to the original frame, so the registers
+ // are effectively dead. But this does mean we can't use the
+ // same mechanism for async preemption.
+ MOVD context-(11*8)(SP), R6
+ MOVD context_pc(R6), R2 // load PC from context record
+ MOVD $sigresume<>(SB), R1
+
+ CMP R1, R2
+ BEQ return // do not clobber saved SP/PC
+
+ // Save resume SP and PC into R0, R1.
+ MOVD context_xsp(R6), R2
+ MOVD R2, (context_x+0*8)(R6)
+ MOVD context_pc(R6), R2
+ MOVD R2, (context_x+1*8)(R6)
+
+ // Set up context record to return to sigresume on g0 stack
+ MOVD RSP, R2
+ MOVD R2, context_xsp(R6)
+ MOVD $sigresume<>(SB), R2
+ MOVD R2, context_pc(R6)
+
+return:
+ RESTORE_R19_TO_R28(-10*8) // smashes g
+ RET
+
+// Trampoline to resume execution from exception handler.
+// This is part of the control flow guard workaround.
+// It switches stacks and jumps to the continuation address.
+// R0 and R1 are set above at the end of sigtramp<>
+// in the context that starts executing at sigresume<>.
+TEXT sigresume<>(SB),NOSPLIT|NOFRAME,$0
+ // Important: do not smash LR,
+ // which is set to a live value when handling
+ // a signal by pushing a call to sigpanic onto the stack.
+ MOVD R0, RSP
+ B (R1)
+
+TEXT runtime·exceptiontramp(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·exceptionhandler(SB), R1
+ B sigtramp<>(SB)
+
+TEXT runtime·firstcontinuetramp(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·firstcontinuehandler(SB), R1
+ B sigtramp<>(SB)
+
+TEXT runtime·lastcontinuetramp(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·lastcontinuehandler(SB), R1
+ B sigtramp<>(SB)
+
+TEXT runtime·ctrlhandler(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·ctrlhandler1(SB), R1
+ B runtime·externalthreadhandler(SB)
+
+TEXT runtime·profileloop(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·profileloop1(SB), R1
+ B runtime·externalthreadhandler(SB)
+
+// externalthreadhander called with R0 = uint32 arg, R1 = Go function f.
+// Need to call f(arg), which returns a uint32, and return it in R0.
+TEXT runtime·externalthreadhandler(SB),NOSPLIT|TOPFRAME,$96-0
+ NO_LOCAL_POINTERS
+
+ // Push C callee-save registers R19-R28. LR, FP already saved.
+ SAVE_R19_TO_R28(-10*8)
+
+ // Allocate space for args, saved R0+R1, g, and m structures.
+ // Hide from nosplit check.
+ #define extra ((64+g__size+m__size+15)&~15)
+ SUB $extra, RSP, R2 // hide from nosplit overflow check
+ MOVD R2, RSP
+
+ // Save R0 and R1 (our args).
+ MOVD R0, 32(RSP)
+ MOVD R1, 40(RSP)
+
+ // Zero out m and g structures.
+ MOVD $64(RSP), R0
+ MOVD R0, 8(RSP)
+ MOVD $(m__size + g__size), R0
+ MOVD R0, 16(RSP)
+ MOVD $0, 0(RSP) // not-saved LR
+ BL runtime·memclrNoHeapPointers(SB)
+
+ // Initialize m and g structures.
+ MOVD $64(RSP), g
+ MOVD $g__size(g), R3 // m
+ MOVD R3, g_m(g) // g->m = m
+ MOVD g, m_g0(R3) // m->g0 = g
+ MOVD g, m_curg(R3) // m->curg = g
+ MOVD RSP, R0
+ MOVD R0, g_stack+stack_hi(g)
+ SUB $(32*1024), R0
+ MOVD R0, (g_stack+stack_lo)(g)
+ MOVD R0, g_stackguard0(g)
+ MOVD R0, g_stackguard1(g)
+ BL runtime·save_g(SB)
+
+ // Call function.
+ MOVD 32(RSP), R0
+ MOVD 40(RSP), R1
+ MOVW R0, 8(RSP)
+ BL (R1)
+
+ // Clear g.
+ MOVD $0, g
+ BL runtime·save_g(SB)
+
+ // Load return value (save_g would have smashed)
+ MOVW (2*8)(RSP), R0
+
+ ADD $extra, RSP, R2
+ MOVD R2, RSP
+ #undef extra
+
+ RESTORE_R19_TO_R28(-10*8)
+ RET
+
+GLOBL runtime·cbctxts(SB), NOPTR, $4
+
+TEXT runtime·callbackasm1(SB),NOSPLIT,$208-0
+ NO_LOCAL_POINTERS
+
+ // On entry, the trampoline in zcallback_windows_arm64.s left
+ // the callback index in R12 (which is volatile in the C ABI).
+
+ // Save callback register arguments R0-R7.
+ // We do this at the top of the frame so they're contiguous with stack arguments.
+ MOVD R0, arg0-(8*8)(SP)
+ MOVD R1, arg1-(7*8)(SP)
+ MOVD R2, arg2-(6*8)(SP)
+ MOVD R3, arg3-(5*8)(SP)
+ MOVD R4, arg4-(4*8)(SP)
+ MOVD R5, arg5-(3*8)(SP)
+ MOVD R6, arg6-(2*8)(SP)
+ MOVD R7, arg7-(1*8)(SP)
+
+ // Push C callee-save registers R19-R28.
+ // LR, FP already saved.
+ SAVE_R19_TO_R28(-18*8)
+
+ // Create a struct callbackArgs on our stack.
+ MOVD $cbargs-(18*8+callbackArgs__size)(SP), R13
+ MOVD R12, callbackArgs_index(R13) // callback index
+ MOVD $arg0-(8*8)(SP), R0
+ MOVD R0, callbackArgs_args(R13) // address of args vector
+ MOVD $0, R0
+ MOVD R0, callbackArgs_result(R13) // result
+
+ // Call cgocallback, which will call callbackWrap(frame).
+ MOVD $·callbackWrap(SB), R0 // PC of function to call
+ MOVD R13, R1 // frame (&callbackArgs{...})
+ MOVD $0, R2 // context
+ MOVD R0, (1*8)(RSP)
+ MOVD R1, (2*8)(RSP)
+ MOVD R2, (3*8)(RSP)
+ BL runtime·cgocallback(SB)
+
+ // Get callback result.
+ MOVD $cbargs-(18*8+callbackArgs__size)(SP), R13
+ MOVD callbackArgs_result(R13), R0
+
+ RESTORE_R19_TO_R28(-18*8)
+
+ RET
+
+// uint32 tstart_stdcall(M *newm);
+TEXT runtime·tstart_stdcall(SB),NOSPLIT,$96-0
+ SAVE_R19_TO_R28(-10*8)
+
+ MOVD m_g0(R0), g
+ MOVD R0, g_m(g)
+ BL runtime·save_g(SB)
+
+ // Set up stack guards for OS stack.
+ MOVD RSP, R0
+ MOVD R0, g_stack+stack_hi(g)
+ SUB $(64*1024), R0
+ MOVD R0, (g_stack+stack_lo)(g)
+ MOVD R0, g_stackguard0(g)
+ MOVD R0, g_stackguard1(g)
+
+ BL runtime·emptyfunc(SB) // fault if stack check is wrong
+ BL runtime·mstart(SB)
+
+ RESTORE_R19_TO_R28(-10*8)
+
+ // Exit the thread.
+ MOVD $0, R0
+ RET
+
+// Runs on OS stack.
+// duration (in -100ns units) is in dt+0(FP).
+// g may be nil.
+TEXT runtime·usleep2(SB),NOSPLIT,$32-4
+ MOVW dt+0(FP), R0
+ MOVD $16(RSP), R2 // R2 = pTime
+ MOVD R0, 0(R2) // *pTime = -dt
+ MOVD $-1, R0 // R0 = handle
+ MOVD $0, R1 // R1 = FALSE (alertable)
+ MOVD runtime·_NtWaitForSingleObject(SB), R3
+ SUB $16, RSP // skip over saved frame pointer below RSP
+ BL (R3)
+ ADD $16, RSP
+ RET
+
+// Runs on OS stack.
+// duration (in -100ns units) is in dt+0(FP).
+// g is valid.
+// TODO: neeeds to be implemented properly.
+TEXT runtime·usleep2HighRes(SB),NOSPLIT,$0-4
+ B runtime·abort(SB)
+
+// Runs on OS stack.
+TEXT runtime·switchtothread(SB),NOSPLIT,$16-0
+ MOVD runtime·_SwitchToThread(SB), R0
+ SUB $16, RSP // skip over saved frame pointer below RSP
+ BL (R0)
+ ADD $16, RSP
+ RET
+
+// See http://www.dcl.hpi.uni-potsdam.de/research/WRK/2007/08/getting-os-information-the-kuser_shared_data-structure/
+// Must read hi1, then lo, then hi2. The snapshot is valid if hi1 == hi2.
+#define _INTERRUPT_TIME 0x7ffe0008
+#define _SYSTEM_TIME 0x7ffe0014
+#define time_lo 0
+#define time_hi1 4
+#define time_hi2 8
+
+TEXT runtime·nanotime1(SB),NOSPLIT|NOFRAME,$0-8
+ MOVB runtime·useQPCTime(SB), R0
+ CMP $0, R0
+ BNE useQPC
+ MOVD $_INTERRUPT_TIME, R3
+loop:
+ MOVWU time_hi1(R3), R1
+ MOVWU time_lo(R3), R0
+ MOVWU time_hi2(R3), R2
+ CMP R1, R2
+ BNE loop
+
+ // wintime = R1:R0, multiply by 100
+ ORR R1<<32, R0
+ MOVD $100, R1
+ MUL R1, R0
+ MOVD R0, ret+0(FP)
+ RET
+useQPC:
+ B runtime·nanotimeQPC(SB) // tail call
+
+TEXT time·now(SB),NOSPLIT|NOFRAME,$0-24
+ MOVB runtime·useQPCTime(SB), R0
+ CMP $0, R0
+ BNE useQPC
+ MOVD $_INTERRUPT_TIME, R3
+loop:
+ MOVWU time_hi1(R3), R1
+ MOVWU time_lo(R3), R0
+ MOVWU time_hi2(R3), R2
+ CMP R1, R2
+ BNE loop
+
+ // wintime = R1:R0, multiply by 100
+ ORR R1<<32, R0
+ MOVD $100, R1
+ MUL R1, R0
+ MOVD R0, mono+16(FP)
+
+ MOVD $_SYSTEM_TIME, R3
+wall:
+ MOVWU time_hi1(R3), R1
+ MOVWU time_lo(R3), R0
+ MOVWU time_hi2(R3), R2
+ CMP R1, R2
+ BNE wall
+
+ // w = R1:R0 in 100ns units
+ // convert to Unix epoch (but still 100ns units)
+ #define delta 116444736000000000
+ ORR R1<<32, R0
+ SUB $delta, R0
+
+ // Convert to nSec
+ MOVD $100, R1
+ MUL R1, R0
+
+ // Code stolen from compiler output for:
+ //
+ // var x uint64
+ // func f() (sec uint64, nsec uint32) { return x / 1000000000, uint32(x % 100000000) }
+ //
+ LSR $1, R0, R1
+ MOVD $-8543223759426509416, R2
+ UMULH R2, R1, R1
+ LSR $28, R1, R1
+ MOVD R1, sec+0(FP)
+ MOVD $-6067343680855748867, R1
+ UMULH R0, R1, R1
+ LSR $26, R1, R1
+ MOVD $100000000, R2
+ MSUB R1, R0, R2, R0
+ MOVW R0, nsec+8(FP)
+ RET
+useQPC:
+ B runtime·nowQPC(SB) // tail call
+
+// This is called from rt0_go, which runs on the system stack
+// using the initial stack allocated by the OS.
+// It calls back into standard C using the BL below.
+TEXT runtime·wintls(SB),NOSPLIT,$0
+ // Allocate a TLS slot to hold g across calls to external code
+ MOVD runtime·_TlsAlloc(SB), R0
+ SUB $16, RSP // skip over saved frame pointer below RSP
+ BL (R0)
+ ADD $16, RSP
+
+ // Assert that slot is less than 64 so we can use _TEB->TlsSlots
+ CMP $64, R0
+ BLT ok
+ MOVD $runtime·abort(SB), R1
+ BL (R1)
+ok:
+
+ // Save offset from R18 into tls_g.
+ LSL $3, R1
+ ADD $TEB_TlsSlots, R1
+ MOVD R1, runtime·tls_g(SB)
+ RET
diff --git a/src/runtime/sys_x86.go b/src/runtime/sys_x86.go
index f917cb8bd7..8f21585d28 100644
--- a/src/runtime/sys_x86.go
+++ b/src/runtime/sys_x86.go
@@ -12,13 +12,9 @@ import (
)
// adjust Gobuf as if it executed a call to fn with context ctxt
-// and then did an immediate gosave.
+// and then stopped before the first instruction in fn.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
sp := buf.sp
- if sys.RegSize > sys.PtrSize {
- sp -= sys.PtrSize
- *(*uintptr)(unsafe.Pointer(sp)) = 0
- }
sp -= sys.PtrSize
*(*uintptr)(unsafe.Pointer(sp)) = buf.pc
buf.sp = sp
diff --git a/src/runtime/syscall2_solaris.go b/src/runtime/syscall2_solaris.go
index e098e8006a..3310489202 100644
--- a/src/runtime/syscall2_solaris.go
+++ b/src/runtime/syscall2_solaris.go
@@ -15,7 +15,6 @@ import _ "unsafe" // for go:linkname
//go:cgo_import_dynamic libc_gethostname gethostname "libc.so"
//go:cgo_import_dynamic libc_getpid getpid "libc.so"
//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
-//go:cgo_import_dynamic libc_pipe pipe "libc.so"
//go:cgo_import_dynamic libc_setgid setgid "libc.so"
//go:cgo_import_dynamic libc_setgroups setgroups "libc.so"
//go:cgo_import_dynamic libc_setsid setsid "libc.so"
@@ -33,7 +32,6 @@ import _ "unsafe" // for go:linkname
//go:linkname libc_gethostname libc_gethostname
//go:linkname libc_getpid libc_getpid
//go:linkname libc_ioctl libc_ioctl
-//go:linkname libc_pipe libc_pipe
//go:linkname libc_setgid libc_setgid
//go:linkname libc_setgroups libc_setgroups
//go:linkname libc_setsid libc_setsid
diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go
index 7835b492f7..7cf9318bdb 100644
--- a/src/runtime/syscall_windows.go
+++ b/src/runtime/syscall_windows.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "internal/abi"
"runtime/internal/sys"
"unsafe"
)
@@ -70,8 +71,8 @@ func callbackasmAddr(i int) uintptr {
panic("unsupported architecture")
case "386", "amd64":
entrySize = 5
- case "arm":
- // On ARM, each entry is a MOV instruction
+ case "arm", "arm64":
+ // On ARM and ARM64, each entry is a MOV instruction
// followed by a branch instruction
entrySize = 8
}
@@ -115,13 +116,14 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) {
// registers and the stack.
panic("compileCallback: argument size is larger than uintptr")
}
- if k := t.kind & kindMask; (GOARCH == "amd64" || GOARCH == "arm") && (k == kindFloat32 || k == kindFloat64) {
+ if k := t.kind & kindMask; GOARCH != "386" && (k == kindFloat32 || k == kindFloat64) {
// In fastcall, floating-point arguments in
// the first four positions are passed in
// floating-point registers, which we don't
// currently spill. arm passes floating-point
// arguments in VFP registers, which we also
// don't support.
+ // So basically we only support 386.
panic("compileCallback: float arguments not supported")
}
@@ -146,6 +148,7 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) {
}
// cdecl, stdcall, fastcall, and arm pad arguments to word size.
+ // TODO(rsc): On arm and arm64 do we need to skip the caller's saved LR?
src += sys.PtrSize
// The Go ABI packs arguments.
dst += t.size
@@ -242,7 +245,11 @@ func callbackWrap(a *callbackArgs) {
// Even though this is copying back results, we can pass a nil
// type because those results must not require write barriers.
- reflectcall(nil, unsafe.Pointer(c.fn), noescape(goArgs), uint32(c.retOffset)+sys.PtrSize, uint32(c.retOffset))
+ //
+ // Pass a dummy RegArgs for now.
+ // TODO(mknyszek): Pass arguments in registers.
+ var regs abi.RegArgs
+ reflectcall(nil, unsafe.Pointer(c.fn), noescape(goArgs), uint32(c.retOffset)+sys.PtrSize, uint32(c.retOffset), uint32(c.retOffset)+sys.PtrSize, &regs)
// Extract the result.
a.result = *(*uintptr)(unsafe.Pointer(&frame[c.retOffset]))
diff --git a/src/runtime/testdata/testprog/deadlock.go b/src/runtime/testdata/testprog/deadlock.go
index 105d6a5faa..781acbd770 100644
--- a/src/runtime/testdata/testprog/deadlock.go
+++ b/src/runtime/testdata/testprog/deadlock.go
@@ -25,6 +25,7 @@ func init() {
register("RecursivePanic2", RecursivePanic2)
register("RecursivePanic3", RecursivePanic3)
register("RecursivePanic4", RecursivePanic4)
+ register("RecursivePanic5", RecursivePanic5)
register("GoexitExit", GoexitExit)
register("GoNil", GoNil)
register("MainGoroutineID", MainGoroutineID)
@@ -160,6 +161,44 @@ func RecursivePanic4() {
panic("first panic")
}
+// Test case where we have an open-coded defer higher up the stack (in two), and
+// in the current function (three) we recover in a defer while we still have
+// another defer to be processed.
+func RecursivePanic5() {
+ one()
+ panic("third panic")
+}
+
+//go:noinline
+func one() {
+ two()
+}
+
+//go:noinline
+func two() {
+ defer func() {
+ }()
+
+ three()
+}
+
+//go:noinline
+func three() {
+ defer func() {
+ }()
+
+ defer func() {
+ fmt.Println(recover())
+ }()
+
+ defer func() {
+ fmt.Println(recover())
+ panic("second panic")
+ }()
+
+ panic("first panic")
+}
+
func GoexitExit() {
println("t1")
go func() {
diff --git a/src/runtime/testdata/testwinsignal/main.go b/src/runtime/testdata/testwinsignal/main.go
new file mode 100644
index 0000000000..d8cd884ffa
--- /dev/null
+++ b/src/runtime/testdata/testwinsignal/main.go
@@ -0,0 +1,19 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "time"
+)
+
+func main() {
+ c := make(chan os.Signal, 1)
+ signal.Notify(c)
+
+ fmt.Println("ready")
+ sig := <-c
+
+ time.Sleep(time.Second)
+ fmt.Println(sig)
+}
diff --git a/src/runtime/textflag.h b/src/runtime/textflag.h
index daca36d948..e727208cd0 100644
--- a/src/runtime/textflag.h
+++ b/src/runtime/textflag.h
@@ -35,3 +35,5 @@
// Function is the top of the call stack. Call stack unwinders should stop
// at this function.
#define TOPFRAME 2048
+// Function is an ABI wrapper.
+#define ABIWRAPPER 4096
diff --git a/src/runtime/time.go b/src/runtime/time.go
index d338705b7c..8ab2a03430 100644
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -609,8 +609,14 @@ func moveTimers(pp *p, timers []*timer) {
for {
switch s := atomic.Load(&t.status); s {
case timerWaiting:
+ if !atomic.Cas(&t.status, s, timerMoving) {
+ continue
+ }
t.pp = 0
doaddtimer(pp, t)
+ if !atomic.Cas(&t.status, timerMoving, timerWaiting) {
+ badTimer()
+ }
break loop
case timerModifiedEarlier, timerModifiedLater:
if !atomic.Cas(&t.status, s, timerMoving) {
diff --git a/src/runtime/timestub2.go b/src/runtime/timestub2.go
index 6d73aabc35..68777ee4a9 100644
--- a/src/runtime/timestub2.go
+++ b/src/runtime/timestub2.go
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !aix
// +build !darwin
-// +build !windows
// +build !freebsd
-// +build !aix
+// +build !openbsd
// +build !solaris
+// +build !windows
package runtime
diff --git a/src/runtime/tls_arm64.h b/src/runtime/tls_arm64.h
index 0804fa3502..fe5e4cee12 100644
--- a/src/runtime/tls_arm64.h
+++ b/src/runtime/tls_arm64.h
@@ -41,8 +41,16 @@
#define MRS_TPIDR_R0 WORD $0xd53bd040 // MRS TPIDR_EL0, R0
#endif
+#ifdef GOOS_windows
+#define TLS_windows
+#endif
+#ifdef TLS_windows
+#define TLSG_IS_VARIABLE
+#define MRS_TPIDR_R0 MOVD R18_PLATFORM, R0
+#endif
+
// Define something that will break the build if
// the GOOS is unknown.
-#ifndef TPIDR
-#define MRS_TPIDR_R0 TPIDR_UNKNOWN
+#ifndef MRS_TPIDR_R0
+#define MRS_TPIDR_R0 unknown_TLS_implementation_in_tls_arm64_h
#endif
diff --git a/src/runtime/tls_arm64.s b/src/runtime/tls_arm64.s
index 3f02974d5b..52b3e8f222 100644
--- a/src/runtime/tls_arm64.s
+++ b/src/runtime/tls_arm64.s
@@ -9,10 +9,14 @@
#include "tls_arm64.h"
TEXT runtime·load_g(SB),NOSPLIT,$0
-#ifndef TLS_darwin
+#ifndef GOOS_darwin
+#ifndef GOOS_openbsd
+#ifndef GOOS_windows
MOVB runtime·iscgo(SB), R0
CBZ R0, nocgo
#endif
+#endif
+#endif
MRS_TPIDR_R0
#ifdef TLS_darwin
@@ -26,10 +30,14 @@ nocgo:
RET
TEXT runtime·save_g(SB),NOSPLIT,$0
-#ifndef TLS_darwin
+#ifndef GOOS_darwin
+#ifndef GOOS_openbsd
+#ifndef GOOS_windows
MOVB runtime·iscgo(SB), R0
CBZ R0, nocgo
#endif
+#endif
+#endif
MRS_TPIDR_R0
#ifdef TLS_darwin
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 0825e9e707..eb185eecd3 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -15,24 +15,9 @@ import (
// The most important fact about a given architecture is whether it uses a link register.
// On systems with link registers, the prologue for a non-leaf function stores the
// incoming value of LR at the bottom of the newly allocated stack frame.
-// On systems without link registers, the architecture pushes a return PC during
+// On systems without link registers (x86), the architecture pushes a return PC during
// the call instruction, so the return PC ends up above the stack frame.
// In this file, the return PC is always called LR, no matter how it was found.
-//
-// To date, the opposite of a link register architecture is an x86 architecture.
-// This code may need to change if some other kind of non-link-register
-// architecture comes along.
-//
-// The other important fact is the size of a pointer: on 32-bit systems the LR
-// takes up only 4 bytes on the stack, while on 64-bit systems it takes up 8 bytes.
-// Typically this is ptrSize.
-//
-// As an exception, amd64p32 had ptrSize == 4 but the CALL instruction still
-// stored an 8-byte return PC onto the stack. To accommodate this, we used regSize
-// as the size of the architecture-pushed return PC.
-//
-// usesLR is defined below in terms of minFrameSize, which is defined in
-// arch_$GOARCH.go. ptrSize and regSize are defined in stubs.go.
const usesLR = sys.MinFrameSize > 0
@@ -144,8 +129,8 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
frame.lr = 0
} else {
- frame.pc = uintptr(*(*sys.Uintreg)(unsafe.Pointer(frame.sp)))
- frame.sp += sys.RegSize
+ frame.pc = uintptr(*(*uintptr)(unsafe.Pointer(frame.sp)))
+ frame.sp += sys.PtrSize
}
}
@@ -180,6 +165,16 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
break
}
+ // Compute function info flags.
+ flag := f.flag
+ if f.funcID == funcID_cgocallback {
+ // cgocallback does write SP to switch from the g0 to the curg stack,
+ // but it carefully arranges that during the transition BOTH stacks
+ // have cgocallback frame valid for unwinding through.
+ // So we don't need to exclude it with the other SP-writing functions.
+ flag &^= funcFlag_SPWRITE
+ }
+
// Found an actual function.
// Derive frame pointer and link register.
if frame.fp == 0 {
@@ -196,6 +191,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
frame.pc = gp.m.curg.sched.pc
frame.fn = findfunc(frame.pc)
f = frame.fn
+ flag = f.flag
frame.sp = gp.m.curg.sched.sp
cgoCtxt = gp.m.curg.cgoCtxt
case funcID_systemstack:
@@ -203,29 +199,37 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
// stack transition.
frame.sp = gp.m.curg.sched.sp
cgoCtxt = gp.m.curg.cgoCtxt
+ flag &^= funcFlag_SPWRITE
}
}
frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc, &cache))
if !usesLR {
// On x86, call instruction pushes return PC before entering new function.
- frame.fp += sys.RegSize
+ frame.fp += sys.PtrSize
}
}
var flr funcInfo
- if topofstack(f, gp.m != nil && gp == gp.m.g0) {
+ if flag&funcFlag_TOPFRAME != 0 {
+ // This function marks the top of the stack. Stop the traceback.
frame.lr = 0
flr = funcInfo{}
- } else if usesLR && f.funcID == funcID_jmpdefer {
- // jmpdefer modifies SP/LR/PC non-atomically.
- // If a profiling interrupt arrives during jmpdefer,
- // the stack unwind may see a mismatched register set
- // and get confused. Stop if we see PC within jmpdefer
- // to avoid that confusion.
- // See golang.org/issue/8153.
+ } else if flag&funcFlag_SPWRITE != 0 {
+ // The function we are in does a write to SP that we don't know
+ // how to encode in the spdelta table. Examples include context
+ // switch routines like runtime.gogo but also any code that switches
+ // to the g0 stack to run host C code. Since we can't reliably unwind
+ // the SP (we might not even be on the stack we think we are),
+ // we stop the traceback here.
if callback != nil {
- throw("traceback_arm: found jmpdefer when tracing with callback")
+ // Finding an SPWRITE should only happen for a profiling signal, which can
+ // arrive at any time. For a GC stack traversal (callback != nil),
+ // we shouldn't see this case, and we must be sure to walk the
+ // entire stack or the GC is invalid. So crash.
+ println("traceback: unexpected SPWRITE function", funcname(f))
+ throw("traceback")
}
frame.lr = 0
+ flr = funcInfo{}
} else {
var lrPtr uintptr
if usesLR {
@@ -235,8 +239,8 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
}
} else {
if frame.lr == 0 {
- lrPtr = frame.fp - sys.RegSize
- frame.lr = uintptr(*(*sys.Uintreg)(unsafe.Pointer(lrPtr)))
+ lrPtr = frame.fp - sys.PtrSize
+ frame.lr = uintptr(*(*uintptr)(unsafe.Pointer(lrPtr)))
}
}
flr = findfunc(frame.lr)
@@ -266,13 +270,28 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
frame.varp = frame.fp
if !usesLR {
// On x86, call instruction pushes return PC before entering new function.
- frame.varp -= sys.RegSize
+ frame.varp -= sys.PtrSize
}
// For architectures with frame pointers, if there's
// a frame, then there's a saved frame pointer here.
- if frame.varp > frame.sp && (GOARCH == "amd64" || GOARCH == "arm64") {
- frame.varp -= sys.RegSize
+ //
+ // NOTE: This code is not as general as it looks.
+ // On x86, the ABI is to save the frame pointer word at the
+ // top of the stack frame, so we have to back down over it.
+ // On arm64, the frame pointer should be at the bottom of
+ // the stack (with R29 (aka FP) = RSP), in which case we would
+ // not want to do the subtraction here. But we started out without
+ // any frame pointer, and when we wanted to add it, we didn't
+ // want to break all the assembly doing direct writes to 8(RSP)
+ // to set the first parameter to a called function.
+ // So we decided to write the FP link *below* the stack pointer
+ // (with R29 = RSP - 8 in Go functions).
+ // This is technically ABI-compatible but not standard.
+ // And it happens to end up mimicking the x86 layout.
+ // Other architectures may make different decisions.
+ if frame.varp > frame.sp && framepointer_enabled {
+ frame.varp -= sys.PtrSize
}
// Derive size of arguments.
@@ -490,11 +509,7 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
// before faking a call.
if usesLR && injectedCall {
x := *(*uintptr)(unsafe.Pointer(frame.sp))
- frame.sp += sys.MinFrameSize
- if GOARCH == "arm64" {
- // arm64 needs 16-byte aligned SP, always
- frame.sp += sys.PtrSize
- }
+ frame.sp += alignUp(sys.MinFrameSize, sys.StackAlign)
f = findfunc(frame.pc)
frame.fn = f
if !f.valid() {
@@ -917,17 +932,25 @@ func tracebackothers(me *g) {
level, _, _ := gotraceback()
// Show the current goroutine first, if we haven't already.
- g := getg()
- gp := g.m.curg
- if gp != nil && gp != me {
+ curgp := getg().m.curg
+ if curgp != nil && curgp != me {
print("\n")
- goroutineheader(gp)
- traceback(^uintptr(0), ^uintptr(0), 0, gp)
+ goroutineheader(curgp)
+ traceback(^uintptr(0), ^uintptr(0), 0, curgp)
}
- lock(&allglock)
- for _, gp := range allgs {
- if gp == me || gp == g.m.curg || readgstatus(gp) == _Gdead || isSystemGoroutine(gp, false) && level < 2 {
+ // We can't take allglock here because this may be during fatal
+ // throw/panic, where locking allglock could be out-of-order or a
+ // direct deadlock.
+ //
+ // Instead, use atomic access to allgs which requires no locking. We
+ // don't lock against concurrent creation of new Gs, but even with
+ // allglock we may miss Gs created after this loop.
+ ptr, length := atomicAllG()
+ for i := uintptr(0); i < length; i++ {
+ gp := atomicAllGIndex(ptr, i)
+
+ if gp == me || gp == curgp || readgstatus(gp) == _Gdead || isSystemGoroutine(gp, false) && level < 2 {
continue
}
print("\n")
@@ -936,14 +959,13 @@ func tracebackothers(me *g) {
// called from a signal handler initiated during a
// systemstack call. The original G is still in the
// running state, and we want to print its stack.
- if gp.m != g.m && readgstatus(gp)&^_Gscan == _Grunning {
+ if gp.m != getg().m && readgstatus(gp)&^_Gscan == _Grunning {
print("\tgoroutine running on other thread; stack unavailable\n")
printcreatedby(gp)
} else {
traceback(^uintptr(0), ^uintptr(0), 0, gp)
}
}
- unlock(&allglock)
}
// tracebackHexdump hexdumps part of stk around frame.sp and frame.fp
@@ -993,22 +1015,6 @@ func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) {
})
}
-// Does f mark the top of a goroutine stack?
-func topofstack(f funcInfo, g0 bool) bool {
- return f.funcID == funcID_goexit ||
- f.funcID == funcID_mstart ||
- f.funcID == funcID_mcall ||
- f.funcID == funcID_morestack ||
- f.funcID == funcID_rt0_go ||
- f.funcID == funcID_externalthreadhandler ||
- // asmcgocall is TOS on the system stack because it
- // switches to the system stack, but in this case we
- // can come back to the regular stack and still want
- // to be able to unwind through the call that appeared
- // on the regular stack.
- (g0 && f.funcID == funcID_asmcgocall)
-}
-
// isSystemGoroutine reports whether the goroutine g must be omitted
// in stack dumps and deadlock detector. This is any goroutine that
// starts at a runtime.* entry point, except for runtime.main,
diff --git a/src/runtime/type.go b/src/runtime/type.go
index 81455f3532..18fc4bbfad 100644
--- a/src/runtime/type.go
+++ b/src/runtime/type.go
@@ -383,7 +383,7 @@ type maptype struct {
}
// Note: flag values must match those used in the TMAP case
-// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
+// in ../cmd/compile/internal/gc/reflect.go:writeType.
func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself
return mt.flags&1 != 0
}
diff --git a/src/runtime/wincallback.go b/src/runtime/wincallback.go
index fb452222da..cf3327c6fe 100644
--- a/src/runtime/wincallback.go
+++ b/src/runtime/wincallback.go
@@ -72,6 +72,34 @@ TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
}
}
+func genasmArm64() {
+ var buf bytes.Buffer
+
+ buf.WriteString(`// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
+
+// External code calls into callbackasm at an offset corresponding
+// to the callback index. Callbackasm is a table of MOV and B instructions.
+// The MOV instruction loads R12 with the callback index, and the
+// B instruction branches to callbackasm1.
+// callbackasm1 takes the callback index from R12 and
+// indexes into an array that stores information about each callback.
+// It then calls the Go implementation for that callback.
+#include "textflag.h"
+
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
+`)
+ for i := 0; i < maxCallback; i++ {
+ buf.WriteString(fmt.Sprintf("\tMOVD\t$%d, R12\n", i))
+ buf.WriteString("\tB\truntime·callbackasm1(SB)\n")
+ }
+
+ err := os.WriteFile("zcallback_windows_arm64.s", buf.Bytes(), 0666)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "wincallback: %s\n", err)
+ os.Exit(2)
+ }
+}
+
func gengo() {
var buf bytes.Buffer
@@ -91,5 +119,6 @@ const cb_max = %d // maximum number of windows callbacks allowed
func main() {
genasm386Amd64()
genasmArm()
+ genasmArm64()
gengo()
}
diff --git a/src/runtime/zcallback_windows_arm64.s b/src/runtime/zcallback_windows_arm64.s
new file mode 100644
index 0000000000..69fb05788c
--- /dev/null
+++ b/src/runtime/zcallback_windows_arm64.s
@@ -0,0 +1,4012 @@
+// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
+
+// External code calls into callbackasm at an offset corresponding
+// to the callback index. Callbackasm is a table of MOV and B instructions.
+// The MOV instruction loads R12 with the callback index, and the
+// B instruction branches to callbackasm1.
+// callbackasm1 takes the callback index from R12 and
+// indexes into an array that stores information about each callback.
+// It then calls the Go implementation for that callback.
+#include "textflag.h"
+
+TEXT runtime·callbackasm(SB),NOSPLIT|NOFRAME,$0
+ MOVD $0, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1, R12
+ B runtime·callbackasm1(SB)
+ MOVD $2, R12
+ B runtime·callbackasm1(SB)
+ MOVD $3, R12
+ B runtime·callbackasm1(SB)
+ MOVD $4, R12
+ B runtime·callbackasm1(SB)
+ MOVD $5, R12
+ B runtime·callbackasm1(SB)
+ MOVD $6, R12
+ B runtime·callbackasm1(SB)
+ MOVD $7, R12
+ B runtime·callbackasm1(SB)
+ MOVD $8, R12
+ B runtime·callbackasm1(SB)
+ MOVD $9, R12
+ B runtime·callbackasm1(SB)
+ MOVD $10, R12
+ B runtime·callbackasm1(SB)
+ MOVD $11, R12
+ B runtime·callbackasm1(SB)
+ MOVD $12, R12
+ B runtime·callbackasm1(SB)
+ MOVD $13, R12
+ B runtime·callbackasm1(SB)
+ MOVD $14, R12
+ B runtime·callbackasm1(SB)
+ MOVD $15, R12
+ B runtime·callbackasm1(SB)
+ MOVD $16, R12
+ B runtime·callbackasm1(SB)
+ MOVD $17, R12
+ B runtime·callbackasm1(SB)
+ MOVD $18, R12
+ B runtime·callbackasm1(SB)
+ MOVD $19, R12
+ B runtime·callbackasm1(SB)
+ MOVD $20, R12
+ B runtime·callbackasm1(SB)
+ MOVD $21, R12
+ B runtime·callbackasm1(SB)
+ MOVD $22, R12
+ B runtime·callbackasm1(SB)
+ MOVD $23, R12
+ B runtime·callbackasm1(SB)
+ MOVD $24, R12
+ B runtime·callbackasm1(SB)
+ MOVD $25, R12
+ B runtime·callbackasm1(SB)
+ MOVD $26, R12
+ B runtime·callbackasm1(SB)
+ MOVD $27, R12
+ B runtime·callbackasm1(SB)
+ MOVD $28, R12
+ B runtime·callbackasm1(SB)
+ MOVD $29, R12
+ B runtime·callbackasm1(SB)
+ MOVD $30, R12
+ B runtime·callbackasm1(SB)
+ MOVD $31, R12
+ B runtime·callbackasm1(SB)
+ MOVD $32, R12
+ B runtime·callbackasm1(SB)
+ MOVD $33, R12
+ B runtime·callbackasm1(SB)
+ MOVD $34, R12
+ B runtime·callbackasm1(SB)
+ MOVD $35, R12
+ B runtime·callbackasm1(SB)
+ MOVD $36, R12
+ B runtime·callbackasm1(SB)
+ MOVD $37, R12
+ B runtime·callbackasm1(SB)
+ MOVD $38, R12
+ B runtime·callbackasm1(SB)
+ MOVD $39, R12
+ B runtime·callbackasm1(SB)
+ MOVD $40, R12
+ B runtime·callbackasm1(SB)
+ MOVD $41, R12
+ B runtime·callbackasm1(SB)
+ MOVD $42, R12
+ B runtime·callbackasm1(SB)
+ MOVD $43, R12
+ B runtime·callbackasm1(SB)
+ MOVD $44, R12
+ B runtime·callbackasm1(SB)
+ MOVD $45, R12
+ B runtime·callbackasm1(SB)
+ MOVD $46, R12
+ B runtime·callbackasm1(SB)
+ MOVD $47, R12
+ B runtime·callbackasm1(SB)
+ MOVD $48, R12
+ B runtime·callbackasm1(SB)
+ MOVD $49, R12
+ B runtime·callbackasm1(SB)
+ MOVD $50, R12
+ B runtime·callbackasm1(SB)
+ MOVD $51, R12
+ B runtime·callbackasm1(SB)
+ MOVD $52, R12
+ B runtime·callbackasm1(SB)
+ MOVD $53, R12
+ B runtime·callbackasm1(SB)
+ MOVD $54, R12
+ B runtime·callbackasm1(SB)
+ MOVD $55, R12
+ B runtime·callbackasm1(SB)
+ MOVD $56, R12
+ B runtime·callbackasm1(SB)
+ MOVD $57, R12
+ B runtime·callbackasm1(SB)
+ MOVD $58, R12
+ B runtime·callbackasm1(SB)
+ MOVD $59, R12
+ B runtime·callbackasm1(SB)
+ MOVD $60, R12
+ B runtime·callbackasm1(SB)
+ MOVD $61, R12
+ B runtime·callbackasm1(SB)
+ MOVD $62, R12
+ B runtime·callbackasm1(SB)
+ MOVD $63, R12
+ B runtime·callbackasm1(SB)
+ MOVD $64, R12
+ B runtime·callbackasm1(SB)
+ MOVD $65, R12
+ B runtime·callbackasm1(SB)
+ MOVD $66, R12
+ B runtime·callbackasm1(SB)
+ MOVD $67, R12
+ B runtime·callbackasm1(SB)
+ MOVD $68, R12
+ B runtime·callbackasm1(SB)
+ MOVD $69, R12
+ B runtime·callbackasm1(SB)
+ MOVD $70, R12
+ B runtime·callbackasm1(SB)
+ MOVD $71, R12
+ B runtime·callbackasm1(SB)
+ MOVD $72, R12
+ B runtime·callbackasm1(SB)
+ MOVD $73, R12
+ B runtime·callbackasm1(SB)
+ MOVD $74, R12
+ B runtime·callbackasm1(SB)
+ MOVD $75, R12
+ B runtime·callbackasm1(SB)
+ MOVD $76, R12
+ B runtime·callbackasm1(SB)
+ MOVD $77, R12
+ B runtime·callbackasm1(SB)
+ MOVD $78, R12
+ B runtime·callbackasm1(SB)
+ MOVD $79, R12
+ B runtime·callbackasm1(SB)
+ MOVD $80, R12
+ B runtime·callbackasm1(SB)
+ MOVD $81, R12
+ B runtime·callbackasm1(SB)
+ MOVD $82, R12
+ B runtime·callbackasm1(SB)
+ MOVD $83, R12
+ B runtime·callbackasm1(SB)
+ MOVD $84, R12
+ B runtime·callbackasm1(SB)
+ MOVD $85, R12
+ B runtime·callbackasm1(SB)
+ MOVD $86, R12
+ B runtime·callbackasm1(SB)
+ MOVD $87, R12
+ B runtime·callbackasm1(SB)
+ MOVD $88, R12
+ B runtime·callbackasm1(SB)
+ MOVD $89, R12
+ B runtime·callbackasm1(SB)
+ MOVD $90, R12
+ B runtime·callbackasm1(SB)
+ MOVD $91, R12
+ B runtime·callbackasm1(SB)
+ MOVD $92, R12
+ B runtime·callbackasm1(SB)
+ MOVD $93, R12
+ B runtime·callbackasm1(SB)
+ MOVD $94, R12
+ B runtime·callbackasm1(SB)
+ MOVD $95, R12
+ B runtime·callbackasm1(SB)
+ MOVD $96, R12
+ B runtime·callbackasm1(SB)
+ MOVD $97, R12
+ B runtime·callbackasm1(SB)
+ MOVD $98, R12
+ B runtime·callbackasm1(SB)
+ MOVD $99, R12
+ B runtime·callbackasm1(SB)
+ MOVD $100, R12
+ B runtime·callbackasm1(SB)
+ MOVD $101, R12
+ B runtime·callbackasm1(SB)
+ MOVD $102, R12
+ B runtime·callbackasm1(SB)
+ MOVD $103, R12
+ B runtime·callbackasm1(SB)
+ MOVD $104, R12
+ B runtime·callbackasm1(SB)
+ MOVD $105, R12
+ B runtime·callbackasm1(SB)
+ MOVD $106, R12
+ B runtime·callbackasm1(SB)
+ MOVD $107, R12
+ B runtime·callbackasm1(SB)
+ MOVD $108, R12
+ B runtime·callbackasm1(SB)
+ MOVD $109, R12
+ B runtime·callbackasm1(SB)
+ MOVD $110, R12
+ B runtime·callbackasm1(SB)
+ MOVD $111, R12
+ B runtime·callbackasm1(SB)
+ MOVD $112, R12
+ B runtime·callbackasm1(SB)
+ MOVD $113, R12
+ B runtime·callbackasm1(SB)
+ MOVD $114, R12
+ B runtime·callbackasm1(SB)
+ MOVD $115, R12
+ B runtime·callbackasm1(SB)
+ MOVD $116, R12
+ B runtime·callbackasm1(SB)
+ MOVD $117, R12
+ B runtime·callbackasm1(SB)
+ MOVD $118, R12
+ B runtime·callbackasm1(SB)
+ MOVD $119, R12
+ B runtime·callbackasm1(SB)
+ MOVD $120, R12
+ B runtime·callbackasm1(SB)
+ MOVD $121, R12
+ B runtime·callbackasm1(SB)
+ MOVD $122, R12
+ B runtime·callbackasm1(SB)
+ MOVD $123, R12
+ B runtime·callbackasm1(SB)
+ MOVD $124, R12
+ B runtime·callbackasm1(SB)
+ MOVD $125, R12
+ B runtime·callbackasm1(SB)
+ MOVD $126, R12
+ B runtime·callbackasm1(SB)
+ MOVD $127, R12
+ B runtime·callbackasm1(SB)
+ MOVD $128, R12
+ B runtime·callbackasm1(SB)
+ MOVD $129, R12
+ B runtime·callbackasm1(SB)
+ MOVD $130, R12
+ B runtime·callbackasm1(SB)
+ MOVD $131, R12
+ B runtime·callbackasm1(SB)
+ MOVD $132, R12
+ B runtime·callbackasm1(SB)
+ MOVD $133, R12
+ B runtime·callbackasm1(SB)
+ MOVD $134, R12
+ B runtime·callbackasm1(SB)
+ MOVD $135, R12
+ B runtime·callbackasm1(SB)
+ MOVD $136, R12
+ B runtime·callbackasm1(SB)
+ MOVD $137, R12
+ B runtime·callbackasm1(SB)
+ MOVD $138, R12
+ B runtime·callbackasm1(SB)
+ MOVD $139, R12
+ B runtime·callbackasm1(SB)
+ MOVD $140, R12
+ B runtime·callbackasm1(SB)
+ MOVD $141, R12
+ B runtime·callbackasm1(SB)
+ MOVD $142, R12
+ B runtime·callbackasm1(SB)
+ MOVD $143, R12
+ B runtime·callbackasm1(SB)
+ MOVD $144, R12
+ B runtime·callbackasm1(SB)
+ MOVD $145, R12
+ B runtime·callbackasm1(SB)
+ MOVD $146, R12
+ B runtime·callbackasm1(SB)
+ MOVD $147, R12
+ B runtime·callbackasm1(SB)
+ MOVD $148, R12
+ B runtime·callbackasm1(SB)
+ MOVD $149, R12
+ B runtime·callbackasm1(SB)
+ MOVD $150, R12
+ B runtime·callbackasm1(SB)
+ MOVD $151, R12
+ B runtime·callbackasm1(SB)
+ MOVD $152, R12
+ B runtime·callbackasm1(SB)
+ MOVD $153, R12
+ B runtime·callbackasm1(SB)
+ MOVD $154, R12
+ B runtime·callbackasm1(SB)
+ MOVD $155, R12
+ B runtime·callbackasm1(SB)
+ MOVD $156, R12
+ B runtime·callbackasm1(SB)
+ MOVD $157, R12
+ B runtime·callbackasm1(SB)
+ MOVD $158, R12
+ B runtime·callbackasm1(SB)
+ MOVD $159, R12
+ B runtime·callbackasm1(SB)
+ MOVD $160, R12
+ B runtime·callbackasm1(SB)
+ MOVD $161, R12
+ B runtime·callbackasm1(SB)
+ MOVD $162, R12
+ B runtime·callbackasm1(SB)
+ MOVD $163, R12
+ B runtime·callbackasm1(SB)
+ MOVD $164, R12
+ B runtime·callbackasm1(SB)
+ MOVD $165, R12
+ B runtime·callbackasm1(SB)
+ MOVD $166, R12
+ B runtime·callbackasm1(SB)
+ MOVD $167, R12
+ B runtime·callbackasm1(SB)
+ MOVD $168, R12
+ B runtime·callbackasm1(SB)
+ MOVD $169, R12
+ B runtime·callbackasm1(SB)
+ MOVD $170, R12
+ B runtime·callbackasm1(SB)
+ MOVD $171, R12
+ B runtime·callbackasm1(SB)
+ MOVD $172, R12
+ B runtime·callbackasm1(SB)
+ MOVD $173, R12
+ B runtime·callbackasm1(SB)
+ MOVD $174, R12
+ B runtime·callbackasm1(SB)
+ MOVD $175, R12
+ B runtime·callbackasm1(SB)
+ MOVD $176, R12
+ B runtime·callbackasm1(SB)
+ MOVD $177, R12
+ B runtime·callbackasm1(SB)
+ MOVD $178, R12
+ B runtime·callbackasm1(SB)
+ MOVD $179, R12
+ B runtime·callbackasm1(SB)
+ MOVD $180, R12
+ B runtime·callbackasm1(SB)
+ MOVD $181, R12
+ B runtime·callbackasm1(SB)
+ MOVD $182, R12
+ B runtime·callbackasm1(SB)
+ MOVD $183, R12
+ B runtime·callbackasm1(SB)
+ MOVD $184, R12
+ B runtime·callbackasm1(SB)
+ MOVD $185, R12
+ B runtime·callbackasm1(SB)
+ MOVD $186, R12
+ B runtime·callbackasm1(SB)
+ MOVD $187, R12
+ B runtime·callbackasm1(SB)
+ MOVD $188, R12
+ B runtime·callbackasm1(SB)
+ MOVD $189, R12
+ B runtime·callbackasm1(SB)
+ MOVD $190, R12
+ B runtime·callbackasm1(SB)
+ MOVD $191, R12
+ B runtime·callbackasm1(SB)
+ MOVD $192, R12
+ B runtime·callbackasm1(SB)
+ MOVD $193, R12
+ B runtime·callbackasm1(SB)
+ MOVD $194, R12
+ B runtime·callbackasm1(SB)
+ MOVD $195, R12
+ B runtime·callbackasm1(SB)
+ MOVD $196, R12
+ B runtime·callbackasm1(SB)
+ MOVD $197, R12
+ B runtime·callbackasm1(SB)
+ MOVD $198, R12
+ B runtime·callbackasm1(SB)
+ MOVD $199, R12
+ B runtime·callbackasm1(SB)
+ MOVD $200, R12
+ B runtime·callbackasm1(SB)
+ MOVD $201, R12
+ B runtime·callbackasm1(SB)
+ MOVD $202, R12
+ B runtime·callbackasm1(SB)
+ MOVD $203, R12
+ B runtime·callbackasm1(SB)
+ MOVD $204, R12
+ B runtime·callbackasm1(SB)
+ MOVD $205, R12
+ B runtime·callbackasm1(SB)
+ MOVD $206, R12
+ B runtime·callbackasm1(SB)
+ MOVD $207, R12
+ B runtime·callbackasm1(SB)
+ MOVD $208, R12
+ B runtime·callbackasm1(SB)
+ MOVD $209, R12
+ B runtime·callbackasm1(SB)
+ MOVD $210, R12
+ B runtime·callbackasm1(SB)
+ MOVD $211, R12
+ B runtime·callbackasm1(SB)
+ MOVD $212, R12
+ B runtime·callbackasm1(SB)
+ MOVD $213, R12
+ B runtime·callbackasm1(SB)
+ MOVD $214, R12
+ B runtime·callbackasm1(SB)
+ MOVD $215, R12
+ B runtime·callbackasm1(SB)
+ MOVD $216, R12
+ B runtime·callbackasm1(SB)
+ MOVD $217, R12
+ B runtime·callbackasm1(SB)
+ MOVD $218, R12
+ B runtime·callbackasm1(SB)
+ MOVD $219, R12
+ B runtime·callbackasm1(SB)
+ MOVD $220, R12
+ B runtime·callbackasm1(SB)
+ MOVD $221, R12
+ B runtime·callbackasm1(SB)
+ MOVD $222, R12
+ B runtime·callbackasm1(SB)
+ MOVD $223, R12
+ B runtime·callbackasm1(SB)
+ MOVD $224, R12
+ B runtime·callbackasm1(SB)
+ MOVD $225, R12
+ B runtime·callbackasm1(SB)
+ MOVD $226, R12
+ B runtime·callbackasm1(SB)
+ MOVD $227, R12
+ B runtime·callbackasm1(SB)
+ MOVD $228, R12
+ B runtime·callbackasm1(SB)
+ MOVD $229, R12
+ B runtime·callbackasm1(SB)
+ MOVD $230, R12
+ B runtime·callbackasm1(SB)
+ MOVD $231, R12
+ B runtime·callbackasm1(SB)
+ MOVD $232, R12
+ B runtime·callbackasm1(SB)
+ MOVD $233, R12
+ B runtime·callbackasm1(SB)
+ MOVD $234, R12
+ B runtime·callbackasm1(SB)
+ MOVD $235, R12
+ B runtime·callbackasm1(SB)
+ MOVD $236, R12
+ B runtime·callbackasm1(SB)
+ MOVD $237, R12
+ B runtime·callbackasm1(SB)
+ MOVD $238, R12
+ B runtime·callbackasm1(SB)
+ MOVD $239, R12
+ B runtime·callbackasm1(SB)
+ MOVD $240, R12
+ B runtime·callbackasm1(SB)
+ MOVD $241, R12
+ B runtime·callbackasm1(SB)
+ MOVD $242, R12
+ B runtime·callbackasm1(SB)
+ MOVD $243, R12
+ B runtime·callbackasm1(SB)
+ MOVD $244, R12
+ B runtime·callbackasm1(SB)
+ MOVD $245, R12
+ B runtime·callbackasm1(SB)
+ MOVD $246, R12
+ B runtime·callbackasm1(SB)
+ MOVD $247, R12
+ B runtime·callbackasm1(SB)
+ MOVD $248, R12
+ B runtime·callbackasm1(SB)
+ MOVD $249, R12
+ B runtime·callbackasm1(SB)
+ MOVD $250, R12
+ B runtime·callbackasm1(SB)
+ MOVD $251, R12
+ B runtime·callbackasm1(SB)
+ MOVD $252, R12
+ B runtime·callbackasm1(SB)
+ MOVD $253, R12
+ B runtime·callbackasm1(SB)
+ MOVD $254, R12
+ B runtime·callbackasm1(SB)
+ MOVD $255, R12
+ B runtime·callbackasm1(SB)
+ MOVD $256, R12
+ B runtime·callbackasm1(SB)
+ MOVD $257, R12
+ B runtime·callbackasm1(SB)
+ MOVD $258, R12
+ B runtime·callbackasm1(SB)
+ MOVD $259, R12
+ B runtime·callbackasm1(SB)
+ MOVD $260, R12
+ B runtime·callbackasm1(SB)
+ MOVD $261, R12
+ B runtime·callbackasm1(SB)
+ MOVD $262, R12
+ B runtime·callbackasm1(SB)
+ MOVD $263, R12
+ B runtime·callbackasm1(SB)
+ MOVD $264, R12
+ B runtime·callbackasm1(SB)
+ MOVD $265, R12
+ B runtime·callbackasm1(SB)
+ MOVD $266, R12
+ B runtime·callbackasm1(SB)
+ MOVD $267, R12
+ B runtime·callbackasm1(SB)
+ MOVD $268, R12
+ B runtime·callbackasm1(SB)
+ MOVD $269, R12
+ B runtime·callbackasm1(SB)
+ MOVD $270, R12
+ B runtime·callbackasm1(SB)
+ MOVD $271, R12
+ B runtime·callbackasm1(SB)
+ MOVD $272, R12
+ B runtime·callbackasm1(SB)
+ MOVD $273, R12
+ B runtime·callbackasm1(SB)
+ MOVD $274, R12
+ B runtime·callbackasm1(SB)
+ MOVD $275, R12
+ B runtime·callbackasm1(SB)
+ MOVD $276, R12
+ B runtime·callbackasm1(SB)
+ MOVD $277, R12
+ B runtime·callbackasm1(SB)
+ MOVD $278, R12
+ B runtime·callbackasm1(SB)
+ MOVD $279, R12
+ B runtime·callbackasm1(SB)
+ MOVD $280, R12
+ B runtime·callbackasm1(SB)
+ MOVD $281, R12
+ B runtime·callbackasm1(SB)
+ MOVD $282, R12
+ B runtime·callbackasm1(SB)
+ MOVD $283, R12
+ B runtime·callbackasm1(SB)
+ MOVD $284, R12
+ B runtime·callbackasm1(SB)
+ MOVD $285, R12
+ B runtime·callbackasm1(SB)
+ MOVD $286, R12
+ B runtime·callbackasm1(SB)
+ MOVD $287, R12
+ B runtime·callbackasm1(SB)
+ MOVD $288, R12
+ B runtime·callbackasm1(SB)
+ MOVD $289, R12
+ B runtime·callbackasm1(SB)
+ MOVD $290, R12
+ B runtime·callbackasm1(SB)
+ MOVD $291, R12
+ B runtime·callbackasm1(SB)
+ MOVD $292, R12
+ B runtime·callbackasm1(SB)
+ MOVD $293, R12
+ B runtime·callbackasm1(SB)
+ MOVD $294, R12
+ B runtime·callbackasm1(SB)
+ MOVD $295, R12
+ B runtime·callbackasm1(SB)
+ MOVD $296, R12
+ B runtime·callbackasm1(SB)
+ MOVD $297, R12
+ B runtime·callbackasm1(SB)
+ MOVD $298, R12
+ B runtime·callbackasm1(SB)
+ MOVD $299, R12
+ B runtime·callbackasm1(SB)
+ MOVD $300, R12
+ B runtime·callbackasm1(SB)
+ MOVD $301, R12
+ B runtime·callbackasm1(SB)
+ MOVD $302, R12
+ B runtime·callbackasm1(SB)
+ MOVD $303, R12
+ B runtime·callbackasm1(SB)
+ MOVD $304, R12
+ B runtime·callbackasm1(SB)
+ MOVD $305, R12
+ B runtime·callbackasm1(SB)
+ MOVD $306, R12
+ B runtime·callbackasm1(SB)
+ MOVD $307, R12
+ B runtime·callbackasm1(SB)
+ MOVD $308, R12
+ B runtime·callbackasm1(SB)
+ MOVD $309, R12
+ B runtime·callbackasm1(SB)
+ MOVD $310, R12
+ B runtime·callbackasm1(SB)
+ MOVD $311, R12
+ B runtime·callbackasm1(SB)
+ MOVD $312, R12
+ B runtime·callbackasm1(SB)
+ MOVD $313, R12
+ B runtime·callbackasm1(SB)
+ MOVD $314, R12
+ B runtime·callbackasm1(SB)
+ MOVD $315, R12
+ B runtime·callbackasm1(SB)
+ MOVD $316, R12
+ B runtime·callbackasm1(SB)
+ MOVD $317, R12
+ B runtime·callbackasm1(SB)
+ MOVD $318, R12
+ B runtime·callbackasm1(SB)
+ MOVD $319, R12
+ B runtime·callbackasm1(SB)
+ MOVD $320, R12
+ B runtime·callbackasm1(SB)
+ MOVD $321, R12
+ B runtime·callbackasm1(SB)
+ MOVD $322, R12
+ B runtime·callbackasm1(SB)
+ MOVD $323, R12
+ B runtime·callbackasm1(SB)
+ MOVD $324, R12
+ B runtime·callbackasm1(SB)
+ MOVD $325, R12
+ B runtime·callbackasm1(SB)
+ MOVD $326, R12
+ B runtime·callbackasm1(SB)
+ MOVD $327, R12
+ B runtime·callbackasm1(SB)
+ MOVD $328, R12
+ B runtime·callbackasm1(SB)
+ MOVD $329, R12
+ B runtime·callbackasm1(SB)
+ MOVD $330, R12
+ B runtime·callbackasm1(SB)
+ MOVD $331, R12
+ B runtime·callbackasm1(SB)
+ MOVD $332, R12
+ B runtime·callbackasm1(SB)
+ MOVD $333, R12
+ B runtime·callbackasm1(SB)
+ MOVD $334, R12
+ B runtime·callbackasm1(SB)
+ MOVD $335, R12
+ B runtime·callbackasm1(SB)
+ MOVD $336, R12
+ B runtime·callbackasm1(SB)
+ MOVD $337, R12
+ B runtime·callbackasm1(SB)
+ MOVD $338, R12
+ B runtime·callbackasm1(SB)
+ MOVD $339, R12
+ B runtime·callbackasm1(SB)
+ MOVD $340, R12
+ B runtime·callbackasm1(SB)
+ MOVD $341, R12
+ B runtime·callbackasm1(SB)
+ MOVD $342, R12
+ B runtime·callbackasm1(SB)
+ MOVD $343, R12
+ B runtime·callbackasm1(SB)
+ MOVD $344, R12
+ B runtime·callbackasm1(SB)
+ MOVD $345, R12
+ B runtime·callbackasm1(SB)
+ MOVD $346, R12
+ B runtime·callbackasm1(SB)
+ MOVD $347, R12
+ B runtime·callbackasm1(SB)
+ MOVD $348, R12
+ B runtime·callbackasm1(SB)
+ MOVD $349, R12
+ B runtime·callbackasm1(SB)
+ MOVD $350, R12
+ B runtime·callbackasm1(SB)
+ MOVD $351, R12
+ B runtime·callbackasm1(SB)
+ MOVD $352, R12
+ B runtime·callbackasm1(SB)
+ MOVD $353, R12
+ B runtime·callbackasm1(SB)
+ MOVD $354, R12
+ B runtime·callbackasm1(SB)
+ MOVD $355, R12
+ B runtime·callbackasm1(SB)
+ MOVD $356, R12
+ B runtime·callbackasm1(SB)
+ MOVD $357, R12
+ B runtime·callbackasm1(SB)
+ MOVD $358, R12
+ B runtime·callbackasm1(SB)
+ MOVD $359, R12
+ B runtime·callbackasm1(SB)
+ MOVD $360, R12
+ B runtime·callbackasm1(SB)
+ MOVD $361, R12
+ B runtime·callbackasm1(SB)
+ MOVD $362, R12
+ B runtime·callbackasm1(SB)
+ MOVD $363, R12
+ B runtime·callbackasm1(SB)
+ MOVD $364, R12
+ B runtime·callbackasm1(SB)
+ MOVD $365, R12
+ B runtime·callbackasm1(SB)
+ MOVD $366, R12
+ B runtime·callbackasm1(SB)
+ MOVD $367, R12
+ B runtime·callbackasm1(SB)
+ MOVD $368, R12
+ B runtime·callbackasm1(SB)
+ MOVD $369, R12
+ B runtime·callbackasm1(SB)
+ MOVD $370, R12
+ B runtime·callbackasm1(SB)
+ MOVD $371, R12
+ B runtime·callbackasm1(SB)
+ MOVD $372, R12
+ B runtime·callbackasm1(SB)
+ MOVD $373, R12
+ B runtime·callbackasm1(SB)
+ MOVD $374, R12
+ B runtime·callbackasm1(SB)
+ MOVD $375, R12
+ B runtime·callbackasm1(SB)
+ MOVD $376, R12
+ B runtime·callbackasm1(SB)
+ MOVD $377, R12
+ B runtime·callbackasm1(SB)
+ MOVD $378, R12
+ B runtime·callbackasm1(SB)
+ MOVD $379, R12
+ B runtime·callbackasm1(SB)
+ MOVD $380, R12
+ B runtime·callbackasm1(SB)
+ MOVD $381, R12
+ B runtime·callbackasm1(SB)
+ MOVD $382, R12
+ B runtime·callbackasm1(SB)
+ MOVD $383, R12
+ B runtime·callbackasm1(SB)
+ MOVD $384, R12
+ B runtime·callbackasm1(SB)
+ MOVD $385, R12
+ B runtime·callbackasm1(SB)
+ MOVD $386, R12
+ B runtime·callbackasm1(SB)
+ MOVD $387, R12
+ B runtime·callbackasm1(SB)
+ MOVD $388, R12
+ B runtime·callbackasm1(SB)
+ MOVD $389, R12
+ B runtime·callbackasm1(SB)
+ MOVD $390, R12
+ B runtime·callbackasm1(SB)
+ MOVD $391, R12
+ B runtime·callbackasm1(SB)
+ MOVD $392, R12
+ B runtime·callbackasm1(SB)
+ MOVD $393, R12
+ B runtime·callbackasm1(SB)
+ MOVD $394, R12
+ B runtime·callbackasm1(SB)
+ MOVD $395, R12
+ B runtime·callbackasm1(SB)
+ MOVD $396, R12
+ B runtime·callbackasm1(SB)
+ MOVD $397, R12
+ B runtime·callbackasm1(SB)
+ MOVD $398, R12
+ B runtime·callbackasm1(SB)
+ MOVD $399, R12
+ B runtime·callbackasm1(SB)
+ MOVD $400, R12
+ B runtime·callbackasm1(SB)
+ MOVD $401, R12
+ B runtime·callbackasm1(SB)
+ MOVD $402, R12
+ B runtime·callbackasm1(SB)
+ MOVD $403, R12
+ B runtime·callbackasm1(SB)
+ MOVD $404, R12
+ B runtime·callbackasm1(SB)
+ MOVD $405, R12
+ B runtime·callbackasm1(SB)
+ MOVD $406, R12
+ B runtime·callbackasm1(SB)
+ MOVD $407, R12
+ B runtime·callbackasm1(SB)
+ MOVD $408, R12
+ B runtime·callbackasm1(SB)
+ MOVD $409, R12
+ B runtime·callbackasm1(SB)
+ MOVD $410, R12
+ B runtime·callbackasm1(SB)
+ MOVD $411, R12
+ B runtime·callbackasm1(SB)
+ MOVD $412, R12
+ B runtime·callbackasm1(SB)
+ MOVD $413, R12
+ B runtime·callbackasm1(SB)
+ MOVD $414, R12
+ B runtime·callbackasm1(SB)
+ MOVD $415, R12
+ B runtime·callbackasm1(SB)
+ MOVD $416, R12
+ B runtime·callbackasm1(SB)
+ MOVD $417, R12
+ B runtime·callbackasm1(SB)
+ MOVD $418, R12
+ B runtime·callbackasm1(SB)
+ MOVD $419, R12
+ B runtime·callbackasm1(SB)
+ MOVD $420, R12
+ B runtime·callbackasm1(SB)
+ MOVD $421, R12
+ B runtime·callbackasm1(SB)
+ MOVD $422, R12
+ B runtime·callbackasm1(SB)
+ MOVD $423, R12
+ B runtime·callbackasm1(SB)
+ MOVD $424, R12
+ B runtime·callbackasm1(SB)
+ MOVD $425, R12
+ B runtime·callbackasm1(SB)
+ MOVD $426, R12
+ B runtime·callbackasm1(SB)
+ MOVD $427, R12
+ B runtime·callbackasm1(SB)
+ MOVD $428, R12
+ B runtime·callbackasm1(SB)
+ MOVD $429, R12
+ B runtime·callbackasm1(SB)
+ MOVD $430, R12
+ B runtime·callbackasm1(SB)
+ MOVD $431, R12
+ B runtime·callbackasm1(SB)
+ MOVD $432, R12
+ B runtime·callbackasm1(SB)
+ MOVD $433, R12
+ B runtime·callbackasm1(SB)
+ MOVD $434, R12
+ B runtime·callbackasm1(SB)
+ MOVD $435, R12
+ B runtime·callbackasm1(SB)
+ MOVD $436, R12
+ B runtime·callbackasm1(SB)
+ MOVD $437, R12
+ B runtime·callbackasm1(SB)
+ MOVD $438, R12
+ B runtime·callbackasm1(SB)
+ MOVD $439, R12
+ B runtime·callbackasm1(SB)
+ MOVD $440, R12
+ B runtime·callbackasm1(SB)
+ MOVD $441, R12
+ B runtime·callbackasm1(SB)
+ MOVD $442, R12
+ B runtime·callbackasm1(SB)
+ MOVD $443, R12
+ B runtime·callbackasm1(SB)
+ MOVD $444, R12
+ B runtime·callbackasm1(SB)
+ MOVD $445, R12
+ B runtime·callbackasm1(SB)
+ MOVD $446, R12
+ B runtime·callbackasm1(SB)
+ MOVD $447, R12
+ B runtime·callbackasm1(SB)
+ MOVD $448, R12
+ B runtime·callbackasm1(SB)
+ MOVD $449, R12
+ B runtime·callbackasm1(SB)
+ MOVD $450, R12
+ B runtime·callbackasm1(SB)
+ MOVD $451, R12
+ B runtime·callbackasm1(SB)
+ MOVD $452, R12
+ B runtime·callbackasm1(SB)
+ MOVD $453, R12
+ B runtime·callbackasm1(SB)
+ MOVD $454, R12
+ B runtime·callbackasm1(SB)
+ MOVD $455, R12
+ B runtime·callbackasm1(SB)
+ MOVD $456, R12
+ B runtime·callbackasm1(SB)
+ MOVD $457, R12
+ B runtime·callbackasm1(SB)
+ MOVD $458, R12
+ B runtime·callbackasm1(SB)
+ MOVD $459, R12
+ B runtime·callbackasm1(SB)
+ MOVD $460, R12
+ B runtime·callbackasm1(SB)
+ MOVD $461, R12
+ B runtime·callbackasm1(SB)
+ MOVD $462, R12
+ B runtime·callbackasm1(SB)
+ MOVD $463, R12
+ B runtime·callbackasm1(SB)
+ MOVD $464, R12
+ B runtime·callbackasm1(SB)
+ MOVD $465, R12
+ B runtime·callbackasm1(SB)
+ MOVD $466, R12
+ B runtime·callbackasm1(SB)
+ MOVD $467, R12
+ B runtime·callbackasm1(SB)
+ MOVD $468, R12
+ B runtime·callbackasm1(SB)
+ MOVD $469, R12
+ B runtime·callbackasm1(SB)
+ MOVD $470, R12
+ B runtime·callbackasm1(SB)
+ MOVD $471, R12
+ B runtime·callbackasm1(SB)
+ MOVD $472, R12
+ B runtime·callbackasm1(SB)
+ MOVD $473, R12
+ B runtime·callbackasm1(SB)
+ MOVD $474, R12
+ B runtime·callbackasm1(SB)
+ MOVD $475, R12
+ B runtime·callbackasm1(SB)
+ MOVD $476, R12
+ B runtime·callbackasm1(SB)
+ MOVD $477, R12
+ B runtime·callbackasm1(SB)
+ MOVD $478, R12
+ B runtime·callbackasm1(SB)
+ MOVD $479, R12
+ B runtime·callbackasm1(SB)
+ MOVD $480, R12
+ B runtime·callbackasm1(SB)
+ MOVD $481, R12
+ B runtime·callbackasm1(SB)
+ MOVD $482, R12
+ B runtime·callbackasm1(SB)
+ MOVD $483, R12
+ B runtime·callbackasm1(SB)
+ MOVD $484, R12
+ B runtime·callbackasm1(SB)
+ MOVD $485, R12
+ B runtime·callbackasm1(SB)
+ MOVD $486, R12
+ B runtime·callbackasm1(SB)
+ MOVD $487, R12
+ B runtime·callbackasm1(SB)
+ MOVD $488, R12
+ B runtime·callbackasm1(SB)
+ MOVD $489, R12
+ B runtime·callbackasm1(SB)
+ MOVD $490, R12
+ B runtime·callbackasm1(SB)
+ MOVD $491, R12
+ B runtime·callbackasm1(SB)
+ MOVD $492, R12
+ B runtime·callbackasm1(SB)
+ MOVD $493, R12
+ B runtime·callbackasm1(SB)
+ MOVD $494, R12
+ B runtime·callbackasm1(SB)
+ MOVD $495, R12
+ B runtime·callbackasm1(SB)
+ MOVD $496, R12
+ B runtime·callbackasm1(SB)
+ MOVD $497, R12
+ B runtime·callbackasm1(SB)
+ MOVD $498, R12
+ B runtime·callbackasm1(SB)
+ MOVD $499, R12
+ B runtime·callbackasm1(SB)
+ MOVD $500, R12
+ B runtime·callbackasm1(SB)
+ MOVD $501, R12
+ B runtime·callbackasm1(SB)
+ MOVD $502, R12
+ B runtime·callbackasm1(SB)
+ MOVD $503, R12
+ B runtime·callbackasm1(SB)
+ MOVD $504, R12
+ B runtime·callbackasm1(SB)
+ MOVD $505, R12
+ B runtime·callbackasm1(SB)
+ MOVD $506, R12
+ B runtime·callbackasm1(SB)
+ MOVD $507, R12
+ B runtime·callbackasm1(SB)
+ MOVD $508, R12
+ B runtime·callbackasm1(SB)
+ MOVD $509, R12
+ B runtime·callbackasm1(SB)
+ MOVD $510, R12
+ B runtime·callbackasm1(SB)
+ MOVD $511, R12
+ B runtime·callbackasm1(SB)
+ MOVD $512, R12
+ B runtime·callbackasm1(SB)
+ MOVD $513, R12
+ B runtime·callbackasm1(SB)
+ MOVD $514, R12
+ B runtime·callbackasm1(SB)
+ MOVD $515, R12
+ B runtime·callbackasm1(SB)
+ MOVD $516, R12
+ B runtime·callbackasm1(SB)
+ MOVD $517, R12
+ B runtime·callbackasm1(SB)
+ MOVD $518, R12
+ B runtime·callbackasm1(SB)
+ MOVD $519, R12
+ B runtime·callbackasm1(SB)
+ MOVD $520, R12
+ B runtime·callbackasm1(SB)
+ MOVD $521, R12
+ B runtime·callbackasm1(SB)
+ MOVD $522, R12
+ B runtime·callbackasm1(SB)
+ MOVD $523, R12
+ B runtime·callbackasm1(SB)
+ MOVD $524, R12
+ B runtime·callbackasm1(SB)
+ MOVD $525, R12
+ B runtime·callbackasm1(SB)
+ MOVD $526, R12
+ B runtime·callbackasm1(SB)
+ MOVD $527, R12
+ B runtime·callbackasm1(SB)
+ MOVD $528, R12
+ B runtime·callbackasm1(SB)
+ MOVD $529, R12
+ B runtime·callbackasm1(SB)
+ MOVD $530, R12
+ B runtime·callbackasm1(SB)
+ MOVD $531, R12
+ B runtime·callbackasm1(SB)
+ MOVD $532, R12
+ B runtime·callbackasm1(SB)
+ MOVD $533, R12
+ B runtime·callbackasm1(SB)
+ MOVD $534, R12
+ B runtime·callbackasm1(SB)
+ MOVD $535, R12
+ B runtime·callbackasm1(SB)
+ MOVD $536, R12
+ B runtime·callbackasm1(SB)
+ MOVD $537, R12
+ B runtime·callbackasm1(SB)
+ MOVD $538, R12
+ B runtime·callbackasm1(SB)
+ MOVD $539, R12
+ B runtime·callbackasm1(SB)
+ MOVD $540, R12
+ B runtime·callbackasm1(SB)
+ MOVD $541, R12
+ B runtime·callbackasm1(SB)
+ MOVD $542, R12
+ B runtime·callbackasm1(SB)
+ MOVD $543, R12
+ B runtime·callbackasm1(SB)
+ MOVD $544, R12
+ B runtime·callbackasm1(SB)
+ MOVD $545, R12
+ B runtime·callbackasm1(SB)
+ MOVD $546, R12
+ B runtime·callbackasm1(SB)
+ MOVD $547, R12
+ B runtime·callbackasm1(SB)
+ MOVD $548, R12
+ B runtime·callbackasm1(SB)
+ MOVD $549, R12
+ B runtime·callbackasm1(SB)
+ MOVD $550, R12
+ B runtime·callbackasm1(SB)
+ MOVD $551, R12
+ B runtime·callbackasm1(SB)
+ MOVD $552, R12
+ B runtime·callbackasm1(SB)
+ MOVD $553, R12
+ B runtime·callbackasm1(SB)
+ MOVD $554, R12
+ B runtime·callbackasm1(SB)
+ MOVD $555, R12
+ B runtime·callbackasm1(SB)
+ MOVD $556, R12
+ B runtime·callbackasm1(SB)
+ MOVD $557, R12
+ B runtime·callbackasm1(SB)
+ MOVD $558, R12
+ B runtime·callbackasm1(SB)
+ MOVD $559, R12
+ B runtime·callbackasm1(SB)
+ MOVD $560, R12
+ B runtime·callbackasm1(SB)
+ MOVD $561, R12
+ B runtime·callbackasm1(SB)
+ MOVD $562, R12
+ B runtime·callbackasm1(SB)
+ MOVD $563, R12
+ B runtime·callbackasm1(SB)
+ MOVD $564, R12
+ B runtime·callbackasm1(SB)
+ MOVD $565, R12
+ B runtime·callbackasm1(SB)
+ MOVD $566, R12
+ B runtime·callbackasm1(SB)
+ MOVD $567, R12
+ B runtime·callbackasm1(SB)
+ MOVD $568, R12
+ B runtime·callbackasm1(SB)
+ MOVD $569, R12
+ B runtime·callbackasm1(SB)
+ MOVD $570, R12
+ B runtime·callbackasm1(SB)
+ MOVD $571, R12
+ B runtime·callbackasm1(SB)
+ MOVD $572, R12
+ B runtime·callbackasm1(SB)
+ MOVD $573, R12
+ B runtime·callbackasm1(SB)
+ MOVD $574, R12
+ B runtime·callbackasm1(SB)
+ MOVD $575, R12
+ B runtime·callbackasm1(SB)
+ MOVD $576, R12
+ B runtime·callbackasm1(SB)
+ MOVD $577, R12
+ B runtime·callbackasm1(SB)
+ MOVD $578, R12
+ B runtime·callbackasm1(SB)
+ MOVD $579, R12
+ B runtime·callbackasm1(SB)
+ MOVD $580, R12
+ B runtime·callbackasm1(SB)
+ MOVD $581, R12
+ B runtime·callbackasm1(SB)
+ MOVD $582, R12
+ B runtime·callbackasm1(SB)
+ MOVD $583, R12
+ B runtime·callbackasm1(SB)
+ MOVD $584, R12
+ B runtime·callbackasm1(SB)
+ MOVD $585, R12
+ B runtime·callbackasm1(SB)
+ MOVD $586, R12
+ B runtime·callbackasm1(SB)
+ MOVD $587, R12
+ B runtime·callbackasm1(SB)
+ MOVD $588, R12
+ B runtime·callbackasm1(SB)
+ MOVD $589, R12
+ B runtime·callbackasm1(SB)
+ MOVD $590, R12
+ B runtime·callbackasm1(SB)
+ MOVD $591, R12
+ B runtime·callbackasm1(SB)
+ MOVD $592, R12
+ B runtime·callbackasm1(SB)
+ MOVD $593, R12
+ B runtime·callbackasm1(SB)
+ MOVD $594, R12
+ B runtime·callbackasm1(SB)
+ MOVD $595, R12
+ B runtime·callbackasm1(SB)
+ MOVD $596, R12
+ B runtime·callbackasm1(SB)
+ MOVD $597, R12
+ B runtime·callbackasm1(SB)
+ MOVD $598, R12
+ B runtime·callbackasm1(SB)
+ MOVD $599, R12
+ B runtime·callbackasm1(SB)
+ MOVD $600, R12
+ B runtime·callbackasm1(SB)
+ MOVD $601, R12
+ B runtime·callbackasm1(SB)
+ MOVD $602, R12
+ B runtime·callbackasm1(SB)
+ MOVD $603, R12
+ B runtime·callbackasm1(SB)
+ MOVD $604, R12
+ B runtime·callbackasm1(SB)
+ MOVD $605, R12
+ B runtime·callbackasm1(SB)
+ MOVD $606, R12
+ B runtime·callbackasm1(SB)
+ MOVD $607, R12
+ B runtime·callbackasm1(SB)
+ MOVD $608, R12
+ B runtime·callbackasm1(SB)
+ MOVD $609, R12
+ B runtime·callbackasm1(SB)
+ MOVD $610, R12
+ B runtime·callbackasm1(SB)
+ MOVD $611, R12
+ B runtime·callbackasm1(SB)
+ MOVD $612, R12
+ B runtime·callbackasm1(SB)
+ MOVD $613, R12
+ B runtime·callbackasm1(SB)
+ MOVD $614, R12
+ B runtime·callbackasm1(SB)
+ MOVD $615, R12
+ B runtime·callbackasm1(SB)
+ MOVD $616, R12
+ B runtime·callbackasm1(SB)
+ MOVD $617, R12
+ B runtime·callbackasm1(SB)
+ MOVD $618, R12
+ B runtime·callbackasm1(SB)
+ MOVD $619, R12
+ B runtime·callbackasm1(SB)
+ MOVD $620, R12
+ B runtime·callbackasm1(SB)
+ MOVD $621, R12
+ B runtime·callbackasm1(SB)
+ MOVD $622, R12
+ B runtime·callbackasm1(SB)
+ MOVD $623, R12
+ B runtime·callbackasm1(SB)
+ MOVD $624, R12
+ B runtime·callbackasm1(SB)
+ MOVD $625, R12
+ B runtime·callbackasm1(SB)
+ MOVD $626, R12
+ B runtime·callbackasm1(SB)
+ MOVD $627, R12
+ B runtime·callbackasm1(SB)
+ MOVD $628, R12
+ B runtime·callbackasm1(SB)
+ MOVD $629, R12
+ B runtime·callbackasm1(SB)
+ MOVD $630, R12
+ B runtime·callbackasm1(SB)
+ MOVD $631, R12
+ B runtime·callbackasm1(SB)
+ MOVD $632, R12
+ B runtime·callbackasm1(SB)
+ MOVD $633, R12
+ B runtime·callbackasm1(SB)
+ MOVD $634, R12
+ B runtime·callbackasm1(SB)
+ MOVD $635, R12
+ B runtime·callbackasm1(SB)
+ MOVD $636, R12
+ B runtime·callbackasm1(SB)
+ MOVD $637, R12
+ B runtime·callbackasm1(SB)
+ MOVD $638, R12
+ B runtime·callbackasm1(SB)
+ MOVD $639, R12
+ B runtime·callbackasm1(SB)
+ MOVD $640, R12
+ B runtime·callbackasm1(SB)
+ MOVD $641, R12
+ B runtime·callbackasm1(SB)
+ MOVD $642, R12
+ B runtime·callbackasm1(SB)
+ MOVD $643, R12
+ B runtime·callbackasm1(SB)
+ MOVD $644, R12
+ B runtime·callbackasm1(SB)
+ MOVD $645, R12
+ B runtime·callbackasm1(SB)
+ MOVD $646, R12
+ B runtime·callbackasm1(SB)
+ MOVD $647, R12
+ B runtime·callbackasm1(SB)
+ MOVD $648, R12
+ B runtime·callbackasm1(SB)
+ MOVD $649, R12
+ B runtime·callbackasm1(SB)
+ MOVD $650, R12
+ B runtime·callbackasm1(SB)
+ MOVD $651, R12
+ B runtime·callbackasm1(SB)
+ MOVD $652, R12
+ B runtime·callbackasm1(SB)
+ MOVD $653, R12
+ B runtime·callbackasm1(SB)
+ MOVD $654, R12
+ B runtime·callbackasm1(SB)
+ MOVD $655, R12
+ B runtime·callbackasm1(SB)
+ MOVD $656, R12
+ B runtime·callbackasm1(SB)
+ MOVD $657, R12
+ B runtime·callbackasm1(SB)
+ MOVD $658, R12
+ B runtime·callbackasm1(SB)
+ MOVD $659, R12
+ B runtime·callbackasm1(SB)
+ MOVD $660, R12
+ B runtime·callbackasm1(SB)
+ MOVD $661, R12
+ B runtime·callbackasm1(SB)
+ MOVD $662, R12
+ B runtime·callbackasm1(SB)
+ MOVD $663, R12
+ B runtime·callbackasm1(SB)
+ MOVD $664, R12
+ B runtime·callbackasm1(SB)
+ MOVD $665, R12
+ B runtime·callbackasm1(SB)
+ MOVD $666, R12
+ B runtime·callbackasm1(SB)
+ MOVD $667, R12
+ B runtime·callbackasm1(SB)
+ MOVD $668, R12
+ B runtime·callbackasm1(SB)
+ MOVD $669, R12
+ B runtime·callbackasm1(SB)
+ MOVD $670, R12
+ B runtime·callbackasm1(SB)
+ MOVD $671, R12
+ B runtime·callbackasm1(SB)
+ MOVD $672, R12
+ B runtime·callbackasm1(SB)
+ MOVD $673, R12
+ B runtime·callbackasm1(SB)
+ MOVD $674, R12
+ B runtime·callbackasm1(SB)
+ MOVD $675, R12
+ B runtime·callbackasm1(SB)
+ MOVD $676, R12
+ B runtime·callbackasm1(SB)
+ MOVD $677, R12
+ B runtime·callbackasm1(SB)
+ MOVD $678, R12
+ B runtime·callbackasm1(SB)
+ MOVD $679, R12
+ B runtime·callbackasm1(SB)
+ MOVD $680, R12
+ B runtime·callbackasm1(SB)
+ MOVD $681, R12
+ B runtime·callbackasm1(SB)
+ MOVD $682, R12
+ B runtime·callbackasm1(SB)
+ MOVD $683, R12
+ B runtime·callbackasm1(SB)
+ MOVD $684, R12
+ B runtime·callbackasm1(SB)
+ MOVD $685, R12
+ B runtime·callbackasm1(SB)
+ MOVD $686, R12
+ B runtime·callbackasm1(SB)
+ MOVD $687, R12
+ B runtime·callbackasm1(SB)
+ MOVD $688, R12
+ B runtime·callbackasm1(SB)
+ MOVD $689, R12
+ B runtime·callbackasm1(SB)
+ MOVD $690, R12
+ B runtime·callbackasm1(SB)
+ MOVD $691, R12
+ B runtime·callbackasm1(SB)
+ MOVD $692, R12
+ B runtime·callbackasm1(SB)
+ MOVD $693, R12
+ B runtime·callbackasm1(SB)
+ MOVD $694, R12
+ B runtime·callbackasm1(SB)
+ MOVD $695, R12
+ B runtime·callbackasm1(SB)
+ MOVD $696, R12
+ B runtime·callbackasm1(SB)
+ MOVD $697, R12
+ B runtime·callbackasm1(SB)
+ MOVD $698, R12
+ B runtime·callbackasm1(SB)
+ MOVD $699, R12
+ B runtime·callbackasm1(SB)
+ MOVD $700, R12
+ B runtime·callbackasm1(SB)
+ MOVD $701, R12
+ B runtime·callbackasm1(SB)
+ MOVD $702, R12
+ B runtime·callbackasm1(SB)
+ MOVD $703, R12
+ B runtime·callbackasm1(SB)
+ MOVD $704, R12
+ B runtime·callbackasm1(SB)
+ MOVD $705, R12
+ B runtime·callbackasm1(SB)
+ MOVD $706, R12
+ B runtime·callbackasm1(SB)
+ MOVD $707, R12
+ B runtime·callbackasm1(SB)
+ MOVD $708, R12
+ B runtime·callbackasm1(SB)
+ MOVD $709, R12
+ B runtime·callbackasm1(SB)
+ MOVD $710, R12
+ B runtime·callbackasm1(SB)
+ MOVD $711, R12
+ B runtime·callbackasm1(SB)
+ MOVD $712, R12
+ B runtime·callbackasm1(SB)
+ MOVD $713, R12
+ B runtime·callbackasm1(SB)
+ MOVD $714, R12
+ B runtime·callbackasm1(SB)
+ MOVD $715, R12
+ B runtime·callbackasm1(SB)
+ MOVD $716, R12
+ B runtime·callbackasm1(SB)
+ MOVD $717, R12
+ B runtime·callbackasm1(SB)
+ MOVD $718, R12
+ B runtime·callbackasm1(SB)
+ MOVD $719, R12
+ B runtime·callbackasm1(SB)
+ MOVD $720, R12
+ B runtime·callbackasm1(SB)
+ MOVD $721, R12
+ B runtime·callbackasm1(SB)
+ MOVD $722, R12
+ B runtime·callbackasm1(SB)
+ MOVD $723, R12
+ B runtime·callbackasm1(SB)
+ MOVD $724, R12
+ B runtime·callbackasm1(SB)
+ MOVD $725, R12
+ B runtime·callbackasm1(SB)
+ MOVD $726, R12
+ B runtime·callbackasm1(SB)
+ MOVD $727, R12
+ B runtime·callbackasm1(SB)
+ MOVD $728, R12
+ B runtime·callbackasm1(SB)
+ MOVD $729, R12
+ B runtime·callbackasm1(SB)
+ MOVD $730, R12
+ B runtime·callbackasm1(SB)
+ MOVD $731, R12
+ B runtime·callbackasm1(SB)
+ MOVD $732, R12
+ B runtime·callbackasm1(SB)
+ MOVD $733, R12
+ B runtime·callbackasm1(SB)
+ MOVD $734, R12
+ B runtime·callbackasm1(SB)
+ MOVD $735, R12
+ B runtime·callbackasm1(SB)
+ MOVD $736, R12
+ B runtime·callbackasm1(SB)
+ MOVD $737, R12
+ B runtime·callbackasm1(SB)
+ MOVD $738, R12
+ B runtime·callbackasm1(SB)
+ MOVD $739, R12
+ B runtime·callbackasm1(SB)
+ MOVD $740, R12
+ B runtime·callbackasm1(SB)
+ MOVD $741, R12
+ B runtime·callbackasm1(SB)
+ MOVD $742, R12
+ B runtime·callbackasm1(SB)
+ MOVD $743, R12
+ B runtime·callbackasm1(SB)
+ MOVD $744, R12
+ B runtime·callbackasm1(SB)
+ MOVD $745, R12
+ B runtime·callbackasm1(SB)
+ MOVD $746, R12
+ B runtime·callbackasm1(SB)
+ MOVD $747, R12
+ B runtime·callbackasm1(SB)
+ MOVD $748, R12
+ B runtime·callbackasm1(SB)
+ MOVD $749, R12
+ B runtime·callbackasm1(SB)
+ MOVD $750, R12
+ B runtime·callbackasm1(SB)
+ MOVD $751, R12
+ B runtime·callbackasm1(SB)
+ MOVD $752, R12
+ B runtime·callbackasm1(SB)
+ MOVD $753, R12
+ B runtime·callbackasm1(SB)
+ MOVD $754, R12
+ B runtime·callbackasm1(SB)
+ MOVD $755, R12
+ B runtime·callbackasm1(SB)
+ MOVD $756, R12
+ B runtime·callbackasm1(SB)
+ MOVD $757, R12
+ B runtime·callbackasm1(SB)
+ MOVD $758, R12
+ B runtime·callbackasm1(SB)
+ MOVD $759, R12
+ B runtime·callbackasm1(SB)
+ MOVD $760, R12
+ B runtime·callbackasm1(SB)
+ MOVD $761, R12
+ B runtime·callbackasm1(SB)
+ MOVD $762, R12
+ B runtime·callbackasm1(SB)
+ MOVD $763, R12
+ B runtime·callbackasm1(SB)
+ MOVD $764, R12
+ B runtime·callbackasm1(SB)
+ MOVD $765, R12
+ B runtime·callbackasm1(SB)
+ MOVD $766, R12
+ B runtime·callbackasm1(SB)
+ MOVD $767, R12
+ B runtime·callbackasm1(SB)
+ MOVD $768, R12
+ B runtime·callbackasm1(SB)
+ MOVD $769, R12
+ B runtime·callbackasm1(SB)
+ MOVD $770, R12
+ B runtime·callbackasm1(SB)
+ MOVD $771, R12
+ B runtime·callbackasm1(SB)
+ MOVD $772, R12
+ B runtime·callbackasm1(SB)
+ MOVD $773, R12
+ B runtime·callbackasm1(SB)
+ MOVD $774, R12
+ B runtime·callbackasm1(SB)
+ MOVD $775, R12
+ B runtime·callbackasm1(SB)
+ MOVD $776, R12
+ B runtime·callbackasm1(SB)
+ MOVD $777, R12
+ B runtime·callbackasm1(SB)
+ MOVD $778, R12
+ B runtime·callbackasm1(SB)
+ MOVD $779, R12
+ B runtime·callbackasm1(SB)
+ MOVD $780, R12
+ B runtime·callbackasm1(SB)
+ MOVD $781, R12
+ B runtime·callbackasm1(SB)
+ MOVD $782, R12
+ B runtime·callbackasm1(SB)
+ MOVD $783, R12
+ B runtime·callbackasm1(SB)
+ MOVD $784, R12
+ B runtime·callbackasm1(SB)
+ MOVD $785, R12
+ B runtime·callbackasm1(SB)
+ MOVD $786, R12
+ B runtime·callbackasm1(SB)
+ MOVD $787, R12
+ B runtime·callbackasm1(SB)
+ MOVD $788, R12
+ B runtime·callbackasm1(SB)
+ MOVD $789, R12
+ B runtime·callbackasm1(SB)
+ MOVD $790, R12
+ B runtime·callbackasm1(SB)
+ MOVD $791, R12
+ B runtime·callbackasm1(SB)
+ MOVD $792, R12
+ B runtime·callbackasm1(SB)
+ MOVD $793, R12
+ B runtime·callbackasm1(SB)
+ MOVD $794, R12
+ B runtime·callbackasm1(SB)
+ MOVD $795, R12
+ B runtime·callbackasm1(SB)
+ MOVD $796, R12
+ B runtime·callbackasm1(SB)
+ MOVD $797, R12
+ B runtime·callbackasm1(SB)
+ MOVD $798, R12
+ B runtime·callbackasm1(SB)
+ MOVD $799, R12
+ B runtime·callbackasm1(SB)
+ MOVD $800, R12
+ B runtime·callbackasm1(SB)
+ MOVD $801, R12
+ B runtime·callbackasm1(SB)
+ MOVD $802, R12
+ B runtime·callbackasm1(SB)
+ MOVD $803, R12
+ B runtime·callbackasm1(SB)
+ MOVD $804, R12
+ B runtime·callbackasm1(SB)
+ MOVD $805, R12
+ B runtime·callbackasm1(SB)
+ MOVD $806, R12
+ B runtime·callbackasm1(SB)
+ MOVD $807, R12
+ B runtime·callbackasm1(SB)
+ MOVD $808, R12
+ B runtime·callbackasm1(SB)
+ MOVD $809, R12
+ B runtime·callbackasm1(SB)
+ MOVD $810, R12
+ B runtime·callbackasm1(SB)
+ MOVD $811, R12
+ B runtime·callbackasm1(SB)
+ MOVD $812, R12
+ B runtime·callbackasm1(SB)
+ MOVD $813, R12
+ B runtime·callbackasm1(SB)
+ MOVD $814, R12
+ B runtime·callbackasm1(SB)
+ MOVD $815, R12
+ B runtime·callbackasm1(SB)
+ MOVD $816, R12
+ B runtime·callbackasm1(SB)
+ MOVD $817, R12
+ B runtime·callbackasm1(SB)
+ MOVD $818, R12
+ B runtime·callbackasm1(SB)
+ MOVD $819, R12
+ B runtime·callbackasm1(SB)
+ MOVD $820, R12
+ B runtime·callbackasm1(SB)
+ MOVD $821, R12
+ B runtime·callbackasm1(SB)
+ MOVD $822, R12
+ B runtime·callbackasm1(SB)
+ MOVD $823, R12
+ B runtime·callbackasm1(SB)
+ MOVD $824, R12
+ B runtime·callbackasm1(SB)
+ MOVD $825, R12
+ B runtime·callbackasm1(SB)
+ MOVD $826, R12
+ B runtime·callbackasm1(SB)
+ MOVD $827, R12
+ B runtime·callbackasm1(SB)
+ MOVD $828, R12
+ B runtime·callbackasm1(SB)
+ MOVD $829, R12
+ B runtime·callbackasm1(SB)
+ MOVD $830, R12
+ B runtime·callbackasm1(SB)
+ MOVD $831, R12
+ B runtime·callbackasm1(SB)
+ MOVD $832, R12
+ B runtime·callbackasm1(SB)
+ MOVD $833, R12
+ B runtime·callbackasm1(SB)
+ MOVD $834, R12
+ B runtime·callbackasm1(SB)
+ MOVD $835, R12
+ B runtime·callbackasm1(SB)
+ MOVD $836, R12
+ B runtime·callbackasm1(SB)
+ MOVD $837, R12
+ B runtime·callbackasm1(SB)
+ MOVD $838, R12
+ B runtime·callbackasm1(SB)
+ MOVD $839, R12
+ B runtime·callbackasm1(SB)
+ MOVD $840, R12
+ B runtime·callbackasm1(SB)
+ MOVD $841, R12
+ B runtime·callbackasm1(SB)
+ MOVD $842, R12
+ B runtime·callbackasm1(SB)
+ MOVD $843, R12
+ B runtime·callbackasm1(SB)
+ MOVD $844, R12
+ B runtime·callbackasm1(SB)
+ MOVD $845, R12
+ B runtime·callbackasm1(SB)
+ MOVD $846, R12
+ B runtime·callbackasm1(SB)
+ MOVD $847, R12
+ B runtime·callbackasm1(SB)
+ MOVD $848, R12
+ B runtime·callbackasm1(SB)
+ MOVD $849, R12
+ B runtime·callbackasm1(SB)
+ MOVD $850, R12
+ B runtime·callbackasm1(SB)
+ MOVD $851, R12
+ B runtime·callbackasm1(SB)
+ MOVD $852, R12
+ B runtime·callbackasm1(SB)
+ MOVD $853, R12
+ B runtime·callbackasm1(SB)
+ MOVD $854, R12
+ B runtime·callbackasm1(SB)
+ MOVD $855, R12
+ B runtime·callbackasm1(SB)
+ MOVD $856, R12
+ B runtime·callbackasm1(SB)
+ MOVD $857, R12
+ B runtime·callbackasm1(SB)
+ MOVD $858, R12
+ B runtime·callbackasm1(SB)
+ MOVD $859, R12
+ B runtime·callbackasm1(SB)
+ MOVD $860, R12
+ B runtime·callbackasm1(SB)
+ MOVD $861, R12
+ B runtime·callbackasm1(SB)
+ MOVD $862, R12
+ B runtime·callbackasm1(SB)
+ MOVD $863, R12
+ B runtime·callbackasm1(SB)
+ MOVD $864, R12
+ B runtime·callbackasm1(SB)
+ MOVD $865, R12
+ B runtime·callbackasm1(SB)
+ MOVD $866, R12
+ B runtime·callbackasm1(SB)
+ MOVD $867, R12
+ B runtime·callbackasm1(SB)
+ MOVD $868, R12
+ B runtime·callbackasm1(SB)
+ MOVD $869, R12
+ B runtime·callbackasm1(SB)
+ MOVD $870, R12
+ B runtime·callbackasm1(SB)
+ MOVD $871, R12
+ B runtime·callbackasm1(SB)
+ MOVD $872, R12
+ B runtime·callbackasm1(SB)
+ MOVD $873, R12
+ B runtime·callbackasm1(SB)
+ MOVD $874, R12
+ B runtime·callbackasm1(SB)
+ MOVD $875, R12
+ B runtime·callbackasm1(SB)
+ MOVD $876, R12
+ B runtime·callbackasm1(SB)
+ MOVD $877, R12
+ B runtime·callbackasm1(SB)
+ MOVD $878, R12
+ B runtime·callbackasm1(SB)
+ MOVD $879, R12
+ B runtime·callbackasm1(SB)
+ MOVD $880, R12
+ B runtime·callbackasm1(SB)
+ MOVD $881, R12
+ B runtime·callbackasm1(SB)
+ MOVD $882, R12
+ B runtime·callbackasm1(SB)
+ MOVD $883, R12
+ B runtime·callbackasm1(SB)
+ MOVD $884, R12
+ B runtime·callbackasm1(SB)
+ MOVD $885, R12
+ B runtime·callbackasm1(SB)
+ MOVD $886, R12
+ B runtime·callbackasm1(SB)
+ MOVD $887, R12
+ B runtime·callbackasm1(SB)
+ MOVD $888, R12
+ B runtime·callbackasm1(SB)
+ MOVD $889, R12
+ B runtime·callbackasm1(SB)
+ MOVD $890, R12
+ B runtime·callbackasm1(SB)
+ MOVD $891, R12
+ B runtime·callbackasm1(SB)
+ MOVD $892, R12
+ B runtime·callbackasm1(SB)
+ MOVD $893, R12
+ B runtime·callbackasm1(SB)
+ MOVD $894, R12
+ B runtime·callbackasm1(SB)
+ MOVD $895, R12
+ B runtime·callbackasm1(SB)
+ MOVD $896, R12
+ B runtime·callbackasm1(SB)
+ MOVD $897, R12
+ B runtime·callbackasm1(SB)
+ MOVD $898, R12
+ B runtime·callbackasm1(SB)
+ MOVD $899, R12
+ B runtime·callbackasm1(SB)
+ MOVD $900, R12
+ B runtime·callbackasm1(SB)
+ MOVD $901, R12
+ B runtime·callbackasm1(SB)
+ MOVD $902, R12
+ B runtime·callbackasm1(SB)
+ MOVD $903, R12
+ B runtime·callbackasm1(SB)
+ MOVD $904, R12
+ B runtime·callbackasm1(SB)
+ MOVD $905, R12
+ B runtime·callbackasm1(SB)
+ MOVD $906, R12
+ B runtime·callbackasm1(SB)
+ MOVD $907, R12
+ B runtime·callbackasm1(SB)
+ MOVD $908, R12
+ B runtime·callbackasm1(SB)
+ MOVD $909, R12
+ B runtime·callbackasm1(SB)
+ MOVD $910, R12
+ B runtime·callbackasm1(SB)
+ MOVD $911, R12
+ B runtime·callbackasm1(SB)
+ MOVD $912, R12
+ B runtime·callbackasm1(SB)
+ MOVD $913, R12
+ B runtime·callbackasm1(SB)
+ MOVD $914, R12
+ B runtime·callbackasm1(SB)
+ MOVD $915, R12
+ B runtime·callbackasm1(SB)
+ MOVD $916, R12
+ B runtime·callbackasm1(SB)
+ MOVD $917, R12
+ B runtime·callbackasm1(SB)
+ MOVD $918, R12
+ B runtime·callbackasm1(SB)
+ MOVD $919, R12
+ B runtime·callbackasm1(SB)
+ MOVD $920, R12
+ B runtime·callbackasm1(SB)
+ MOVD $921, R12
+ B runtime·callbackasm1(SB)
+ MOVD $922, R12
+ B runtime·callbackasm1(SB)
+ MOVD $923, R12
+ B runtime·callbackasm1(SB)
+ MOVD $924, R12
+ B runtime·callbackasm1(SB)
+ MOVD $925, R12
+ B runtime·callbackasm1(SB)
+ MOVD $926, R12
+ B runtime·callbackasm1(SB)
+ MOVD $927, R12
+ B runtime·callbackasm1(SB)
+ MOVD $928, R12
+ B runtime·callbackasm1(SB)
+ MOVD $929, R12
+ B runtime·callbackasm1(SB)
+ MOVD $930, R12
+ B runtime·callbackasm1(SB)
+ MOVD $931, R12
+ B runtime·callbackasm1(SB)
+ MOVD $932, R12
+ B runtime·callbackasm1(SB)
+ MOVD $933, R12
+ B runtime·callbackasm1(SB)
+ MOVD $934, R12
+ B runtime·callbackasm1(SB)
+ MOVD $935, R12
+ B runtime·callbackasm1(SB)
+ MOVD $936, R12
+ B runtime·callbackasm1(SB)
+ MOVD $937, R12
+ B runtime·callbackasm1(SB)
+ MOVD $938, R12
+ B runtime·callbackasm1(SB)
+ MOVD $939, R12
+ B runtime·callbackasm1(SB)
+ MOVD $940, R12
+ B runtime·callbackasm1(SB)
+ MOVD $941, R12
+ B runtime·callbackasm1(SB)
+ MOVD $942, R12
+ B runtime·callbackasm1(SB)
+ MOVD $943, R12
+ B runtime·callbackasm1(SB)
+ MOVD $944, R12
+ B runtime·callbackasm1(SB)
+ MOVD $945, R12
+ B runtime·callbackasm1(SB)
+ MOVD $946, R12
+ B runtime·callbackasm1(SB)
+ MOVD $947, R12
+ B runtime·callbackasm1(SB)
+ MOVD $948, R12
+ B runtime·callbackasm1(SB)
+ MOVD $949, R12
+ B runtime·callbackasm1(SB)
+ MOVD $950, R12
+ B runtime·callbackasm1(SB)
+ MOVD $951, R12
+ B runtime·callbackasm1(SB)
+ MOVD $952, R12
+ B runtime·callbackasm1(SB)
+ MOVD $953, R12
+ B runtime·callbackasm1(SB)
+ MOVD $954, R12
+ B runtime·callbackasm1(SB)
+ MOVD $955, R12
+ B runtime·callbackasm1(SB)
+ MOVD $956, R12
+ B runtime·callbackasm1(SB)
+ MOVD $957, R12
+ B runtime·callbackasm1(SB)
+ MOVD $958, R12
+ B runtime·callbackasm1(SB)
+ MOVD $959, R12
+ B runtime·callbackasm1(SB)
+ MOVD $960, R12
+ B runtime·callbackasm1(SB)
+ MOVD $961, R12
+ B runtime·callbackasm1(SB)
+ MOVD $962, R12
+ B runtime·callbackasm1(SB)
+ MOVD $963, R12
+ B runtime·callbackasm1(SB)
+ MOVD $964, R12
+ B runtime·callbackasm1(SB)
+ MOVD $965, R12
+ B runtime·callbackasm1(SB)
+ MOVD $966, R12
+ B runtime·callbackasm1(SB)
+ MOVD $967, R12
+ B runtime·callbackasm1(SB)
+ MOVD $968, R12
+ B runtime·callbackasm1(SB)
+ MOVD $969, R12
+ B runtime·callbackasm1(SB)
+ MOVD $970, R12
+ B runtime·callbackasm1(SB)
+ MOVD $971, R12
+ B runtime·callbackasm1(SB)
+ MOVD $972, R12
+ B runtime·callbackasm1(SB)
+ MOVD $973, R12
+ B runtime·callbackasm1(SB)
+ MOVD $974, R12
+ B runtime·callbackasm1(SB)
+ MOVD $975, R12
+ B runtime·callbackasm1(SB)
+ MOVD $976, R12
+ B runtime·callbackasm1(SB)
+ MOVD $977, R12
+ B runtime·callbackasm1(SB)
+ MOVD $978, R12
+ B runtime·callbackasm1(SB)
+ MOVD $979, R12
+ B runtime·callbackasm1(SB)
+ MOVD $980, R12
+ B runtime·callbackasm1(SB)
+ MOVD $981, R12
+ B runtime·callbackasm1(SB)
+ MOVD $982, R12
+ B runtime·callbackasm1(SB)
+ MOVD $983, R12
+ B runtime·callbackasm1(SB)
+ MOVD $984, R12
+ B runtime·callbackasm1(SB)
+ MOVD $985, R12
+ B runtime·callbackasm1(SB)
+ MOVD $986, R12
+ B runtime·callbackasm1(SB)
+ MOVD $987, R12
+ B runtime·callbackasm1(SB)
+ MOVD $988, R12
+ B runtime·callbackasm1(SB)
+ MOVD $989, R12
+ B runtime·callbackasm1(SB)
+ MOVD $990, R12
+ B runtime·callbackasm1(SB)
+ MOVD $991, R12
+ B runtime·callbackasm1(SB)
+ MOVD $992, R12
+ B runtime·callbackasm1(SB)
+ MOVD $993, R12
+ B runtime·callbackasm1(SB)
+ MOVD $994, R12
+ B runtime·callbackasm1(SB)
+ MOVD $995, R12
+ B runtime·callbackasm1(SB)
+ MOVD $996, R12
+ B runtime·callbackasm1(SB)
+ MOVD $997, R12
+ B runtime·callbackasm1(SB)
+ MOVD $998, R12
+ B runtime·callbackasm1(SB)
+ MOVD $999, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1000, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1001, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1002, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1003, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1004, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1005, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1006, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1007, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1008, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1009, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1010, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1011, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1012, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1013, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1014, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1015, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1016, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1017, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1018, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1019, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1020, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1021, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1022, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1023, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1024, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1025, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1026, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1027, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1028, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1029, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1030, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1031, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1032, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1033, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1034, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1035, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1036, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1037, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1038, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1039, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1040, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1041, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1042, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1043, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1044, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1045, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1046, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1047, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1048, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1049, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1050, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1051, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1052, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1053, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1054, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1055, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1056, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1057, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1058, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1059, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1060, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1061, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1062, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1063, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1064, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1065, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1066, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1067, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1068, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1069, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1070, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1071, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1072, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1073, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1074, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1075, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1076, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1077, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1078, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1079, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1080, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1081, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1082, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1083, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1084, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1085, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1086, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1087, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1088, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1089, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1090, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1091, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1092, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1093, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1094, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1095, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1096, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1097, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1098, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1099, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1100, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1101, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1102, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1103, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1104, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1105, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1106, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1107, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1108, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1109, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1110, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1111, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1112, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1113, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1114, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1115, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1116, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1117, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1118, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1119, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1120, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1121, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1122, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1123, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1124, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1125, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1126, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1127, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1128, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1129, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1130, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1131, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1132, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1133, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1134, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1135, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1136, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1137, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1138, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1139, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1140, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1141, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1142, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1143, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1144, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1145, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1146, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1147, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1148, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1149, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1150, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1151, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1152, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1153, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1154, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1155, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1156, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1157, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1158, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1159, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1160, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1161, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1162, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1163, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1164, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1165, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1166, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1167, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1168, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1169, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1170, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1171, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1172, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1173, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1174, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1175, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1176, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1177, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1178, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1179, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1180, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1181, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1182, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1183, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1184, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1185, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1186, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1187, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1188, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1189, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1190, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1191, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1192, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1193, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1194, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1195, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1196, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1197, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1198, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1199, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1200, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1201, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1202, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1203, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1204, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1205, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1206, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1207, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1208, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1209, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1210, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1211, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1212, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1213, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1214, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1215, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1216, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1217, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1218, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1219, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1220, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1221, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1222, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1223, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1224, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1225, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1226, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1227, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1228, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1229, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1230, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1231, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1232, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1233, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1234, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1235, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1236, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1237, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1238, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1239, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1240, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1241, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1242, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1243, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1244, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1245, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1246, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1247, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1248, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1249, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1250, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1251, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1252, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1253, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1254, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1255, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1256, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1257, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1258, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1259, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1260, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1261, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1262, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1263, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1264, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1265, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1266, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1267, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1268, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1269, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1270, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1271, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1272, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1273, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1274, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1275, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1276, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1277, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1278, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1279, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1280, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1281, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1282, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1283, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1284, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1285, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1286, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1287, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1288, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1289, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1290, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1291, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1292, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1293, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1294, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1295, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1296, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1297, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1298, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1299, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1300, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1301, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1302, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1303, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1304, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1305, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1306, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1307, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1308, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1309, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1310, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1311, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1312, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1313, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1314, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1315, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1316, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1317, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1318, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1319, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1320, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1321, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1322, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1323, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1324, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1325, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1326, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1327, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1328, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1329, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1330, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1331, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1332, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1333, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1334, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1335, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1336, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1337, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1338, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1339, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1340, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1341, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1342, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1343, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1344, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1345, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1346, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1347, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1348, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1349, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1350, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1351, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1352, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1353, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1354, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1355, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1356, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1357, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1358, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1359, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1360, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1361, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1362, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1363, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1364, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1365, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1366, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1367, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1368, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1369, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1370, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1371, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1372, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1373, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1374, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1375, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1376, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1377, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1378, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1379, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1380, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1381, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1382, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1383, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1384, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1385, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1386, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1387, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1388, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1389, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1390, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1391, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1392, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1393, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1394, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1395, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1396, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1397, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1398, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1399, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1400, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1401, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1402, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1403, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1404, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1405, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1406, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1407, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1408, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1409, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1410, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1411, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1412, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1413, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1414, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1415, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1416, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1417, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1418, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1419, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1420, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1421, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1422, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1423, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1424, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1425, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1426, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1427, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1428, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1429, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1430, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1431, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1432, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1433, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1434, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1435, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1436, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1437, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1438, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1439, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1440, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1441, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1442, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1443, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1444, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1445, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1446, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1447, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1448, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1449, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1450, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1451, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1452, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1453, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1454, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1455, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1456, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1457, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1458, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1459, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1460, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1461, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1462, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1463, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1464, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1465, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1466, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1467, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1468, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1469, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1470, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1471, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1472, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1473, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1474, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1475, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1476, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1477, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1478, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1479, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1480, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1481, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1482, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1483, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1484, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1485, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1486, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1487, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1488, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1489, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1490, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1491, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1492, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1493, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1494, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1495, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1496, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1497, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1498, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1499, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1500, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1501, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1502, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1503, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1504, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1505, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1506, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1507, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1508, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1509, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1510, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1511, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1512, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1513, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1514, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1515, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1516, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1517, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1518, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1519, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1520, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1521, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1522, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1523, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1524, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1525, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1526, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1527, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1528, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1529, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1530, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1531, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1532, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1533, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1534, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1535, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1536, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1537, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1538, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1539, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1540, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1541, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1542, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1543, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1544, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1545, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1546, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1547, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1548, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1549, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1550, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1551, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1552, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1553, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1554, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1555, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1556, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1557, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1558, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1559, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1560, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1561, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1562, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1563, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1564, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1565, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1566, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1567, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1568, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1569, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1570, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1571, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1572, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1573, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1574, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1575, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1576, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1577, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1578, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1579, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1580, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1581, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1582, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1583, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1584, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1585, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1586, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1587, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1588, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1589, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1590, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1591, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1592, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1593, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1594, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1595, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1596, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1597, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1598, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1599, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1600, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1601, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1602, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1603, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1604, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1605, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1606, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1607, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1608, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1609, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1610, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1611, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1612, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1613, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1614, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1615, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1616, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1617, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1618, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1619, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1620, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1621, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1622, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1623, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1624, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1625, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1626, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1627, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1628, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1629, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1630, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1631, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1632, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1633, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1634, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1635, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1636, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1637, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1638, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1639, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1640, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1641, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1642, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1643, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1644, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1645, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1646, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1647, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1648, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1649, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1650, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1651, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1652, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1653, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1654, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1655, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1656, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1657, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1658, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1659, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1660, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1661, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1662, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1663, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1664, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1665, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1666, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1667, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1668, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1669, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1670, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1671, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1672, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1673, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1674, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1675, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1676, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1677, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1678, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1679, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1680, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1681, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1682, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1683, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1684, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1685, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1686, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1687, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1688, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1689, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1690, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1691, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1692, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1693, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1694, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1695, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1696, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1697, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1698, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1699, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1700, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1701, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1702, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1703, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1704, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1705, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1706, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1707, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1708, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1709, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1710, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1711, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1712, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1713, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1714, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1715, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1716, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1717, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1718, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1719, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1720, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1721, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1722, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1723, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1724, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1725, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1726, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1727, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1728, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1729, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1730, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1731, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1732, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1733, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1734, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1735, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1736, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1737, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1738, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1739, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1740, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1741, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1742, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1743, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1744, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1745, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1746, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1747, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1748, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1749, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1750, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1751, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1752, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1753, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1754, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1755, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1756, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1757, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1758, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1759, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1760, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1761, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1762, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1763, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1764, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1765, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1766, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1767, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1768, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1769, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1770, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1771, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1772, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1773, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1774, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1775, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1776, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1777, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1778, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1779, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1780, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1781, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1782, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1783, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1784, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1785, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1786, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1787, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1788, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1789, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1790, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1791, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1792, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1793, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1794, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1795, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1796, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1797, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1798, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1799, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1800, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1801, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1802, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1803, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1804, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1805, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1806, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1807, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1808, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1809, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1810, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1811, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1812, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1813, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1814, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1815, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1816, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1817, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1818, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1819, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1820, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1821, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1822, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1823, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1824, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1825, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1826, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1827, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1828, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1829, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1830, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1831, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1832, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1833, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1834, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1835, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1836, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1837, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1838, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1839, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1840, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1841, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1842, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1843, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1844, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1845, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1846, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1847, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1848, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1849, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1850, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1851, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1852, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1853, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1854, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1855, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1856, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1857, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1858, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1859, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1860, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1861, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1862, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1863, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1864, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1865, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1866, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1867, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1868, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1869, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1870, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1871, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1872, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1873, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1874, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1875, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1876, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1877, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1878, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1879, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1880, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1881, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1882, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1883, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1884, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1885, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1886, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1887, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1888, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1889, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1890, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1891, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1892, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1893, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1894, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1895, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1896, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1897, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1898, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1899, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1900, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1901, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1902, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1903, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1904, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1905, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1906, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1907, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1908, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1909, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1910, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1911, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1912, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1913, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1914, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1915, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1916, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1917, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1918, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1919, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1920, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1921, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1922, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1923, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1924, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1925, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1926, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1927, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1928, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1929, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1930, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1931, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1932, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1933, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1934, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1935, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1936, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1937, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1938, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1939, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1940, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1941, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1942, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1943, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1944, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1945, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1946, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1947, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1948, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1949, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1950, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1951, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1952, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1953, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1954, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1955, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1956, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1957, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1958, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1959, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1960, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1961, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1962, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1963, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1964, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1965, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1966, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1967, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1968, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1969, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1970, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1971, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1972, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1973, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1974, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1975, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1976, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1977, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1978, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1979, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1980, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1981, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1982, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1983, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1984, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1985, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1986, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1987, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1988, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1989, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1990, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1991, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1992, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1993, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1994, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1995, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1996, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1997, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1998, R12
+ B runtime·callbackasm1(SB)
+ MOVD $1999, R12
+ B runtime·callbackasm1(SB)